
[Xen-changelog] [xen-unstable] amd: Extended migration support



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1220004821 -3600
# Node ID 10e0e90831f0c92cb15e69198e8895b72102939a
# Parent  8623a537aff14dfb2efcdb2ec9cd9787d5de513c
amd: Extended migration support

This patch adds support for AMD's extended migration, a.k.a. CPUID
feature and extended-feature masking.

Signed-off-by: Travis Betak <travis.betak@xxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
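Usage sketch (illustration only, not part of the changeset): the masks are
selected on the Xen command line, either with one of the pre-canned
processor revision strings or with explicit 32-bit mask values. The option
names follow the string_param()/integer_param() registrations added to
amd.c below; the hex values shown here are placeholders only.

  cpuid_mask_cpu=fam_0f_rev_f

or

  cpuid_mask_ecx=0xffffffff cpuid_mask_edx=0x078bfbff
  cpuid_mask_ext_ecx=0xffffffff cpuid_mask_ext_edx=0xefd3fbff

Per the code below, explicit masks (if any are non-zero) are used in
preference to the revision string, and a mask left at zero defaults to ~0,
i.e. no masking of that register.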
 xen/arch/x86/cpu/amd.c          |  138 +++++++++++++++++++++++++++++++++++++++-
 xen/arch/x86/cpu/amd.h          |  103 +++++++++++++++++++++++++++++
 xen/include/asm-x86/msr-index.h |    3 
 3 files changed, 243 insertions(+), 1 deletion(-)
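
For illustration, a custom mask value could be composed from the helpers
introduced in the new amd.h (a sketch only, assuming amd.h and
<asm/cpufeature.h> are in scope). The X86_FEATURE_* constants encode
"word*32 + bit", which X86_FEATURE_BITPOS() reduces to a bit position
within a single 32-bit register:

  #include <asm/cpufeature.h>
  #include "amd.h"

  /* Sketch: advertise the K8 rev-F extended feature set minus 3DNow!.
   * AMD_EXTFEATURES_K8_REV_F_EDX and __bit() come from amd.h below;
   * __bit() reduces the feature constant mod 32 to an EDX bit. */
  static const unsigned int custom_extfeat_edx =
          AMD_EXTFEATURES_K8_REV_F_EDX & ~__bit(X86_FEATURE_3DNOW);

  /* The resulting value could then be passed at boot as
   * cpuid_mask_ext_edx=<value>. */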

diff -r 8623a537aff1 -r 10e0e90831f0 xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c    Fri Aug 29 10:46:46 2008 +0100
+++ b/xen/arch/x86/cpu/amd.c    Fri Aug 29 11:13:41 2008 +0100
@@ -10,8 +10,142 @@
 #include <asm/hvm/support.h>
 
 #include "cpu.h"
+#include "amd.h"
 
 int start_svm(struct cpuinfo_x86 *c);
+
+/*
+ * Pre-canned values for overriding the CPUID features 
+ * and extended features masks.
+ *
+ * Currently supported processors:
+ * 
+ * "fam_0f_rev_c"
+ * "fam_0f_rev_d"
+ * "fam_0f_rev_e"
+ * "fam_0f_rev_f"
+ * "fam_0f_rev_g"
+ * "fam_10_rev_b"
+ * "fam_10_rev_c"
+ * "fam_11_rev_b"
+ */
+static char opt_famrev[14];
+string_param("cpuid_mask_cpu", opt_famrev);
+
+/* Finer-grained CPUID feature control. */
+static unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
+integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
+integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
+static unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
+integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
+integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
+
+static inline void wrmsr_amd(unsigned int index, unsigned int lo, 
+               unsigned int hi)
+{
+       asm volatile (
+               "wrmsr"
+               : /* No outputs */
+               : "c" (index), "a" (lo), 
+               "d" (hi), "D" (0x9c5a203a)
+       );
+}
+
+/*
+ * Mask the features and extended features returned by CPUID.  Parameters are
+ * set from the boot line via two methods:
+ *
+ *   1) Specific processor revision string
+ *   2) User-defined masks
+ *
+ * If both are supplied, the user-defined masks take precedence.
+ */
+static void __devinit set_cpuidmask(struct cpuinfo_x86 *c)
+{
+       static unsigned int feat_ecx, feat_edx;
+       static unsigned int extfeat_ecx, extfeat_edx;
+       static enum { not_parsed, no_mask, set_mask } status;
+
+       if (status == no_mask)
+               return;
+
+       if (status == set_mask)
+               goto setmask;
+
+       ASSERT((status == not_parsed) && (smp_processor_id() == 0));
+       status = no_mask;
+
+       if (opt_cpuid_mask_ecx | opt_cpuid_mask_edx |
+           opt_cpuid_mask_ext_ecx | opt_cpuid_mask_ext_edx) {
+               feat_ecx = opt_cpuid_mask_ecx ? : ~0U;
+               feat_edx = opt_cpuid_mask_edx ? : ~0U;
+               extfeat_ecx = opt_cpuid_mask_ext_ecx ? : ~0U;
+               extfeat_edx = opt_cpuid_mask_ext_edx ? : ~0U;
+       } else if (*opt_famrev == '\0') {
+               return;
+       } else if (!strcmp(opt_famrev, "fam_0f_rev_c")) {
+               feat_ecx = AMD_FEATURES_K8_REV_C_ECX;
+               feat_edx = AMD_FEATURES_K8_REV_C_EDX;
+               extfeat_ecx = AMD_EXTFEATURES_K8_REV_C_ECX;
+               extfeat_edx = AMD_EXTFEATURES_K8_REV_C_EDX;
+       } else if (!strcmp(opt_famrev, "fam_0f_rev_d")) {
+               feat_ecx = AMD_FEATURES_K8_REV_D_ECX;
+               feat_edx = AMD_FEATURES_K8_REV_D_EDX;
+               extfeat_ecx = AMD_EXTFEATURES_K8_REV_D_ECX;
+               extfeat_edx = AMD_EXTFEATURES_K8_REV_D_EDX;
+       } else if (!strcmp(opt_famrev, "fam_0f_rev_e")) {
+               feat_ecx = AMD_FEATURES_K8_REV_E_ECX;
+               feat_edx = AMD_FEATURES_K8_REV_E_EDX;
+               extfeat_ecx = AMD_EXTFEATURES_K8_REV_E_ECX;
+               extfeat_edx = AMD_EXTFEATURES_K8_REV_E_EDX;
+       } else if (!strcmp(opt_famrev, "fam_0f_rev_f")) {
+               feat_ecx = AMD_FEATURES_K8_REV_F_ECX;
+               feat_edx = AMD_FEATURES_K8_REV_F_EDX;
+               extfeat_ecx = AMD_EXTFEATURES_K8_REV_F_ECX;
+               extfeat_edx = AMD_EXTFEATURES_K8_REV_F_EDX;
+       } else if (!strcmp(opt_famrev, "fam_0f_rev_g")) {
+               feat_ecx = AMD_FEATURES_K8_REV_G_ECX;
+               feat_edx = AMD_FEATURES_K8_REV_G_EDX;
+               extfeat_ecx = AMD_EXTFEATURES_K8_REV_G_ECX;
+               extfeat_edx = AMD_EXTFEATURES_K8_REV_G_EDX;
+       } else if (!strcmp(opt_famrev, "fam_10_rev_b")) {
+               feat_ecx = AMD_FEATURES_FAM10h_REV_B_ECX;
+               feat_edx = AMD_FEATURES_FAM10h_REV_B_EDX;
+               extfeat_ecx = AMD_EXTFEATURES_FAM10h_REV_B_ECX;
+               extfeat_edx = AMD_EXTFEATURES_FAM10h_REV_B_EDX;
+       } else if (!strcmp(opt_famrev, "fam_10_rev_c")) {
+               feat_ecx = AMD_FEATURES_FAM10h_REV_C_ECX;
+               feat_edx = AMD_FEATURES_FAM10h_REV_C_EDX;
+               extfeat_ecx = AMD_EXTFEATURES_FAM10h_REV_C_ECX;
+               extfeat_edx = AMD_EXTFEATURES_FAM10h_REV_C_EDX;
+       } else if (!strcmp(opt_famrev, "fam_11_rev_b")) {
+               feat_ecx = AMD_FEATURES_FAM11h_REV_B_ECX;
+               feat_edx = AMD_FEATURES_FAM11h_REV_B_EDX;
+               extfeat_ecx = AMD_EXTFEATURES_FAM11h_REV_B_ECX;
+               extfeat_edx = AMD_EXTFEATURES_FAM11h_REV_B_EDX;
+       } else {
+               printk("Invalid processor string: %s\n", opt_famrev);
+               printk("CPUID will not be masked\n");
+               return;
+       }
+
+       status = set_mask;
+       printk("Writing CPUID feature mask ECX:EDX -> %08Xh:%08Xh\n", 
+              feat_ecx, feat_edx);
+       printk("Writing CPUID extended feature mask ECX:EDX -> %08Xh:%08Xh\n", 
+              extfeat_ecx, extfeat_edx);
+
+ setmask:
+       /* FIXME check if processor supports CPUID masking */
+       /* AMD processors prior to family 10h required a 32-bit password */
+       if (c->x86 >= 0x10) {
+               wrmsr(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
+               wrmsr(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
+       } else if (c->x86 == 0x0f) {
+               wrmsr_amd(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
+               wrmsr_amd(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
+       }
+}
 
 /*
  * amd_flush_filter={on,off}. Forcibly Enable or disable the TLB flush
@@ -115,7 +249,7 @@ static void check_disable_c1e(unsigned i
                on_each_cpu(disable_c1e, NULL, 1, 1);
 }
 
-static void __init init_amd(struct cpuinfo_x86 *c)
+static void __devinit init_amd(struct cpuinfo_x86 *c)
 {
        u32 l, h;
        int mbytes = num_physpages >> (20-PAGE_SHIFT);
@@ -367,6 +501,8 @@ static void __init init_amd(struct cpuin
        /* Prevent TSC drift in non single-processor, single-core platforms. */
        if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
                disable_c1_ramping();
+
+       set_cpuidmask(c);
 
        start_svm(c);
 }
diff -r 8623a537aff1 -r 10e0e90831f0 xen/arch/x86/cpu/amd.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/cpu/amd.h    Fri Aug 29 11:13:41 2008 +0100
@@ -0,0 +1,103 @@
+/*
+ * amd.h - AMD processor specific definitions
+ */
+
+#ifndef __AMD_H__
+#define __AMD_H__
+
+#include <asm/cpufeature.h>
+
+/* CPUID masked for use by AMD-V Extended Migration */
+
+#define X86_FEATURE_BITPOS(_feature_) ((_feature_) % 32)
+#define __bit(_x_) (1U << X86_FEATURE_BITPOS(_x_))
+
+/* Family 0Fh, Revision C */
+#define AMD_FEATURES_K8_REV_C_ECX  0
+#define AMD_FEATURES_K8_REV_C_EDX (                                    \
+       __bit(X86_FEATURE_FPU)      | __bit(X86_FEATURE_VME)   |        \
+       __bit(X86_FEATURE_DE)       | __bit(X86_FEATURE_PSE)   |        \
+       __bit(X86_FEATURE_TSC)      | __bit(X86_FEATURE_MSR)   |        \
+       __bit(X86_FEATURE_PAE)      | __bit(X86_FEATURE_MCE)   |        \
+       __bit(X86_FEATURE_CX8)      | __bit(X86_FEATURE_APIC)  |        \
+       __bit(X86_FEATURE_SEP)      | __bit(X86_FEATURE_MTRR)  |        \
+       __bit(X86_FEATURE_PGE)      | __bit(X86_FEATURE_MCA)   |        \
+       __bit(X86_FEATURE_CMOV)     | __bit(X86_FEATURE_PAT)   |        \
+       __bit(X86_FEATURE_PSE36)    | __bit(X86_FEATURE_CLFLSH)|        \
+       __bit(X86_FEATURE_MMX)      | __bit(X86_FEATURE_FXSR)  |        \
+       __bit(X86_FEATURE_XMM)      | __bit(X86_FEATURE_XMM2))
+#define AMD_EXTFEATURES_K8_REV_C_ECX  0 
+#define AMD_EXTFEATURES_K8_REV_C_EDX  (                                \
+       __bit(X86_FEATURE_FPU)      | __bit(X86_FEATURE_VME)   |        \
+       __bit(X86_FEATURE_DE)       | __bit(X86_FEATURE_PSE)   |        \
+       __bit(X86_FEATURE_TSC)      | __bit(X86_FEATURE_MSR)   |        \
+       __bit(X86_FEATURE_PAE)      | __bit(X86_FEATURE_MCE)   |        \
+       __bit(X86_FEATURE_CX8)      | __bit(X86_FEATURE_APIC)  |        \
+       __bit(X86_FEATURE_SYSCALL)  | __bit(X86_FEATURE_MTRR)  |        \
+       __bit(X86_FEATURE_PGE)      | __bit(X86_FEATURE_MCA)   |        \
+       __bit(X86_FEATURE_CMOV)     | __bit(X86_FEATURE_PAT)   |        \
+       __bit(X86_FEATURE_PSE36)    | __bit(X86_FEATURE_NX)    |        \
+       __bit(X86_FEATURE_MMXEXT)   | __bit(X86_FEATURE_MMX)   |        \
+       __bit(X86_FEATURE_FXSR)     | __bit(X86_FEATURE_LM)    |        \
+       __bit(X86_FEATURE_3DNOWEXT) | __bit(X86_FEATURE_3DNOW))
+
+/* Family 0Fh, Revision D */
+#define AMD_FEATURES_K8_REV_D_ECX         AMD_FEATURES_K8_REV_C_ECX
+#define AMD_FEATURES_K8_REV_D_EDX         AMD_FEATURES_K8_REV_C_EDX
+#define AMD_EXTFEATURES_K8_REV_D_ECX     (AMD_EXTFEATURES_K8_REV_C_ECX |\
+       __bit(X86_FEATURE_LAHF_LM))
+#define AMD_EXTFEATURES_K8_REV_D_EDX     (AMD_EXTFEATURES_K8_REV_C_EDX |\
+       __bit(X86_FEATURE_FFXSR))
+
+/* Family 0Fh, Revision E */
+#define AMD_FEATURES_K8_REV_E_ECX        (AMD_FEATURES_K8_REV_D_ECX |  \
+       __bit(X86_FEATURE_XMM3))
+#define AMD_FEATURES_K8_REV_E_EDX        (AMD_FEATURES_K8_REV_D_EDX |  \
+       __bit(X86_FEATURE_HT))
+#define AMD_EXTFEATURES_K8_REV_E_ECX     (AMD_EXTFEATURES_K8_REV_D_ECX |\
+       __bit(X86_FEATURE_CMP_LEGACY)) 
+#define AMD_EXTFEATURES_K8_REV_E_EDX      AMD_EXTFEATURES_K8_REV_D_EDX
+
+/* Family 0Fh, Revision F */
+#define AMD_FEATURES_K8_REV_F_ECX        (AMD_FEATURES_K8_REV_E_ECX |  \
+       __bit(X86_FEATURE_CX16))
+#define AMD_FEATURES_K8_REV_F_EDX         AMD_FEATURES_K8_REV_E_EDX
+#define AMD_EXTFEATURES_K8_REV_F_ECX     (AMD_EXTFEATURES_K8_REV_E_ECX |\
+       __bit(X86_FEATURE_SVME) | __bit(X86_FEATURE_EXTAPICSPACE) |     \
+       __bit(X86_FEATURE_ALTMOVCR))
+#define AMD_EXTFEATURES_K8_REV_F_EDX     (AMD_EXTFEATURES_K8_REV_E_EDX |\
+       __bit(X86_FEATURE_RDTSCP))
+
+/* Family 0Fh, Revision G */
+#define AMD_FEATURES_K8_REV_G_ECX         AMD_FEATURES_K8_REV_F_ECX
+#define AMD_FEATURES_K8_REV_G_EDX         AMD_FEATURES_K8_REV_F_EDX
+#define AMD_EXTFEATURES_K8_REV_G_ECX     (AMD_EXTFEATURES_K8_REV_F_ECX |\
+       __bit(X86_FEATURE_3DNOWPF))
+#define AMD_EXTFEATURES_K8_REV_G_EDX      AMD_EXTFEATURES_K8_REV_F_EDX
+
+/* Family 10h, Revision B */
+#define AMD_FEATURES_FAM10h_REV_B_ECX    (AMD_FEATURES_K8_REV_F_ECX |  \
+       __bit(X86_FEATURE_POPCNT) | __bit(X86_FEATURE_MWAIT))
+#define AMD_FEATURES_FAM10h_REV_B_EDX     AMD_FEATURES_K8_REV_F_EDX
+#define AMD_EXTFEATURES_FAM10h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\
+       __bit(X86_FEATURE_ABM) | __bit(X86_FEATURE_SSE4A) |             \
+       __bit(X86_FEATURE_MISALIGNSSE) | __bit(X86_FEATURE_OSVW) |      \
+       __bit(X86_FEATURE_IBS))
+#define AMD_EXTFEATURES_FAM10h_REV_B_EDX (AMD_EXTFEATURES_K8_REV_F_EDX |\
+       __bit(X86_FEATURE_PAGE1GB))
+
+/* Family 10h, Revision C */
+#define AMD_FEATURES_FAM10h_REV_C_ECX     AMD_FEATURES_FAM10h_REV_B_ECX
+#define AMD_FEATURES_FAM10h_REV_C_EDX     AMD_FEATURES_FAM10h_REV_B_EDX
+#define AMD_EXTFEATURES_FAM10h_REV_C_ECX (AMD_EXTFEATURES_FAM10h_REV_B_ECX |\
+       __bit(X86_FEATURE_SKINIT) | __bit(X86_FEATURE_WDT))
+#define AMD_EXTFEATURES_FAM10h_REV_C_EDX  AMD_EXTFEATURES_FAM10h_REV_B_EDX
+
+/* Family 11h, Revision B */
+#define AMD_FEATURES_FAM11h_REV_B_ECX     AMD_FEATURES_K8_REV_G_ECX
+#define AMD_FEATURES_FAM11h_REV_B_EDX     AMD_FEATURES_K8_REV_G_EDX
+#define AMD_EXTFEATURES_FAM11h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_G_ECX |\
+       __bit(X86_FEATURE_SKINIT))
+#define AMD_EXTFEATURES_FAM11h_REV_B_EDX  AMD_EXTFEATURES_K8_REV_G_EDX
+
+#endif /* __AMD_H__ */
diff -r 8623a537aff1 -r 10e0e90831f0 xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h   Fri Aug 29 10:46:46 2008 +0100
+++ b/xen/include/asm-x86/msr-index.h   Fri Aug 29 11:13:41 2008 +0100
@@ -186,6 +186,9 @@
 #define MSR_K8_ENABLE_C1E              0xc0010055
 #define MSR_K8_VM_CR                   0xc0010114
 #define MSR_K8_VM_HSAVE_PA             0xc0010117
+
+#define MSR_K8_FEATURE_MASK            0xc0011004
+#define MSR_K8_EXT_FEATURE_MASK                0xc0011005
 
 /* MSR_K8_VM_CR bits: */
 #define _K8_VMCR_SVME_DISABLE          4

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog