[Xen-changelog] [xen-unstable] x86: AMD Fam10/11 adjustments
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1192101114 -3600
# Node ID 0d7d6804af2204dece108959f4fa40764bd2734f
# Parent  49323c8b8633fc02164c8b949c2b55e167aeca1e
x86: AMD Fam10/11 adjustments

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Sync up affected files with 2.6.23-rc9.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/apic.c                     |    2
 xen/arch/x86/cpu/amd.c                  |   39 ++-
 xen/arch/x86/cpu/mcheck/mce.c           |   25 +-
 xen/arch/x86/cpu/mtrr/amd.c             |    2
 xen/arch/x86/cpu/mtrr/cyrix.c           |   33 ++-
 xen/arch/x86/cpu/mtrr/generic.c         |  135 ++++++++----
 xen/arch/x86/cpu/mtrr/main.c            |  109 +++++++---
 xen/arch/x86/cpu/mtrr/mtrr.h            |   27 +-
 xen/arch/x86/hvm/svm/svm.c              |   18 -
 xen/arch/x86/hvm/vmx/vmx.c              |   14 -
 xen/arch/x86/nmi.c                      |    9
 xen/arch/x86/oprofile/nmi_int.c         |    8
 xen/arch/x86/oprofile/op_model_athlon.c |   18 +
 xen/arch/x86/platform_hypercall.c       |    3
 xen/arch/x86/smpboot.c                  |    7
 xen/arch/x86/traps.c                    |    8
 xen/include/asm-x86/msr-index.h         |  334 ++++++++++++++++++++++++++++++++
 xen/include/asm-x86/msr.h               |  294 ----------------------------
 xen/include/asm-x86/mtrr.h              |    2
 xen/include/asm-x86/processor.h         |    6
 xen/include/xen/config.h                |    1
 21 files changed, 649 insertions(+), 445 deletions(-)

diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/apic.c	Thu Oct 11 12:11:54 2007 +0100
@@ -737,7 +737,7 @@ static int __init detect_init_APIC (void
     switch (boot_cpu_data.x86_vendor) {
     case X86_VENDOR_AMD:
         if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
-            (boot_cpu_data.x86 == 15))
+            (boot_cpu_data.x86 >= 15 && boot_cpu_data.x86 <= 17))
             break;
         goto no_apic;
     case X86_VENDOR_INTEL:
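For reference, the family value tested above already folds CPUID's extended-family
field into the base family, so Fam10h and Fam11h show up as 16 and 17 decimal. A
minimal stand-alone sketch of that decoding, assuming a raw CPUID leaf 1 EAX value
(the helper name is illustrative, not from the patch):

    #include <stdint.h>

    /* Hypothetical helper: fold the extended-family field into the base
     * family, as the kernel's CPU identification code does.  AMD Fam10h
     * then reads as 0xf + 0x1 = 0x10, Fam11h as 0x11. */
    static unsigned int effective_family(uint32_t cpuid1_eax)
    {
        unsigned int family = (cpuid1_eax >> 8) & 0xf;

        if (family == 0xf)                  /* extended family in use */
            family += (cpuid1_eax >> 20) & 0xff;
        return family;
    }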
diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/amd.c	Thu Oct 11 12:11:54 2007 +0100
@@ -100,6 +100,8 @@ static void disable_c1_ramping(void)
 	}
 }
 
+int force_mwait __cpuinitdata;
+
 static void __init init_amd(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
@@ -182,10 +184,7 @@ static void __init init_amd(struct cpuin
 				f_vide();
 				rdtscl(d2);
 				d = d2-d;
-
-				/* Knock these two lines out if it debugs out ok */
-				printk(KERN_INFO "AMD K6 stepping B detected - ");
-				/* -- cut here -- */
+
 				if (d > 20*K6_BUG_LOOP)
 					printk("system stability may be impaired when more than 32 MB are used.\n");
 				else
@@ -279,6 +278,9 @@ static void __init init_amd(struct cpuin
 
 	switch (c->x86) {
 	case 15:
+	/* Use K8 tuning for Fam10h and Fam11h */
+	case 0x10:
+	case 0x11:
 		set_bit(X86_FEATURE_K8, c->x86_capability);
 		break;
 	case 6:
@@ -305,8 +307,6 @@ static void __init init_amd(struct cpuin
 
 	if (cpuid_eax(0x80000000) >= 0x80000008) {
 		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-		if (c->x86_max_cores & (c->x86_max_cores - 1))
-			c->x86_max_cores = 1;
 	}
 
 	if (cpuid_eax(0x80000000) >= 0x80000007) {
@@ -317,15 +317,17 @@ static void __init init_amd(struct cpuin
 
 #ifdef CONFIG_X86_HT
 	/*
-	 * On a AMD dual core setup the lower bits of the APIC id
-	 * distingush the cores.  Assumes number of cores is a power
-	 * of two.
+	 * On a AMD multi core setup the lower bits of the APIC id
+	 * distingush the cores.
 	 */
 	if (c->x86_max_cores > 1) {
 		int cpu = smp_processor_id();
-		unsigned bits = 0;
-		while ((1 << bits) < c->x86_max_cores)
-			bits++;
+		unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
+
+		if (bits == 0) {
+			while ((1 << bits) < c->x86_max_cores)
+				bits++;
+		}
 		cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
 		phys_proc_id[cpu] >>= bits;
 		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
@@ -333,6 +335,13 @@ static void __init init_amd(struct cpuin
 	}
 #endif
 
+	if (c->x86 == 0x10 && !force_mwait)
+		clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
+
+	/* K6s reports MCEs but don't actually have all the MSRs */
+	if (c->x86 < 6)
+		clear_bit(X86_FEATURE_MCE, c->x86_capability);
+
 	/* Prevent TSC drift in non single-processor, single-core platforms. */
 	if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
 		disable_c1_ramping();
@@ -340,7 +349,7 @@ static void __init init_amd(struct cpuin
 	start_svm(c);
 }
 
-static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
@@ -353,7 +362,7 @@ static unsigned int amd_size_cache(struc
 	return size;
 }
 
-static struct cpu_dev amd_cpu_dev __initdata = {
+static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
 	.c_models = {
@@ -378,5 +387,3 @@ int __init amd_init_cpu(void)
 	cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
 	return 0;
 }
-
-//early_arch_initcall(amd_init_cpu);
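The hunk above prefers CPUID 0x80000008 ECX[15:12] (the ApicIdCoreIdSize field) over
the old power-of-two guess, since Fam10h parts can have non-power-of-two core counts.
A stand-alone sketch of the two strategies combined, assuming the same CPUID leaf
semantics shown in the hunk:

    /* Width of the core-ID field inside the local APIC ID.  Fam10h+
     * report it directly in CPUID 0x80000008 ECX[15:12]; older parts
     * leave that field zero, so fall back to rounding the core count
     * up to the next power of two. */
    static unsigned int core_id_bits(unsigned int ecx_80000008,
                                     unsigned int max_cores)
    {
        unsigned int bits = (ecx_80000008 >> 12) & 0xf;

        if (bits == 0)
            while ((1u << bits) < max_cores)
                bits++;
        return bits;
    }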
diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce.c	Thu Oct 11 12:11:54 2007 +0100
@@ -17,6 +17,8 @@ int mce_disabled = 0;
 int mce_disabled = 0;
 int nr_mce_banks;
+EXPORT_SYMBOL_GPL(nr_mce_banks);	/* non-fatal.o */
+
 /* Handle unconfigured int18 (should never happen) */
 static fastcall void unexpected_machine_check(struct cpu_user_regs * regs, long error_code)
 {
@@ -34,8 +36,7 @@ void mcheck_init(struct cpuinfo_x86 *c)
 
 	switch (c->x86_vendor) {
 	case X86_VENDOR_AMD:
-		if (c->x86==6 || c->x86==15)
-			amd_mcheck_init(c);
+		amd_mcheck_init(c);
 		break;
 
 	case X86_VENDOR_INTEL:
@@ -61,16 +62,28 @@ void mcheck_init(struct cpuinfo_x86 *c)
 	}
 }
 
-static int __init mcheck_disable(char *str)
+static unsigned long old_cr4 __initdata;
+
+void __init stop_mce(void)
+{
+	old_cr4 = read_cr4();
+	clear_in_cr4(X86_CR4_MCE);
+}
+
+void __init restart_mce(void)
+{
+	if (old_cr4 & X86_CR4_MCE)
+		set_in_cr4(X86_CR4_MCE);
+}
+
+static void __init mcheck_disable(char *str)
 {
 	mce_disabled = 1;
-	return 0;
 }
 
-static int __init mcheck_enable(char *str)
+static void __init mcheck_enable(char *str)
 {
 	mce_disabled = -1;
-	return 0;
 }
 
 custom_param("nomce", mcheck_disable);

diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/cpu/mtrr/amd.c
--- a/xen/arch/x86/cpu/mtrr/amd.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/amd.c	Thu Oct 11 12:11:54 2007 +0100
@@ -7,7 +7,7 @@
 
 static void
 amd_get_mtrr(unsigned int reg, unsigned long *base,
-	     unsigned int *size, mtrr_type * type)
+	     unsigned long *size, mtrr_type * type)
 {
 	unsigned long low, high;

diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/cpu/mtrr/cyrix.c
--- a/xen/arch/x86/cpu/mtrr/cyrix.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/cyrix.c	Thu Oct 11 12:11:54 2007 +0100
@@ -9,7 +9,7 @@ int arr3_protected;
 
 static void
 cyrix_get_arr(unsigned int reg, unsigned long *base,
-	      unsigned int *size, mtrr_type * type)
+	      unsigned long *size, mtrr_type * type)
 {
 	unsigned long flags;
 	unsigned char arr, ccr3, rcr, shift;
@@ -77,7 +77,7 @@ cyrix_get_arr(unsigned int reg, unsigned
 }
 
 static int
-cyrix_get_free_region(unsigned long base, unsigned long size)
+cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 /*  [SUMMARY] Get a free ARR.
     <base> The starting (base) address of the region.
     <size> The size (in bytes) of the region.
@@ -86,9 +86,24 @@ cyrix_get_free_region(unsigned long base
 {
 	int i;
 	mtrr_type ltype;
-	unsigned long lbase;
-	unsigned int  lsize;
-
+	unsigned long lbase, lsize;
+
+	switch (replace_reg) {
+	case 7:
+		if (size < 0x40)
+			break;
+	case 6:
+	case 5:
+	case 4:
+		return replace_reg;
+	case 3:
+		if (arr3_protected)
+			break;
+	case 2:
+	case 1:
+	case 0:
+		return replace_reg;
+	}
 	/* If we are to set up a region >32M then look at ARR7 immediately */
 	if (size > 0x2000) {
 		cyrix_get_arr(7, &lbase, &lsize, &ltype);
@@ -121,7 +136,7 @@ static void prepare_set(void)
 	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
 	if ( cpu_has_pge ) {
 		cr4 = read_cr4();
-		write_cr4(cr4 & (unsigned char) ~(1 << 7));
+		write_cr4(cr4 & ~X86_CR4_PGE);
 	}
 
 	/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
@@ -214,16 +229,16 @@ static void cyrix_set_arr(unsigned int r
 
 typedef struct {
 	unsigned long base;
-	unsigned int size;
+	unsigned long size;
 	mtrr_type type;
 } arr_state_t;
 
-static arr_state_t arr_state[8] __devinitdata = {
+static arr_state_t arr_state[8] = {
 	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
 	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
 };
 
-static unsigned char ccr_state[7] __devinitdata = { 0, 0, 0, 0, 0, 0, 0 };
+static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
 
 static void cyrix_set_all(void)
 {
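The replace_reg switch in cyrix_get_free_region() relies on deliberate case
fall-through: ARR7 is only reusable for regions of at least 0x40 pages, and ARR3
only when it is not reserved for SMM. A stand-alone sketch of that control flow
(function and test values are illustrative only):

    #include <stdio.h>

    /* Mirror of the replace_reg selection above: fall-through between the
     * case labels is intentional; -1 means "search for a free ARR". */
    static int pick_replacement(int replace_reg, unsigned long size_pages,
                                int arr3_protected)
    {
        switch (replace_reg) {
        case 7:
            if (size_pages < 0x40)
                break;              /* too small for ARR7; fall back */
            /* fall through */
        case 6: case 5: case 4:
            return replace_reg;
        case 3:
            if (arr3_protected)
                break;
            /* fall through */
        case 2: case 1: case 0:
            return replace_reg;
        }
        return -1;
    }

    int main(void)
    {
        printf("%d\n", pick_replacement(7, 0x20, 0));  /* -> -1 */
        printf("%d\n", pick_replacement(3, 0x20, 1));  /* -> -1 */
        printf("%d\n", pick_replacement(5, 0x20, 0));  /* ->  5 */
        return 0;
    }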
diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/cpu/mtrr/generic.c
--- a/xen/arch/x86/cpu/mtrr/generic.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/generic.c	Thu Oct 11 12:11:54 2007 +0100
@@ -15,21 +15,34 @@ struct mtrr_state {
 struct mtrr_state {
 	struct mtrr_var_range *var_ranges;
 	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
 	unsigned char enabled;
+	unsigned char have_fixed;
 	mtrr_type def_type;
 };
 
+struct fixed_range_block {
+	int base_msr; /* start address of an MTRR block */
+	int ranges;   /* number of MTRRs in this block */
+};
+
+static struct fixed_range_block fixed_range_blocks[] = {
+	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
+	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
+	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
+	{}
+};
+
 static unsigned long smp_changes_mask;
 static struct mtrr_state mtrr_state = {};
 
 /*  Get the MSR pair relating to a var range  */
-static void __init
+static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
 {
 	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
 	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
 }
 
-static void __init
+static void
 get_fixed_ranges(mtrr_type * frs)
 {
 	unsigned int *p = (unsigned int *) frs;
@@ -41,6 +54,12 @@ get_fixed_ranges(mtrr_type * frs)
 		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
 	for (i = 0; i < 8; i++)
 		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
+}
+
+void mtrr_save_fixed_ranges(void *info)
+{
+	if (cpu_has_mtrr)
+		get_fixed_ranges(mtrr_state.fixed_ranges);
 }
 
 /*  Grab all of the MTRR state for this CPU into *state  */
@@ -58,9 +77,13 @@ void __init get_mtrr_state(void)
 	}
 	vrs = mtrr_state.var_ranges;
 
+	rdmsr(MTRRcap_MSR, lo, dummy);
+	mtrr_state.have_fixed = (lo >> 8) & 1;
+
 	for (i = 0; i < num_var_ranges; i++)
 		get_mtrr_var_range(i, &vrs[i]);
-	get_fixed_ranges(mtrr_state.fixed_ranges);
+	if (mtrr_state.have_fixed)
+		get_fixed_ranges(mtrr_state.fixed_ranges);
 
 	rdmsr(MTRRdefType_MSR, lo, dummy);
 	mtrr_state.def_type = (lo & 0xff);
@@ -95,7 +118,45 @@ void mtrr_wrmsr(unsigned msr, unsigned a
 		smp_processor_id(), msr, a, b);
 }
 
-int generic_get_free_region(unsigned long base, unsigned long size)
+/**
+ * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
+ * see AMD publication no. 24593, chapter 3.2.1 for more information
+ */
+static inline void k8_enable_fixed_iorrs(void)
+{
+	unsigned lo, hi;
+
+	rdmsr(MSR_K8_SYSCFG, lo, hi);
+	mtrr_wrmsr(MSR_K8_SYSCFG, lo
+				| K8_MTRRFIXRANGE_DRAM_ENABLE
+				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
+}
+
+/**
+ * Checks and updates an fixed-range MTRR if it differs from the value it
+ * should have. If K8 extenstions are wanted, update the K8 SYSCFG MSR also.
+ * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information
+ * \param msr MSR address of the MTTR which should be checked and updated
+ * \param changed pointer which indicates whether the MTRR needed to be changed
+ * \param msrwords pointer to the MSR values which the MSR should have
+ */
+static void set_fixed_range(int msr, int * changed, unsigned int * msrwords)
+{
+	unsigned lo, hi;
+
+	rdmsr(msr, lo, hi);
+
+	if (lo != msrwords[0] || hi != msrwords[1]) {
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+		    boot_cpu_data.x86 == 15 &&
+		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
+			k8_enable_fixed_iorrs();
+		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
+		*changed = TRUE;
+	}
+}
+
+int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 /*  [SUMMARY] Get a free MTRR.
     <base> The starting (base) address of the region.
     <size> The size (in bytes) of the region.
@@ -104,10 +165,11 @@ int generic_get_free_region(unsigned lon
 {
 	int i, max;
 	mtrr_type ltype;
-	unsigned long lbase;
-	unsigned lsize;
+	unsigned long lbase, lsize;
 
 	max = num_var_ranges;
+	if (replace_reg >= 0 && replace_reg < max)
+		return replace_reg;
 	for (i = 0; i < max; ++i) {
 		mtrr_if->get(i, &lbase, &lsize, &ltype);
 		if (lsize == 0)
@@ -117,7 +179,7 @@ int generic_get_free_region(unsigned lon
 }
 
 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
-			     unsigned int *size, mtrr_type * type)
+			     unsigned long *size, mtrr_type *type)
 {
 	unsigned int mask_lo, mask_hi, base_lo, base_hi;
 
@@ -143,36 +205,21 @@ static void generic_get_mtrr(unsigned in
 	*type = base_lo & 0xff;
 }
 
+/**
+ * Checks and updates the fixed-range MTRRs if they differ from the saved set
+ * \param frs pointer to fixed-range MTRR values, saved by get_fixed_ranges()
+ */
 static int set_fixed_ranges(mtrr_type * frs)
 {
-	unsigned int *p = (unsigned int *) frs;
+	unsigned long long *saved = (unsigned long long *) frs;
 	int changed = FALSE;
-	int i;
-	unsigned int lo, hi;
-
-	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
-	if (p[0] != lo || p[1] != hi) {
-		mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
-		changed = TRUE;
-	}
-
-	for (i = 0; i < 2; i++) {
-		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
-		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
-			mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
-				   p[3 + i * 2]);
-			changed = TRUE;
-		}
-	}
-
-	for (i = 0; i < 8; i++) {
-		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
-		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
-			mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
-				   p[7 + i * 2]);
-			changed = TRUE;
-		}
-	}
+	int block=-1, range;
+
+	while (fixed_range_blocks[++block].ranges)
+		for (range=0; range < fixed_range_blocks[block].ranges; range++)
+			set_fixed_range(fixed_range_blocks[block].base_msr + range,
+					&changed, (unsigned int *) saved++);
+
 	return changed;
 }
 
@@ -202,7 +249,9 @@ static int set_mtrr_var_ranges(unsigned
 	return changed;
 }
 
-static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
+static u32 deftype_lo, deftype_hi;
+
+static unsigned long set_mtrr_state(void)
 /*  [SUMMARY] Set the MTRR state for this CPU.
     <state> The MTRR state information to read.
    <ctxt> Some relevant CPU context.
@@ -217,14 +266,14 @@ static unsigned long set_mtrr_state(u32
 		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
 			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
 
-	if (set_fixed_ranges(mtrr_state.fixed_ranges))
+	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
 		change_mask |= MTRR_CHANGE_MASK_FIXED;
 
 	/*  Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value  */
 	if ((deftype_lo & 0xff) != mtrr_state.def_type
 	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
-		deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
+		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
 		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
 	}
 
@@ -233,7 +282,6 @@
 
 static unsigned long cr4 = 0;
-static u32 deftype_lo, deftype_hi;
 static DEFINE_SPINLOCK(set_atomicity_lock);
 
 /*
@@ -271,7 +319,7 @@ static void prepare_set(void)
 	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
 
 	/* Disable MTRRs, and set the default type to uncached */
-	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
+	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
 }
 
 static void post_set(void)
@@ -300,7 +348,7 @@ static void generic_set_all(void)
 	prepare_set();
 
 	/* Actually set the state */
-	mask = set_mtrr_state(deftype_lo,deftype_hi);
+	mask = set_mtrr_state();
 
 	post_set();
 	local_irq_restore(flags);
@@ -366,7 +414,7 @@ int generic_validate_add_page(unsigned l
 			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
 			return -EINVAL;
 		}
-		if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
+		if (!(base + size < 0x70000 || base > 0x7003F) &&
 		    (type == MTRR_TYPE_WRCOMB
 		     || type == MTRR_TYPE_WRBACK)) {
 			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
@@ -374,11 +422,6 @@ int generic_validate_add_page(unsigned l
 		}
 	}
 
-	if (base + size < 0x100) {
-		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
-		       base, size);
-		return -EINVAL;
-	}
 	/*  Check upper bits of base and last are equal and lower bits are 0
	    for base and 1 for last  */
 	last = base + size - 1;
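The table-driven set_fixed_ranges() above replaces three hand-written loops with one
walk over (base MSR, count) pairs covering all eleven fixed-range MTRR MSRs. A
stand-alone illustration of that walk, using the MSR numbers the patch assigns to
those registers (0x250, 0x258-0x259, 0x268-0x26f):

    #include <stdio.h>

    struct fixed_range_block {
        int base_msr;   /* first MSR of the block */
        int ranges;     /* number of MTRRs in the block */
    };

    static const struct fixed_range_block fixed_range_blocks[] = {
        { 0x250, 1 },   /* MTRRfix64K_00000: one 64K register   */
        { 0x258, 2 },   /* MTRRfix16K_80000: two 16K registers  */
        { 0x268, 8 },   /* MTRRfix4K_C0000:  eight 4K registers */
        {}
    };

    int main(void)
    {
        int block = -1, range;

        while (fixed_range_blocks[++block].ranges)
            for (range = 0; range < fixed_range_blocks[block].ranges; range++)
                printf("would check/set MSR 0x%x\n",
                       fixed_range_blocks[block].base_msr + range);
        return 0;
    }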
diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/cpu/mtrr/main.c
--- a/xen/arch/x86/cpu/mtrr/main.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/main.c	Thu Oct 11 12:11:54 2007 +0100
@@ -43,19 +43,21 @@
 #include "mtrr.h"
 
 /* No blocking mutexes in Xen. Spin instead. */
-#define DECLARE_MUTEX(_m) DEFINE_SPINLOCK(_m)
-#define down(_m) spin_lock(_m)
-#define up(_m) spin_unlock(_m)
+#define DEFINE_MUTEX(_m) DEFINE_SPINLOCK(_m)
+#define mutex_lock(_m) spin_lock(_m)
+#define mutex_unlock(_m) spin_unlock(_m)
 #define lock_cpu_hotplug() ((void)0)
 #define unlock_cpu_hotplug() ((void)0)
 #define dump_stack() ((void)0)
+#define get_cpu() smp_processor_id()
+#define put_cpu() do {} while(0)
 
 u32 num_var_ranges = 0;
 
 unsigned int *usage_table;
-static DECLARE_MUTEX(mtrr_sem);
-
-u32 size_or_mask, size_and_mask;
+static DEFINE_MUTEX(mtrr_mutex);
+
+u64 size_or_mask, size_and_mask;
 
 static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
 
@@ -70,7 +72,7 @@ extern int arr3_protected;
 #define arr3_protected 0
 #endif
 
-static char *mtrr_strings[MTRR_NUM_TYPES] =
+static const char *mtrr_strings[MTRR_NUM_TYPES] =
 {
     "uncachable",               /* 0 */
     "write-combining",          /* 1 */
@@ -81,7 +83,7 @@ static char *mtrr_strings[MTRR_NUM_TYPES
     "write-back",               /* 6 */
 };
 
-char *mtrr_attrib_to_str(int x)
+const char *mtrr_attrib_to_str(int x)
 {
 	return (x <= 6) ? mtrr_strings[x] : "?";
 }
@@ -166,6 +168,13 @@ static void ipi_handler(void *info)
 }
 #endif
 
+
+static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
+	return type1 == MTRR_TYPE_UNCACHABLE ||
+	       type2 == MTRR_TYPE_UNCACHABLE ||
+	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
+	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
+}
 
 /**
  * set_mtrr - update mtrrs on all processors
@@ -217,6 +226,8 @@ static void set_mtrr(unsigned int reg, u
 	data.smp_size = size;
 	data.smp_type = type;
 	atomic_set(&data.count, num_booting_cpus() - 1);
+	/* make sure data.count is visible before unleashing other CPUs */
+	smp_wmb();
 	atomic_set(&data.gate,0);
 
 	/*  Start the ball rolling on other CPUs  */
@@ -230,6 +241,7 @@ static void set_mtrr(unsigned int reg, u
 
 	/* ok, reset count and toggle gate */
 	atomic_set(&data.count, num_booting_cpus() - 1);
+	smp_wmb();
 	atomic_set(&data.gate,1);
 
 	/* do our MTRR business */
@@ -248,6 +260,7 @@ static void set_mtrr(unsigned int reg, u
 		cpu_relax();
 
 	atomic_set(&data.count, num_booting_cpus() - 1);
+	smp_wmb();
 	atomic_set(&data.gate,0);
 
 	/*
@@ -262,8 +275,8 @@ static void set_mtrr(unsigned int reg, u
 
 /**
  *	mtrr_add_page - Add a memory type region
- *	@base: Physical base address of region in pages (4 KB)
- *	@size: Physical size of region in pages (4 KB)
+ *	@base: Physical base address of region in pages (in units of 4 kB!)
+ *	@size: Physical size of region in pages (4 kB)
 *	@type: Type of MTRR desired
 *	@increment: If this is true do usage counting on the region
 *
@@ -299,11 +312,9 @@ int mtrr_add_page(unsigned long base, un
 int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, char increment)
 {
-	int i;
+	int i, replace, error;
 	mtrr_type ltype;
-	unsigned long lbase;
-	unsigned int lsize;
-	int error;
+	unsigned long lbase, lsize;
 
 	if (!mtrr_if)
 		return -ENXIO;
@@ -323,34 +334,47 @@ int mtrr_add_page(unsigned long base, un
 		return -ENOSYS;
 	}
 
+	if (!size) {
+		printk(KERN_WARNING "mtrr: zero sized request\n");
+		return -EINVAL;
+	}
+
 	if (base & size_or_mask || size & size_or_mask) {
 		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
 		return -EINVAL;
 	}
 
 	error = -EINVAL;
+	replace = -1;
 
 	/* No CPU hotplug when we change MTRR entries */
 	lock_cpu_hotplug();
 	/*  Search for existing MTRR  */
-	down(&mtrr_sem);
+	mutex_lock(&mtrr_mutex);
 	for (i = 0; i < num_var_ranges; ++i) {
 		mtrr_if->get(i, &lbase, &lsize, &ltype);
-		if (base >= lbase + lsize)
-			continue;
-		if ((base < lbase) && (base + size <= lbase))
+		if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
 			continue;
 		/*  At this point we know there is some kind of overlap/enclosure  */
-		if ((base < lbase) || (base + size > lbase + lsize)) {
+		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
+			if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
+				/*  New region encloses an existing region  */
+				if (type == ltype) {
+					replace = replace == -1 ? i : -2;
+					continue;
+				}
+				else if (types_compatible(type, ltype))
+					continue;
+			}
 			printk(KERN_WARNING
 			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
-			       " 0x%lx000,0x%x000\n", base, size, lbase,
+			       " 0x%lx000,0x%lx000\n", base, size, lbase,
 			       lsize);
 			goto out;
 		}
 		/*  New region is enclosed by an existing region  */
 		if (ltype != type) {
-			if (type == MTRR_TYPE_UNCACHABLE)
+			if (types_compatible(type, ltype))
 				continue;
 			printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
			     base, size, mtrr_attrib_to_str(ltype),
@@ -363,15 +387,23 @@ int mtrr_add_page(unsigned long base, un
 		goto out;
 	}
 	/*  Search for an empty MTRR  */
-	i = mtrr_if->get_free_region(base, size);
+	i = mtrr_if->get_free_region(base, size, replace);
 	if (i >= 0) {
 		set_mtrr(i, base, size, type);
-		usage_table[i] = 1;
+		if (likely(replace < 0))
+			usage_table[i] = 1;
+		else {
+			usage_table[i] = usage_table[replace] + !!increment;
+			if (unlikely(replace != i)) {
+				set_mtrr(replace, 0, 0, 0);
+				usage_table[replace] = 0;
+			}
+		}
 	} else
 		printk(KERN_INFO "mtrr: no more MTRRs available\n");
 	error = i;
  out:
-	up(&mtrr_sem);
+	mutex_unlock(&mtrr_mutex);
 	unlock_cpu_hotplug();
 	return error;
 }
@@ -454,8 +486,7 @@ int mtrr_del_page(int reg, unsigned long
 {
 	int i, max;
 	mtrr_type ltype;
-	unsigned long lbase;
-	unsigned int lsize;
+	unsigned long lbase, lsize;
 	int error = -EINVAL;
 
 	if (!mtrr_if)
@@ -464,7 +495,7 @@ int mtrr_del_page(int reg, unsigned long
 	max = num_var_ranges;
 	/* No CPU hotplug when we change MTRR entries */
 	lock_cpu_hotplug();
-	down(&mtrr_sem);
+	mutex_lock(&mtrr_mutex);
 	if (reg < 0) {
 		/*  Search for existing MTRR  */
 		for (i = 0; i < max; ++i) {
@@ -503,7 +534,7 @@ int mtrr_del_page(int reg, unsigned long
 	set_mtrr(reg, 0, 0, 0);
 	error = reg;
  out:
-	up(&mtrr_sem);
+	mutex_unlock(&mtrr_mutex);
 	unlock_cpu_hotplug();
 	return error;
 }
@@ -554,7 +585,7 @@ struct mtrr_value {
 struct mtrr_value {
 	mtrr_type	ltype;
 	unsigned long	lbase;
-	unsigned int	lsize;
+	unsigned long	lsize;
 };
 
 /**
@@ -587,8 +618,8 @@ void __init mtrr_bp_init(void)
 			     boot_cpu_data.x86_mask == 0x4))
 			phys_addr = 36;
 
-		size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
-		size_and_mask = ~size_or_mask & 0xfff00000;
+		size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
+		size_and_mask = ~size_or_mask & 0xfffff00000ULL;
 	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
 		   boot_cpu_data.x86 == 6) {
 		/* VIA C* family have Intel style MTRRs, but
@@ -635,7 +666,7 @@ void mtrr_ap_init(void)
 	if (!mtrr_if || !use_intel())
 		return;
 	/*
-	 * Ideally we should hold mtrr_sem here to avoid mtrr entries changed,
+	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
 	 * but this routine will be called in cpu boot time, holding the lock
 	 * breaks it. This routine is called in two cases: 1.very earily time
	 * of software resume, when there absolutely isn't mtrr entry changes;
@@ -649,6 +680,20 @@ void mtrr_ap_init(void)
 	local_irq_restore(flags);
 }
 
+/**
+ * Save current fixed-range MTRR state of the BSP
+ */
+void mtrr_save_state(void)
+{
+	int cpu = get_cpu();
+
+	if (cpu == 0)
+		mtrr_save_fixed_ranges(NULL);
+	else
+		on_selected_cpus(cpumask_of_cpu(0), mtrr_save_fixed_ranges, NULL, 1, 1);
+	put_cpu();
+}
+
 static int __init mtrr_init_finialize(void)
 {
 	if (!mtrr_if)
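The types_compatible() helper introduced above encodes the rule applied when a new
MTRR request overlaps an existing range: uncachable always wins, and write-through
inside write-back (or vice versa) is tolerated; anything else is a conflict. A
stand-alone version, with the numeric values matching the MTRR_TYPE_* constants
used elsewhere in this patch:

    #include <stdio.h>

    enum { UC = 0, WC = 1, WT = 4, WP = 5, WB = 6 };

    static int types_compatible(int type1, int type2)
    {
        return type1 == UC || type2 == UC ||
               (type1 == WT && type2 == WB) ||
               (type1 == WB && type2 == WT);
    }

    int main(void)
    {
        printf("UC over WB: %d\n", types_compatible(UC, WB)); /* 1 */
        printf("WT over WB: %d\n", types_compatible(WT, WB)); /* 1 */
        printf("WC over WB: %d\n", types_compatible(WC, WB)); /* 0 */
        return 0;
    }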
diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/cpu/mtrr/mtrr.h
--- a/xen/arch/x86/cpu/mtrr/mtrr.h	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/mtrr.h	Thu Oct 11 12:11:54 2007 +0100
@@ -43,15 +43,16 @@ struct mtrr_ops {
 	void (*set_all)(void);
 
 	void (*get)(unsigned int reg, unsigned long *base,
-		    unsigned int *size, mtrr_type * type);
-	int (*get_free_region) (unsigned long base, unsigned long size);
-
+		    unsigned long *size, mtrr_type * type);
+	int (*get_free_region)(unsigned long base, unsigned long size,
+			       int replace_reg);
 	int (*validate_add_page)(unsigned long base, unsigned long size,
 				 unsigned int type);
 	int (*have_wrcomb)(void);
 };
 
-extern int generic_get_free_region(unsigned long base, unsigned long size);
+extern int generic_get_free_region(unsigned long base, unsigned long size,
+				   int replace_reg);
 extern int generic_validate_add_page(unsigned long base, unsigned long size,
 				     unsigned int type);
 
@@ -62,17 +63,17 @@ extern int positive_have_wrcomb(void);
 /* library functions for processor-specific routines */
 struct set_mtrr_context {
 	unsigned long flags;
-	unsigned long deftype_lo;
-	unsigned long deftype_hi;
 	unsigned long cr4val;
-	unsigned long ccr3;
+	u32 deftype_lo;
+	u32 deftype_hi;
+	u32 ccr3;
 };
 
 struct mtrr_var_range {
-	unsigned long base_lo;
-	unsigned long base_hi;
-	unsigned long mask_lo;
-	unsigned long mask_hi;
+	u32 base_lo;
+	u32 base_hi;
+	u32 mask_lo;
+	u32 mask_hi;
 };
 
 void set_mtrr_done(struct set_mtrr_context *ctxt);
@@ -83,7 +84,7 @@ void get_mtrr_state(void);
 
 extern void set_mtrr_ops(struct mtrr_ops * ops);
 
-extern u32 size_or_mask, size_and_mask;
+extern u64 size_or_mask, size_and_mask;
 extern struct mtrr_ops * mtrr_if;
 
 #define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
@@ -92,6 +93,6 @@ extern unsigned int num_var_ranges;
 extern unsigned int num_var_ranges;
 
 void mtrr_state_warn(void);
-char *mtrr_attrib_to_str(int x);
+const char *mtrr_attrib_to_str(int x);
 void mtrr_wrmsr(unsigned, unsigned, unsigned);
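The widening of size_or_mask/size_and_mask to u64 above matters because Fam10h
supports up to 48 physical address bits, so the page-frame masks computed in
mtrr_bp_init() no longer fit in 32 bits. A stand-alone sketch of the computation
for a 40-bit machine (PAGE_SHIFT assumed to be 12, as on x86):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int phys_addr = 40;        /* physical address bits */
        uint64_t size_or_mask  = ~((1ULL << (phys_addr - 12)) - 1);
        uint64_t size_and_mask = ~size_or_mask & 0xfffff00000ULL;

        printf("size_or_mask  = 0x%016llx\n",
               (unsigned long long)size_or_mask);
        printf("size_and_mask = 0x%016llx\n",
               (unsigned long long)size_and_mask);
        return 0;
    }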
diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Thu Oct 11 12:11:54 2007 +0100
@@ -120,7 +120,7 @@ static enum handler_return long_mode_do_
             return HNDL_exception_raised;
         break;
 
-    case MSR_K8_MC4_MISC: /* Threshold register */
+    case MSR_IA32_MC4_MISC: /* Threshold register */
        /*
         * MCA/MCE: Threshold register is reported to be locked, so we ignore
         * all write accesses. This behaviour matches real HW, so guests should
@@ -1776,7 +1776,7 @@ static void svm_do_msr_access(
     if (vmcb->exitinfo1 == 0)
     {
         switch (ecx) {
-        case MSR_IA32_TIME_STAMP_COUNTER:
+        case MSR_IA32_TSC:
             msr_content = hvm_get_guest_time(v);
             break;
 
@@ -1788,7 +1788,7 @@ static void svm_do_msr_access(
             msr_content = v->arch.hvm_vcpu.guest_efer;
             break;
 
-        case MSR_K8_MC4_MISC: /* Threshold register */
+        case MSR_IA32_MC4_MISC: /* Threshold register */
             /*
              * MCA/MCE: We report that the threshold register is unavailable
              * for OS use (locked by the BIOS).
@@ -1812,11 +1812,11 @@ static void svm_do_msr_access(
         case MSR_IA32_MCG_CAP:
         case MSR_IA32_MCG_STATUS:
         case MSR_IA32_MC0_STATUS:
-        case MSR_K8_MC1_STATUS:
-        case MSR_K8_MC2_STATUS:
-        case MSR_K8_MC3_STATUS:
-        case MSR_K8_MC4_STATUS:
-        case MSR_K8_MC5_STATUS:
+        case MSR_IA32_MC1_STATUS:
+        case MSR_IA32_MC2_STATUS:
+        case MSR_IA32_MC3_STATUS:
+        case MSR_IA32_MC4_STATUS:
+        case MSR_IA32_MC5_STATUS:
             /* No point in letting the guest see real MCEs */
             msr_content = 0;
             break;
@@ -1850,7 +1850,7 @@ static void svm_do_msr_access(
 
         switch (ecx)
         {
-        case MSR_IA32_TIME_STAMP_COUNTER:
+        case MSR_IA32_TSC:
             hvm_set_guest_time(v, msr_content);
             pt_reset(v);
             break;

diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Oct 11 12:11:54 2007 +0100
@@ -2247,7 +2247,7 @@ static int vmx_do_msr_read(struct cpu_us
 
     switch ( ecx )
     {
-    case MSR_IA32_TIME_STAMP_COUNTER:
+    case MSR_IA32_TSC:
         msr_content = hvm_get_guest_time(v);
         break;
     case MSR_IA32_SYSENTER_CS:
@@ -2267,11 +2267,11 @@ static int vmx_do_msr_read(struct cpu_us
     case MSR_IA32_MCG_CAP:
     case MSR_IA32_MCG_STATUS:
     case MSR_IA32_MC0_STATUS:
-    case MSR_K8_MC1_STATUS:
-    case MSR_K8_MC2_STATUS:
-    case MSR_K8_MC3_STATUS:
-    case MSR_K8_MC4_STATUS:
-    case MSR_K8_MC5_STATUS:
+    case MSR_IA32_MC1_STATUS:
+    case MSR_IA32_MC2_STATUS:
+    case MSR_IA32_MC3_STATUS:
+    case MSR_IA32_MC4_STATUS:
+    case MSR_IA32_MC5_STATUS:
         /* No point in letting the guest see real MCEs */
         msr_content = 0;
         break;
@@ -2387,7 +2387,7 @@ static int vmx_do_msr_write(struct cpu_u
 
     switch ( ecx )
     {
-    case MSR_IA32_TIME_STAMP_COUNTER:
+    case MSR_IA32_TSC:
         hvm_set_guest_time(v, msr_content);
         pt_reset(v);
         break;

diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/nmi.c
--- a/xen/arch/x86/nmi.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/nmi.c	Thu Oct 11 12:11:54 2007 +0100
@@ -314,9 +314,14 @@ void __pminit setup_apic_nmi_watchdog(vo
     switch (boot_cpu_data.x86_vendor)
     {
     case X86_VENDOR_AMD:
-        if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
+        switch (boot_cpu_data.x86) {
+        case 6:
+        case 15 ... 17:
+            setup_k7_watchdog();
+            break;
+        default:
             return;
-        setup_k7_watchdog();
+        }
         break;
     case X86_VENDOR_INTEL:
         switch (boot_cpu_data.x86) {
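The watchdog hunk uses GCC's case-range extension ("case 15 ... 17:") to cover
families 0xf, 0x10 and 0x11 with one label. A portable stand-alone equivalent with
three explicit labels (the helper name is illustrative only):

    #include <stdio.h>

    static const char *watchdog_choice(unsigned int family)
    {
        switch (family) {
        case 6:                      /* K7 */
        case 15: case 16: case 17:   /* K8, Fam10h, Fam11h */
            return "k7_watchdog";
        default:
            return "none";
        }
    }

    int main(void)
    {
        printf("family 0x10 -> %s\n", watchdog_choice(0x10));
        return 0;
    }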
diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/oprofile/nmi_int.c	Thu Oct 11 12:11:54 2007 +0100
@@ -348,6 +348,14 @@ static int __init nmi_init(void)
 			   give user space an consistent name. */
 			cpu_type = "x86-64/hammer";
 			break;
+		case 0x10:
+			model = &op_athlon_spec;
+			cpu_type = "x86-64/family10";
+			break;
+		case 0x11:
+			model = &op_athlon_spec;
+			cpu_type = "x86-64/family11";
+			break;
 		}
 		break;

diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/oprofile/op_model_athlon.c
--- a/xen/arch/x86/oprofile/op_model_athlon.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/oprofile/op_model_athlon.c	Thu Oct 11 12:11:54 2007 +0100
@@ -34,12 +34,15 @@
 #define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0)
 #define CTRL_SET_ACTIVE(n) (n |= (1<<22))
 #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR(x) (x &= (1<<21))
+#define CTRL_CLEAR(lo, hi) (lo &= (1<<21), hi = 0)
 #define CTRL_SET_ENABLE(val) (val |= 1<<20)
 #define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16))
 #define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
-#define CTRL_SET_UM(val, m) (val |= (m << 8))
-#define CTRL_SET_EVENT(val, e) (val |= e)
+#define CTRL_SET_UM(val, m) (val |= ((m & 0xff) << 8))
+#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
+#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
+#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
+#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
 
 static unsigned long reset_value[NUM_COUNTERS];
 
@@ -72,7 +75,7 @@ static void athlon_setup_ctrs(struct op_
 	/* clear all counters */
 	for (i = 0 ; i < NUM_CONTROLS; ++i) {
 		CTRL_READ(low, high, msrs, i);
-		CTRL_CLEAR(low);
+		CTRL_CLEAR(low, high);
 		CTRL_WRITE(low, high, msrs, i);
 	}
 
@@ -89,12 +92,15 @@ static void athlon_setup_ctrs(struct op_
 			CTR_WRITE(counter_config[i].count, msrs, i);
 
 			CTRL_READ(low, high, msrs, i);
-			CTRL_CLEAR(low);
+			CTRL_CLEAR(low, high);
 			CTRL_SET_ENABLE(low);
 			CTRL_SET_USR(low, counter_config[i].user);
 			CTRL_SET_KERN(low, counter_config[i].kernel);
 			CTRL_SET_UM(low, counter_config[i].unit_mask);
-			CTRL_SET_EVENT(low, counter_config[i].event);
+			CTRL_SET_EVENT_LOW(low, counter_config[i].event);
+			CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
+			CTRL_SET_HOST_ONLY(high, 0);
+			CTRL_SET_GUEST_ONLY(high, 0);
 			CTRL_WRITE(low, high, msrs, i);
 		} else {
 			reset_value[i] = 0;
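The new CTRL_SET_EVENT_LOW/HIGH macros exist because Fam10h event-select codes grew
to 12 bits: the low 8 bits stay in EvtSel[7:0] of the low MSR word, while bits 11:8
move into the low nibble of the high word. A stand-alone sketch of that packing:

    #include <stdio.h>
    #include <stdint.h>

    static void pack_event(unsigned int event, uint32_t *lo, uint32_t *hi)
    {
        *lo |= event & 0xff;          /* as CTRL_SET_EVENT_LOW  */
        *hi |= (event >> 8) & 0xf;    /* as CTRL_SET_EVENT_HIGH */
    }

    int main(void)
    {
        uint32_t lo = 0, hi = 0;

        pack_event(0x1d3, &lo, &hi);  /* a 12-bit event number */
        printf("lo=0x%08x hi=0x%08x\n", lo, hi);  /* lo=0xd3, hi=0x1 */
        return 0;
    }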
diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/platform_hypercall.c
--- a/xen/arch/x86/platform_hypercall.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/platform_hypercall.c	Thu Oct 11 12:11:54 2007 +0100
@@ -121,8 +121,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
 
     case XENPF_read_memtype:
     {
-        unsigned long mfn;
-        unsigned int  nr_mfns;
+        unsigned long mfn, nr_mfns;
         mtrr_type     type;
 
         ret = xsm_memtype(op->cmd);

diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/smpboot.c	Thu Oct 11 12:11:54 2007 +0100
@@ -50,6 +50,7 @@
 #include <asm/div64.h>
 #include <asm/flushtlb.h>
 #include <asm/msr.h>
+#include <asm/mtrr.h>
 #include <mach_apic.h>
 #include <mach_wakecpu.h>
 #include <smpboot_hooks.h>
@@ -820,6 +821,12 @@ static int __devinit do_boot_cpu(int api
 	unsigned short nmi_high = 0, nmi_low = 0;
 	struct vcpu *v;
 
+	/*
+	 * Save current MTRR state in case it was changed since early boot
+	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
+	 */
+	mtrr_save_state();
+
 	++cpucount;
 
 	booting_cpu = cpu;

diff -r 49323c8b8633 -r 0d7d6804af22 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/traps.c	Thu Oct 11 12:11:54 2007 +0100
@@ -1728,8 +1728,8 @@ static int emulate_privileged_op(struct
             v->arch.guest_context.gs_base_user = res;
             break;
 #endif
-        case MSR_K8_FIDVID_STATUS:
-        case MSR_K8_FIDVID_CTL:
+        case MSR_K7_FID_VID_STATUS:
+        case MSR_K7_FID_VID_CTL:
             if ( (cpufreq_controller != FREQCTL_dom0_kernel) ||
                  (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) ||
                  wrmsr_safe(regs->ecx, eax, edx) )
@@ -1770,8 +1770,8 @@ static int emulate_privileged_op(struct
             regs->edx = v->arch.guest_context.gs_base_user >> 32;
             break;
#endif
-        case MSR_K8_FIDVID_CTL:
-        case MSR_K8_FIDVID_STATUS:
+        case MSR_K7_FID_VID_CTL:
+        case MSR_K7_FID_VID_STATUS:
             if ( (cpufreq_controller != FREQCTL_dom0_kernel) ||
                  (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) ||
                  rdmsr_safe(regs->ecx, regs->eax, regs->edx) )
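The traps.c change is a pure rename: the removed MSR_K8_FIDVID_* and the surviving
MSR_K7_FID_VID_* names referred to the same architectural registers, as the values
in the old and new headers of this patch show. A trivial stand-alone check:

    #include <stdio.h>

    #define MSR_K8_FIDVID_CTL     0xC0010041  /* removed name   */
    #define MSR_K7_FID_VID_CTL    0xc0010041  /* surviving name */
    #define MSR_K8_FIDVID_STATUS  0xC0010042  /* removed name   */
    #define MSR_K7_FID_VID_STATUS 0xc0010042  /* surviving name */

    int main(void)
    {
        printf("ctl:    %s\n", MSR_K8_FIDVID_CTL == MSR_K7_FID_VID_CTL
                               ? "same register" : "different");
        printf("status: %s\n", MSR_K8_FIDVID_STATUS == MSR_K7_FID_VID_STATUS
                               ? "same register" : "different");
        return 0;
    }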
diff -r 49323c8b8633 -r 0d7d6804af22 xen/include/asm-x86/msr-index.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/msr-index.h	Thu Oct 11 12:11:54 2007 +0100
@@ -0,0 +1,334 @@
+#ifndef __ASM_MSR_INDEX_H
+#define __ASM_MSR_INDEX_H
+
+/* CPU model specific register (MSR) numbers */
+
+/* x86-64 specific MSRs */
+#define MSR_EFER		0xc0000080 /* extended feature register */
+#define MSR_STAR		0xc0000081 /* legacy mode SYSCALL target */
+#define MSR_LSTAR		0xc0000082 /* long mode SYSCALL target */
+#define MSR_CSTAR		0xc0000083 /* compat mode SYSCALL target */
+#define MSR_SYSCALL_MASK	0xc0000084 /* EFLAGS mask for syscall */
+#define MSR_FS_BASE		0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE		0xc0000101 /* 64bit GS base */
+#define MSR_SHADOW_GS_BASE	0xc0000102 /* SwapGS GS shadow */
+
+/* EFER bits: */
+#define _EFER_SCE		0  /* SYSCALL/SYSRET */
+#define _EFER_LME		8  /* Long mode enable */
+#define _EFER_LMA		10 /* Long mode active (read-only) */
+#define _EFER_NX		11 /* No execute enable */
+#define _EFER_SVME		12
+
+#define EFER_SCE		(1<<_EFER_SCE)
+#define EFER_LME		(1<<_EFER_LME)
+#define EFER_LMA		(1<<_EFER_LMA)
+#define EFER_NX			(1<<_EFER_NX)
+#define EFER_SVME		(1<<_EFER_SVME)
+
+/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_PERFCTR0		0x000000c1
+#define MSR_IA32_PERFCTR1		0x000000c2
+#define MSR_FSB_FREQ			0x000000cd
+
+#define MSR_MTRRcap			0x000000fe
+#define MSR_IA32_BBL_CR_CTL		0x00000119
+
+#define MSR_IA32_SYSENTER_CS		0x00000174
+#define MSR_IA32_SYSENTER_ESP		0x00000175
+#define MSR_IA32_SYSENTER_EIP		0x00000176
+
+#define MSR_IA32_MCG_CAP		0x00000179
+#define MSR_IA32_MCG_STATUS		0x0000017a
+#define MSR_IA32_MCG_CTL		0x0000017b
+
+#define MSR_IA32_PEBS_ENABLE		0x000003f1
+#define MSR_IA32_DS_AREA		0x00000600
+#define MSR_IA32_PERF_CAPABILITIES	0x00000345
+
+#define MSR_MTRRfix64K_00000		0x00000250
+#define MSR_MTRRfix16K_80000		0x00000258
+#define MSR_MTRRfix16K_A0000		0x00000259
+#define MSR_MTRRfix4K_C0000		0x00000268
+#define MSR_MTRRfix4K_C8000		0x00000269
+#define MSR_MTRRfix4K_D0000		0x0000026a
+#define MSR_MTRRfix4K_D8000		0x0000026b
+#define MSR_MTRRfix4K_E0000		0x0000026c
+#define MSR_MTRRfix4K_E8000		0x0000026d
+#define MSR_MTRRfix4K_F0000		0x0000026e
+#define MSR_MTRRfix4K_F8000		0x0000026f
+#define MSR_MTRRdefType			0x000002ff
+
+#define MSR_IA32_DEBUGCTLMSR		0x000001d9
+#define MSR_IA32_LASTBRANCHFROMIP	0x000001db
+#define MSR_IA32_LASTBRANCHTOIP		0x000001dc
+#define MSR_IA32_LASTINTFROMIP		0x000001dd
+#define MSR_IA32_LASTINTTOIP		0x000001de
+
+#define MSR_IA32_MC0_CTL		0x00000400
+#define MSR_IA32_MC0_STATUS		0x00000401
+#define MSR_IA32_MC0_ADDR		0x00000402
+#define MSR_IA32_MC0_MISC		0x00000403
+
+#define MSR_IA32_MC1_CTL		0x00000404
+#define MSR_IA32_MC1_STATUS		0x00000405
+#define MSR_IA32_MC1_ADDR		0x00000406
+#define MSR_IA32_MC1_MISC		0x00000407
+
+#define MSR_IA32_MC2_CTL		0x00000408
+#define MSR_IA32_MC2_STATUS		0x00000409
+#define MSR_IA32_MC2_ADDR		0x0000040A
+#define MSR_IA32_MC2_MISC		0x0000040B
+
+#define MSR_IA32_MC3_CTL		0x0000040C
+#define MSR_IA32_MC3_STATUS		0x0000040D
+#define MSR_IA32_MC3_ADDR		0x0000040E
+#define MSR_IA32_MC3_MISC		0x0000040F
+
+#define MSR_IA32_MC4_CTL		0x00000410
+#define MSR_IA32_MC4_STATUS		0x00000411
+#define MSR_IA32_MC4_ADDR		0x00000412
+#define MSR_IA32_MC4_MISC		0x00000413
+
+#define MSR_IA32_MC5_CTL		0x00000414
+#define MSR_IA32_MC5_STATUS		0x00000415
+#define MSR_IA32_MC5_ADDR		0x00000416
+#define MSR_IA32_MC5_MISC		0x00000417
+
+#define MSR_P6_PERFCTR0			0x000000c1
+#define MSR_P6_PERFCTR1			0x000000c2
+#define MSR_P6_EVNTSEL0			0x00000186
+#define MSR_P6_EVNTSEL1			0x00000187
+
+/* MSRs & bits used for VMX enabling */
+#define MSR_IA32_VMX_BASIC		0x480
+#define MSR_IA32_VMX_PINBASED_CTLS	0x481
+#define MSR_IA32_VMX_PROCBASED_CTLS	0x482
+#define MSR_IA32_VMX_EXIT_CTLS		0x483
+#define MSR_IA32_VMX_ENTRY_CTLS		0x484
+#define MSR_IA32_VMX_MISC		0x485
+#define MSR_IA32_VMX_CR0_FIXED0		0x486
+#define MSR_IA32_VMX_CR0_FIXED1		0x487
+#define MSR_IA32_VMX_CR4_FIXED0		0x488
+#define MSR_IA32_VMX_CR4_FIXED1		0x489
+#define MSR_IA32_VMX_PROCBASED_CTLS2	0x48b
+#define IA32_FEATURE_CONTROL_MSR	0x3a
+#define IA32_FEATURE_CONTROL_MSR_LOCK	0x0001
+#define IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX	0x0002
+#define IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX	0x0004
+#define IA32_FEATURE_CONTROL_MSR_SENTER_PARAM_CTL		0x7f00
+#define IA32_FEATURE_CONTROL_MSR_ENABLE_SENTER			0x8000
+
+/* K7/K8 MSRs. Not complete. See the architecture manual for a more
+   complete list. */
+#define MSR_K7_EVNTSEL0			0xc0010000
+#define MSR_K7_PERFCTR0			0xc0010004
+#define MSR_K7_EVNTSEL1			0xc0010001
+#define MSR_K7_PERFCTR1			0xc0010005
+#define MSR_K7_EVNTSEL2			0xc0010002
+#define MSR_K7_PERFCTR2			0xc0010006
+#define MSR_K7_EVNTSEL3			0xc0010003
+#define MSR_K7_PERFCTR3			0xc0010007
+#define MSR_K8_TOP_MEM1			0xc001001a
+#define MSR_K7_CLK_CTL			0xc001001b
+#define MSR_K8_TOP_MEM2			0xc001001d
+#define MSR_K8_SYSCFG			0xc0010010
+
+#define K8_MTRRFIXRANGE_DRAM_ENABLE	0x00040000 /* MtrrFixDramEn bit    */
+#define K8_MTRRFIXRANGE_DRAM_MODIFY	0x00080000 /* MtrrFixDramModEn bit */
+#define K8_MTRR_RDMEM_WRMEM_MASK	0x18181818 /* Mask: RdMem|WrMem    */
+
+#define MSR_K7_HWCR			0xc0010015
+#define MSR_K8_HWCR			0xc0010015
+#define MSR_K7_FID_VID_CTL		0xc0010041
+#define MSR_K7_FID_VID_STATUS		0xc0010042
+#define MSR_K8_ENABLE_C1E		0xc0010055
+#define MSR_K8_VM_CR			0xC0010114
+#define MSR_K8_VM_HSAVE_PA		0xC0010117
+
+/* MSR_K8_VM_CR bits: */
+#define _K8_VMCR_SVME_DISABLE		4
+#define K8_VMCR_SVME_DISABLE		(1 << _K8_VMCR_SVME_DISABLE)
+
+/* K6 MSRs */
+#define MSR_K6_EFER			0xc0000080
+#define MSR_K6_STAR			0xc0000081
+#define MSR_K6_WHCR			0xc0000082
+#define MSR_K6_UWCCR			0xc0000085
+#define MSR_K6_EPMR			0xc0000086
+#define MSR_K6_PSOR			0xc0000087
+#define MSR_K6_PFIR			0xc0000088
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1			0x00000107
+#define MSR_IDT_FCR2			0x00000108
+#define MSR_IDT_FCR3			0x00000109
+#define MSR_IDT_FCR4			0x0000010a
+
+#define MSR_IDT_MCR0			0x00000110
+#define MSR_IDT_MCR1			0x00000111
+#define MSR_IDT_MCR2			0x00000112
+#define MSR_IDT_MCR3			0x00000113
+#define MSR_IDT_MCR4			0x00000114
+#define MSR_IDT_MCR5			0x00000115
+#define MSR_IDT_MCR6			0x00000116
+#define MSR_IDT_MCR7			0x00000117
+#define MSR_IDT_MCR_CTRL		0x00000120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR			0x00001107
+#define MSR_VIA_LONGHAUL		0x0000110a
+#define MSR_VIA_RNG			0x0000110b
+#define MSR_VIA_BCR2			0x00001147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL		0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS		0x80868011
+#define MSR_TMTA_LRTI_READOUT		0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ		0x8086801a
+
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR		0x00000000
+#define MSR_IA32_P5_MC_TYPE		0x00000001
+#define MSR_IA32_TSC			0x00000010
+#define MSR_IA32_PLATFORM_ID		0x00000017
+#define MSR_IA32_EBL_CR_POWERON		0x0000002a
+#define MSR_IA32_EBC_FREQUENCY_ID	0x0000002c
+
+#define MSR_IA32_APICBASE		0x0000001b
+#define MSR_IA32_APICBASE_BSP		(1<<8)
+#define MSR_IA32_APICBASE_ENABLE	(1<<11)
+#define MSR_IA32_APICBASE_BASE		(0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE		0x00000079
+#define MSR_IA32_UCODE_REV		0x0000008b
+
+#define MSR_IA32_PERF_STATUS		0x00000198
+#define MSR_IA32_PERF_CTL		0x00000199
+
+#define MSR_IA32_MPERF			0x000000e7
+#define MSR_IA32_APERF			0x000000e8
+
+#define MSR_IA32_THERM_CONTROL		0x0000019a
+#define MSR_IA32_THERM_INTERRUPT	0x0000019b
+#define MSR_IA32_THERM_STATUS		0x0000019c
+#define MSR_IA32_MISC_ENABLE		0x000001a0
+#define MSR_IA32_MISC_ENABLE_PERF_AVAIL   (1<<7)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL  (1<<11)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
+
+/* Intel Model 6 */
+#define MSR_P6_EVNTSEL0			0x00000186
+#define MSR_P6_EVNTSEL1			0x00000187
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX		0x00000180
+#define MSR_IA32_MCG_EBX		0x00000181
+#define MSR_IA32_MCG_ECX		0x00000182
+#define MSR_IA32_MCG_EDX		0x00000183
+#define MSR_IA32_MCG_ESI		0x00000184
+#define MSR_IA32_MCG_EDI		0x00000185
+#define MSR_IA32_MCG_EBP		0x00000186
+#define MSR_IA32_MCG_ESP		0x00000187
+#define MSR_IA32_MCG_EFLAGS		0x00000188
+#define MSR_IA32_MCG_EIP		0x00000189
+#define MSR_IA32_MCG_RESERVED		0x0000018a
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0		0x00000300
+#define MSR_P4_BPU_PERFCTR1		0x00000301
+#define MSR_P4_BPU_PERFCTR2		0x00000302
+#define MSR_P4_BPU_PERFCTR3		0x00000303
+#define MSR_P4_MS_PERFCTR0		0x00000304
+#define MSR_P4_MS_PERFCTR1		0x00000305
+#define MSR_P4_MS_PERFCTR2		0x00000306
+#define MSR_P4_MS_PERFCTR3		0x00000307
+#define MSR_P4_FLAME_PERFCTR0		0x00000308
+#define MSR_P4_FLAME_PERFCTR1		0x00000309
+#define MSR_P4_FLAME_PERFCTR2		0x0000030a
+#define MSR_P4_FLAME_PERFCTR3		0x0000030b
+#define MSR_P4_IQ_PERFCTR0		0x0000030c
+#define MSR_P4_IQ_PERFCTR1		0x0000030d
+#define MSR_P4_IQ_PERFCTR2		0x0000030e
+#define MSR_P4_IQ_PERFCTR3		0x0000030f
+#define MSR_P4_IQ_PERFCTR4		0x00000310
+#define MSR_P4_IQ_PERFCTR5		0x00000311
+#define MSR_P4_BPU_CCCR0		0x00000360
+#define MSR_P4_BPU_CCCR1		0x00000361
+#define MSR_P4_BPU_CCCR2		0x00000362
+#define MSR_P4_BPU_CCCR3		0x00000363
+#define MSR_P4_MS_CCCR0			0x00000364
+#define MSR_P4_MS_CCCR1			0x00000365
+#define MSR_P4_MS_CCCR2			0x00000366
+#define MSR_P4_MS_CCCR3			0x00000367
+#define MSR_P4_FLAME_CCCR0		0x00000368
+#define MSR_P4_FLAME_CCCR1		0x00000369
+#define MSR_P4_FLAME_CCCR2		0x0000036a
+#define MSR_P4_FLAME_CCCR3		0x0000036b
+#define MSR_P4_IQ_CCCR0			0x0000036c
+#define MSR_P4_IQ_CCCR1			0x0000036d
+#define MSR_P4_IQ_CCCR2			0x0000036e
+#define MSR_P4_IQ_CCCR3			0x0000036f
+#define MSR_P4_IQ_CCCR4			0x00000370
+#define MSR_P4_IQ_CCCR5			0x00000371
+#define MSR_P4_ALF_ESCR0		0x000003ca
+#define MSR_P4_ALF_ESCR1		0x000003cb
+#define MSR_P4_BPU_ESCR0		0x000003b2
+#define MSR_P4_BPU_ESCR1		0x000003b3
+#define MSR_P4_BSU_ESCR0		0x000003a0
+#define MSR_P4_BSU_ESCR1		0x000003a1
+#define MSR_P4_CRU_ESCR0		0x000003b8
+#define MSR_P4_CRU_ESCR1		0x000003b9
+#define MSR_P4_CRU_ESCR2		0x000003cc
+#define MSR_P4_CRU_ESCR3		0x000003cd
+#define MSR_P4_CRU_ESCR4		0x000003e0
+#define MSR_P4_CRU_ESCR5		0x000003e1
+#define MSR_P4_DAC_ESCR0		0x000003a8
+#define MSR_P4_DAC_ESCR1		0x000003a9
+#define MSR_P4_FIRM_ESCR0		0x000003a4
+#define MSR_P4_FIRM_ESCR1		0x000003a5
+#define MSR_P4_FLAME_ESCR0		0x000003a6
+#define MSR_P4_FLAME_ESCR1		0x000003a7
+#define MSR_P4_FSB_ESCR0		0x000003a2
+#define MSR_P4_FSB_ESCR1		0x000003a3
+#define MSR_P4_IQ_ESCR0			0x000003ba
+#define MSR_P4_IQ_ESCR1			0x000003bb
+#define MSR_P4_IS_ESCR0			0x000003b4
+#define MSR_P4_IS_ESCR1			0x000003b5
+#define MSR_P4_ITLB_ESCR0		0x000003b6
+#define MSR_P4_ITLB_ESCR1		0x000003b7
+#define MSR_P4_IX_ESCR0			0x000003c8
+#define MSR_P4_IX_ESCR1			0x000003c9
+#define MSR_P4_MOB_ESCR0		0x000003aa
+#define MSR_P4_MOB_ESCR1		0x000003ab
+#define MSR_P4_MS_ESCR0			0x000003c0
+#define MSR_P4_MS_ESCR1			0x000003c1
+#define MSR_P4_PMH_ESCR0		0x000003ac
+#define MSR_P4_PMH_ESCR1		0x000003ad
+#define MSR_P4_RAT_ESCR0		0x000003bc
+#define MSR_P4_RAT_ESCR1		0x000003bd
+#define MSR_P4_SAAT_ESCR0		0x000003ae
+#define MSR_P4_SAAT_ESCR1		0x000003af
+#define MSR_P4_SSU_ESCR0		0x000003be
+#define MSR_P4_SSU_ESCR1		0x000003bf /* guess: not in manual */
+
+#define MSR_P4_TBPU_ESCR0		0x000003c2
+#define MSR_P4_TBPU_ESCR1		0x000003c3
+#define MSR_P4_TC_ESCR0			0x000003c4
+#define MSR_P4_TC_ESCR1			0x000003c5
+#define MSR_P4_U2L_ESCR0		0x000003b0
+#define MSR_P4_U2L_ESCR1		0x000003b1
+
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0	0x00000309
+#define MSR_CORE_PERF_FIXED_CTR1	0x0000030a
+#define MSR_CORE_PERF_FIXED_CTR2	0x0000030b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL	0x0000038d
+#define MSR_CORE_PERF_GLOBAL_STATUS	0x0000038e
+#define MSR_CORE_PERF_GLOBAL_CTRL	0x0000038f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x00000390
+
+/* Geode defined MSRs */
+#define MSR_GEODE_BUSCONT_CONF0		0x00001900
+
+#endif /* __ASM_MSR_INDEX_H */
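After the split, msr.h pulls in msr-index.h (see the next diff), so existing users
keep compiling unchanged. A short sketch of a consumer reading the variable-range
MTRR count via the rdmsr(msr, lo, hi) accessor that msr.h provides; this runs only
in kernel/hypervisor context and is illustrative, not part of the patch:

    #include <asm/msr.h>   /* now also provides the msr-index.h names */

    /* VCNT, the number of variable-range MTRRs, lives in the low byte
     * of MTRRcap (MSR 0xfe). */
    static unsigned int nr_variable_mtrrs(void)
    {
        unsigned int lo, hi;

        rdmsr(MSR_MTRRcap, lo, hi);
        return lo & 0xff;
    }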
diff -r 49323c8b8633 -r 0d7d6804af22 xen/include/asm-x86/msr.h
--- a/xen/include/asm-x86/msr.h	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/include/asm-x86/msr.h	Thu Oct 11 12:11:54 2007 +0100
@@ -1,5 +1,7 @@
 #ifndef __ASM_MSR_H
 #define __ASM_MSR_H
+
+#include "msr-index.h"
 
 #ifndef __ASSEMBLY__
 
@@ -87,70 +89,6 @@ static inline void wrmsrl(unsigned int m
      : "=a" (low), "=d" (high) \
      : "c" (counter))
 
-#endif /* !__ASSEMBLY__ */
-
-/* symbolic names for some interesting MSRs */
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR		0
-#define MSR_IA32_P5_MC_TYPE		1
-#define MSR_IA32_TIME_STAMP_COUNTER	0x10
-#define MSR_IA32_PLATFORM_ID		0x17
-#define MSR_IA32_EBL_CR_POWERON		0x2a
-#define MSR_IA32_EBC_FREQUENCY_ID	0x2c
-
-#define MSR_IA32_APICBASE		0x1b
-#define MSR_IA32_APICBASE_BSP		(1<<8)
-#define MSR_IA32_APICBASE_ENABLE	(1<<11)
-#define MSR_IA32_APICBASE_BASE		(0xfffff<<12)
-
-#define MSR_IA32_UCODE_WRITE		0x79
-#define MSR_IA32_UCODE_REV		0x8b
-
-#define MSR_P6_PERFCTR0			0xc1
-#define MSR_P6_PERFCTR1			0xc2
-
-/* MSRs & bits used for VMX enabling */
-#define MSR_IA32_VMX_BASIC		0x480
-#define MSR_IA32_VMX_PINBASED_CTLS	0x481
-#define MSR_IA32_VMX_PROCBASED_CTLS	0x482
-#define MSR_IA32_VMX_EXIT_CTLS		0x483
-#define MSR_IA32_VMX_ENTRY_CTLS	0x484
-#define MSR_IA32_VMX_MISC		0x485
-#define MSR_IA32_VMX_CR0_FIXED0	0x486
-#define MSR_IA32_VMX_CR0_FIXED1	0x487
-#define MSR_IA32_VMX_CR4_FIXED0	0x488
-#define MSR_IA32_VMX_CR4_FIXED1	0x489
-#define MSR_IA32_VMX_PROCBASED_CTLS2	0x48b
-#define IA32_FEATURE_CONTROL_MSR	0x3a
-#define IA32_FEATURE_CONTROL_MSR_LOCK	0x0001
-#define IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX	0x0002
-#define IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX	0x0004
-#define IA32_FEATURE_CONTROL_MSR_SENTER_PARAM_CTL		0x7f00
-#define IA32_FEATURE_CONTROL_MSR_ENABLE_SENTER			0x8000
-
-/* AMD/K8 specific MSRs */
-#define MSR_EFER		0xc0000080 /* extended feature register */
-#define MSR_STAR		0xc0000081 /* legacy mode SYSCALL target */
-#define MSR_LSTAR		0xc0000082 /* long mode SYSCALL target */
-#define MSR_CSTAR		0xc0000083 /* compatibility mode SYSCALL target */
-#define MSR_SYSCALL_MASK	0xc0000084 /* EFLAGS mask for syscall */
-#define MSR_FS_BASE		0xc0000100 /* 64bit FS base */
-#define MSR_GS_BASE		0xc0000101 /* 64bit GS base */
-#define MSR_SHADOW_GS_BASE	0xc0000102 /* SwapGS GS shadow */
-/* EFER bits: */
-#define _EFER_SCE 0  /* SYSCALL/SYSRET */
-#define _EFER_LME 8  /* Long mode enable */
-#define _EFER_LMA 10 /* Long mode active (read-only) */
-#define _EFER_NX 11  /* No execute enable */
-#define _EFER_SVME 12
-
-#define EFER_SCE (1<<_EFER_SCE)
-#define EFER_LME (1<<_EFER_LME)
-#define EFER_LMA (1<<_EFER_LMA)
-#define EFER_NX (1<<_EFER_NX)
-#define EFER_SVME (1<<_EFER_SVME)
-
-#ifndef __ASSEMBLY__
 
 DECLARE_PER_CPU(__u64, efer);
 
@@ -167,232 +105,6 @@ static inline void write_efer(__u64 val)
     wrmsrl(MSR_EFER, val);
 }
 
-#endif
-
-/* Intel MSRs. Some also available on other CPUs */
-#define MSR_IA32_PLATFORM_ID	0x17
-
-#define MSR_MTRRcap		0x0fe
-#define MSR_IA32_BBL_CR_CTL	0x119
-
-#define MSR_IA32_SYSENTER_CS	0x174
-#define MSR_IA32_SYSENTER_ESP	0x175
-#define MSR_IA32_SYSENTER_EIP	0x176
-
-#define MSR_IA32_MCG_CAP	0x179
-#define MSR_IA32_MCG_STATUS	0x17a
-#define MSR_IA32_MCG_CTL	0x17b
-
-/* P4/Xeon+ specific */
-#define MSR_IA32_MCG_EAX	0x180
-#define MSR_IA32_MCG_EBX	0x181
-#define MSR_IA32_MCG_ECX	0x182
-#define MSR_IA32_MCG_EDX	0x183
-#define MSR_IA32_MCG_ESI	0x184
-#define MSR_IA32_MCG_EDI	0x185
-#define MSR_IA32_MCG_EBP	0x186
-#define MSR_IA32_MCG_ESP	0x187
-#define MSR_IA32_MCG_EFLAGS	0x188
-#define MSR_IA32_MCG_EIP	0x189
-#define MSR_IA32_MCG_RESERVED	0x18A
-
-#define MSR_P6_EVNTSEL0		0x186
-#define MSR_P6_EVNTSEL1		0x187
-
-#define MSR_IA32_PERF_STATUS	0x198
-#define MSR_IA32_PERF_CTL	0x199
-
-#define MSR_IA32_THERM_CONTROL	0x19a
-#define MSR_IA32_THERM_INTERRUPT 0x19b
-#define MSR_IA32_THERM_STATUS	0x19c
-#define MSR_IA32_MISC_ENABLE	0x1a0
-
-#define MSR_IA32_MISC_ENABLE_PERF_AVAIL   (1<<7)
-#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL  (1<<11)
-#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
-
-#define MSR_IA32_DEBUGCTLMSR		0x1d9
-#define MSR_IA32_LASTBRANCHFROMIP	0x1db
-#define MSR_IA32_LASTBRANCHTOIP		0x1dc
-#define MSR_IA32_LASTINTFROMIP		0x1dd
-#define MSR_IA32_LASTINTTOIP		0x1de
-
-#define MSR_IA32_MC0_CTL	0x400
-#define MSR_IA32_MC0_STATUS	0x401
-#define MSR_IA32_MC0_ADDR	0x402
-#define MSR_IA32_MC0_MISC	0x403
-
-/* K8 Machine Check MSRs */
-#define MSR_K8_MC1_CTL		0x404
-#define MSR_K8_MC1_STATUS	0x405
-#define MSR_K8_MC1_ADDR		0x406
-#define MSR_K8_MC1_MISC		0x407
-
-#define MSR_K8_MC2_CTL		0x408
-#define MSR_K8_MC2_STATUS	0x409
-#define MSR_K8_MC2_ADDR		0x40A
-#define MSR_K8_MC2_MISC		0x40B
-
-#define MSR_K8_MC3_CTL		0x40C
-#define MSR_K8_MC3_STATUS	0x40D
-#define MSR_K8_MC3_ADDR		0x40E
-#define MSR_K8_MC3_MISC		0x40F
-
-#define MSR_K8_MC4_CTL		0x410
-#define MSR_K8_MC4_STATUS	0x411
-#define MSR_K8_MC4_ADDR		0x412
-#define MSR_K8_MC4_MISC		0x413
-
-#define MSR_K8_MC5_CTL		0x414
-#define MSR_K8_MC5_STATUS	0x415
-#define MSR_K8_MC5_ADDR		0x416
-#define MSR_K8_MC5_MISC		0x417
-
-/* Pentium IV performance counter MSRs */
-#define MSR_P4_BPU_PERFCTR0	0x300
-#define MSR_P4_BPU_PERFCTR1	0x301
-#define MSR_P4_BPU_PERFCTR2	0x302
-#define MSR_P4_BPU_PERFCTR3	0x303
-#define MSR_P4_MS_PERFCTR0	0x304
-#define MSR_P4_MS_PERFCTR1	0x305
-#define MSR_P4_MS_PERFCTR2	0x306
-#define MSR_P4_MS_PERFCTR3	0x307
-#define MSR_P4_FLAME_PERFCTR0	0x308
-#define MSR_P4_FLAME_PERFCTR1	0x309
-#define MSR_P4_FLAME_PERFCTR2	0x30a
-#define MSR_P4_FLAME_PERFCTR3	0x30b
-#define MSR_P4_IQ_PERFCTR0	0x30c
-#define MSR_P4_IQ_PERFCTR1	0x30d
-#define MSR_P4_IQ_PERFCTR2	0x30e
-#define MSR_P4_IQ_PERFCTR3	0x30f
-#define MSR_P4_IQ_PERFCTR4	0x310
-#define MSR_P4_IQ_PERFCTR5	0x311
-#define MSR_P4_BPU_CCCR0	0x360
-#define MSR_P4_BPU_CCCR1	0x361
-#define MSR_P4_BPU_CCCR2	0x362
-#define MSR_P4_BPU_CCCR3	0x363
-#define MSR_P4_MS_CCCR0		0x364
-#define MSR_P4_MS_CCCR1		0x365
-#define MSR_P4_MS_CCCR2		0x366
-#define MSR_P4_MS_CCCR3		0x367
-#define MSR_P4_FLAME_CCCR0	0x368
-#define MSR_P4_FLAME_CCCR1	0x369
-#define MSR_P4_FLAME_CCCR2	0x36a
-#define MSR_P4_FLAME_CCCR3	0x36b
-#define MSR_P4_IQ_CCCR0		0x36c
-#define MSR_P4_IQ_CCCR1		0x36d
-#define MSR_P4_IQ_CCCR2		0x36e
-#define MSR_P4_IQ_CCCR3		0x36f
-#define MSR_P4_IQ_CCCR4		0x370
-#define MSR_P4_IQ_CCCR5		0x371
-#define MSR_P4_ALF_ESCR0	0x3ca
-#define MSR_P4_ALF_ESCR1	0x3cb
-#define MSR_P4_BPU_ESCR0	0x3b2
-#define MSR_P4_BPU_ESCR1	0x3b3
-#define MSR_P4_BSU_ESCR0	0x3a0
-#define MSR_P4_BSU_ESCR1	0x3a1
-#define MSR_P4_CRU_ESCR0	0x3b8
-#define MSR_P4_CRU_ESCR1	0x3b9
-#define MSR_P4_CRU_ESCR2	0x3cc
-#define MSR_P4_CRU_ESCR3	0x3cd
-#define MSR_P4_CRU_ESCR4	0x3e0
-#define MSR_P4_CRU_ESCR5	0x3e1
-#define MSR_P4_DAC_ESCR0	0x3a8
-#define MSR_P4_DAC_ESCR1	0x3a9
-#define MSR_P4_FIRM_ESCR0	0x3a4
-#define MSR_P4_FIRM_ESCR1	0x3a5
-#define MSR_P4_FLAME_ESCR0	0x3a6
-#define MSR_P4_FLAME_ESCR1	0x3a7
-#define MSR_P4_FSB_ESCR0	0x3a2
-#define MSR_P4_FSB_ESCR1	0x3a3
-#define MSR_P4_IQ_ESCR0		0x3ba
-#define MSR_P4_IQ_ESCR1		0x3bb
-#define MSR_P4_IS_ESCR0		0x3b4
-#define MSR_P4_IS_ESCR1		0x3b5
-#define MSR_P4_ITLB_ESCR0	0x3b6
-#define MSR_P4_ITLB_ESCR1	0x3b7
-#define MSR_P4_IX_ESCR0		0x3c8
-#define MSR_P4_IX_ESCR1		0x3c9
-#define MSR_P4_MOB_ESCR0	0x3aa
-#define MSR_P4_MOB_ESCR1	0x3ab
-#define MSR_P4_MS_ESCR0		0x3c0
-#define MSR_P4_MS_ESCR1		0x3c1
-#define MSR_P4_PMH_ESCR0	0x3ac
-#define MSR_P4_PMH_ESCR1	0x3ad
-#define MSR_P4_RAT_ESCR0	0x3bc
-#define MSR_P4_RAT_ESCR1	0x3bd
-#define MSR_P4_SAAT_ESCR0	0x3ae
-#define MSR_P4_SAAT_ESCR1	0x3af
-#define MSR_P4_SSU_ESCR0	0x3be
-#define MSR_P4_SSU_ESCR1	0x3bf /* guess: not defined in manual */
-#define MSR_P4_TBPU_ESCR0	0x3c2
-#define MSR_P4_TBPU_ESCR1	0x3c3
-#define MSR_P4_TC_ESCR0		0x3c4
-#define MSR_P4_TC_ESCR1		0x3c5
-#define MSR_P4_U2L_ESCR0	0x3b0
-#define MSR_P4_U2L_ESCR1	0x3b1
-
-#define MSR_K6_EFER		0xC0000080
-#define MSR_K6_STAR		0xC0000081
-#define MSR_K6_WHCR		0xC0000082
-#define MSR_K6_UWCCR		0xC0000085
-#define MSR_K6_EPMR		0xC0000086
-#define MSR_K6_PSOR		0xC0000087
-#define MSR_K6_PFIR		0xC0000088
-
-#define MSR_K7_EVNTSEL0		0xC0010000
-#define MSR_K7_EVNTSEL1		0xC0010001
-#define MSR_K7_EVNTSEL2		0xC0010002
-#define MSR_K7_EVNTSEL3		0xC0010003
-#define MSR_K7_PERFCTR0		0xC0010004
-#define MSR_K7_PERFCTR1		0xC0010005
-#define MSR_K7_PERFCTR2		0xC0010006
-#define MSR_K7_PERFCTR3		0xC0010007
-#define MSR_K7_HWCR		0xC0010015
-#define MSR_K7_CLK_CTL		0xC001001b
-#define MSR_K7_FID_VID_CTL	0xC0010041
-#define MSR_K7_FID_VID_STATUS	0xC0010042
-
-#define MSR_K8_TOP_MEM1		0xC001001A
-#define MSR_K8_TOP_MEM2		0xC001001D
-#define MSR_K8_SYSCFG		0xC0010010
-#define MSR_K8_HWCR		0xC0010015
-#define MSR_K8_VM_CR		0xC0010114
-#define MSR_K8_VM_HSAVE_PA	0xC0010117
-
-#define MSR_K8_FIDVID_CTL	0xC0010041
-#define MSR_K8_FIDVID_STATUS	0xC0010042
-
-/* MSR_K8_VM_CR bits: */
-#define _K8_VMCR_SVME_DISABLE	4
-#define K8_VMCR_SVME_DISABLE	(1 << _K8_VMCR_SVME_DISABLE)
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1	0x107
-#define MSR_IDT_FCR2	0x108
-#define MSR_IDT_FCR3	0x109
-#define MSR_IDT_FCR4	0x10a
-
-#define MSR_IDT_MCR0	0x110
-#define MSR_IDT_MCR1	0x111
-#define MSR_IDT_MCR2	0x112
-#define MSR_IDT_MCR3	0x113
-#define MSR_IDT_MCR4	0x114
-#define MSR_IDT_MCR5	0x115
-#define MSR_IDT_MCR6	0x116
-#define MSR_IDT_MCR7	0x117
-#define MSR_IDT_MCR_CTRL	0x120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR	0x1107
-#define MSR_VIA_LONGHAUL	0x110a
-#define MSR_VIA_RNG	0x110b
-#define MSR_VIA_BCR2	0x1147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL	0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS	0x80868011
-#define MSR_TMTA_LRTI_READOUT	0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ	0x8086801a
+#endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_MSR_H */

diff -r 49323c8b8633 -r 0d7d6804af22 xen/include/asm-x86/mtrr.h
--- a/xen/include/asm-x86/mtrr.h	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/include/asm-x86/mtrr.h	Thu Oct 11 12:11:54 2007 +0100
@@ -11,6 +11,8 @@
 #define MTRR_TYPE_WRBACK     6
 #define MTRR_NUM_TYPES       7
 
+extern void mtrr_save_fixed_ranges(void *);
+extern void mtrr_save_state(void);
 extern int mtrr_add(unsigned long base, unsigned long size,
                     unsigned int type, char increment);
 extern int mtrr_add_page(unsigned long base, unsigned long size,

diff -r 49323c8b8633 -r 0d7d6804af22 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/include/asm-x86/processor.h	Thu Oct 11 12:11:54 2007 +0100
@@ -342,6 +342,12 @@ static always_inline void set_in_cr4 (un
     write_cr4(mmu_cr4_features);
 }
 
+static always_inline void clear_in_cr4 (unsigned long mask)
+{
+    mmu_cr4_features &= ~mask;
+    write_cr4(mmu_cr4_features);
+}
+
 /*
  * NSC/Cyrix CPU configuration register indexes
  */

diff -r 49323c8b8633 -r 0d7d6804af22 xen/include/xen/config.h
--- a/xen/include/xen/config.h	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/include/xen/config.h	Thu Oct 11 12:11:54 2007 +0100
@@ -10,6 +10,7 @@
 #include <asm/config.h>
 
 #define EXPORT_SYMBOL(var)
+#define EXPORT_SYMBOL_GPL(var)
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
 /*
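The config.h change stubs out EXPORT_SYMBOL_GPL the same way EXPORT_SYMBOL already
was: Xen compiles Linux-derived files standalone, so the export annotation in the
synced mce.c must expand to nothing. A minimal sketch of the pattern (note it relies
on the compiler tolerating the resulting stray file-scope semicolon, as GCC does):

    #define EXPORT_SYMBOL(var)
    #define EXPORT_SYMBOL_GPL(var)

    int nr_mce_banks;
    EXPORT_SYMBOL_GPL(nr_mce_banks);   /* expands to nothing under Xen */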