|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v2 19/30] x86/cpu: Rework Intel masking/faulting setup
>>> On 05.02.16 at 14:42, <andrew.cooper3@xxxxxxxxxx> wrote:
> --- a/xen/arch/x86/cpu/intel.c
> +++ b/xen/arch/x86/cpu/intel.c
> @@ -18,11 +18,18 @@
>
> #define select_idle_routine(x) ((void)0)
>
> -static unsigned int probe_intel_cpuid_faulting(void)
> +static bool_t __init probe_intel_cpuid_faulting(void)
> {
> uint64_t x;
> - return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) &&
> - (x & MSR_PLATFORM_INFO_CPUID_FAULTING);
> +
> + if ( rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) ||
> + !(x & MSR_PLATFORM_INFO_CPUID_FAULTING) )
> + return 0;
Partial Xen coding style again.
> @@ -44,41 +51,46 @@ void set_cpuid_faulting(bool_t enable)
> }
>
> /*
> - * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
> - * For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
> - * edx = 0xBFEBFBFF when executing CPUID.EAX = 1 normally. If you want to
> - * 'rev down' to E8400, you can set these values in these Xen boot
> parameters.
> + * Set caps in expected_levelling_cap, probe a specific masking MSR, and set
> + * caps in levelling_caps if it is found, or clobber the MSR index if
> missing.
> + * If present, reads the default value into msr_val.
> */
> -static void set_cpuidmask(const struct cpuinfo_x86 *c)
> +static void __init __probe_mask_msr(unsigned int *msr, uint64_t caps,
> + uint64_t *msr_val)
> {
> - static unsigned int msr_basic, msr_ext, msr_xsave;
> - static enum { not_parsed, no_mask, set_mask } status;
> - u64 msr_val;
> + uint64_t val;
>
> - if (status == no_mask)
> - return;
> + expected_levelling_cap |= caps;
>
> - if (status == set_mask)
> - goto setmask;
> + if (rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val))
> + *msr = 0;
> + else
> + {
> + levelling_caps |= caps;
> + *msr_val = val;
> + }
> +}
Same as for the AMD side: Perhaps neater if the function returned
the MSR value? (Also again partial Xen coding style here.)
> +/* Indicies of the masking MSRs, or 0 if unavailable. */
> +static unsigned int __read_mostly msr_basic, msr_ext, msr_xsave;
I think this way __read_mostly applies only to msr_basic, which I
don't think is what you want. Also I think you mean "indices" or
"indexes".
> +static void __init probe_masking_msrs(void)
> +{
> + const struct cpuinfo_x86 *c = &boot_cpu_data;
> + unsigned int exp_msr_basic = 0, exp_msr_ext = 0, exp_msr_xsave = 0;
>
> /* Only family 6 supports this feature. */
> - if (c->x86 != 6) {
> - printk("No CPUID feature masking support available\n");
> + if (c->x86 != 6)
> return;
> - }
>
> switch (c->x86_model) {
> case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */
> case 0x1d: /* Dunnington(MP) */
> - msr_basic = MSR_INTEL_MASK_V1_CPUID1;
> + exp_msr_basic = msr_basic = MSR_INTEL_MASK_V1_CPUID1;
> break;
>
> case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */
> @@ -88,71 +100,126 @@ static void set_cpuidmask(const struct cpuinfo_x86 *c)
> case 0x2c: /* Gulftown, Westmere-EP */
> case 0x2e: /* Nehalem-EX(Beckton) */
> case 0x2f: /* Westmere-EX */
> - msr_basic = MSR_INTEL_MASK_V2_CPUID1;
> - msr_ext = MSR_INTEL_MASK_V2_CPUID80000001;
> + exp_msr_basic = msr_basic = MSR_INTEL_MASK_V2_CPUID1;
> + exp_msr_ext = msr_ext = MSR_INTEL_MASK_V2_CPUID80000001;
> break;
>
> case 0x2a: /* SandyBridge */
> case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */
> - msr_basic = MSR_INTEL_MASK_V3_CPUID1;
> - msr_ext = MSR_INTEL_MASK_V3_CPUID80000001;
> - msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
> + exp_msr_basic = msr_basic = MSR_INTEL_MASK_V3_CPUID1;
> + exp_msr_ext = msr_ext = MSR_INTEL_MASK_V3_CPUID80000001;
> + exp_msr_xsave = msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
> break;
> }
Instead of all these changes, and instead of the variables needing
initializers, you could simply initialize all three exp_msr_* right after
the switch().
> +static void intel_ctxt_switch_levelling(const struct domain *nextd)
> +{
> + struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
> + const struct cpuidmasks *masks = &cpuidmask_defaults;
> +
> +#define LAZY(msr, field) \
> + ({ \
> + if (msr && (these_masks->field != masks->field)) \
> + { \
> + wrmsrl(msr, masks->field); \
> + these_masks->field = masks->field; \
> + } \
> + })
> +
> + LAZY(msr_basic, _1cd);
> + LAZY(msr_ext, e1cd);
> + LAZY(msr_xsave, Da1);
Please either use token concatenation inside the macro body to
eliminate the redundant msr_ prefixes here, or properly
parenthesize the uses of "msr" inside the macro body.
> + if (opt_cpu_info) {
> + printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
> + printk(XENLOG_INFO
> + "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
> + "e1c 0x%08x, Da1 0x%08x\n",
> + (uint32_t)(cpuidmask_defaults._1cd >> 32),
> + (uint32_t)cpuidmask_defaults._1cd,
> + (uint32_t)(cpuidmask_defaults.e1cd >> 32),
> + (uint32_t)cpuidmask_defaults.e1cd,
> + (uint32_t)cpuidmask_defaults.Da1);
Could I convince you to make this second printk() dependent
upon there not being CPUID faulting support?
> @@ -190,22 +257,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
> (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
> paddr_bits = 36;
>
> - if (c == &boot_cpu_data && c->x86 == 6) {
> - if (probe_intel_cpuid_faulting())
> - __set_bit(X86_FEATURE_CPUID_FAULTING,
> - c->x86_capability);
> - } else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
> - BUG_ON(!probe_intel_cpuid_faulting());
> - __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
> - }
> + if (c == &boot_cpu_data)
> + intel_init_levelling();
> +
> + if (test_bit(X86_FEATURE_CPUID_FAULTING, boot_cpu_data.x86_capability))
> + __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
Mixing tabs and spaces for indentation.
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |