Re: [Xen-devel] [PATCH v2 18/30] x86/cpu: Rework AMD masking MSR setup
>>> On 05.02.16 at 14:42, <andrew.cooper3@xxxxxxxxxx> wrote:
> @@ -126,126 +133,189 @@ static const struct cpuidmask *__init noinline get_cpuidmask(const char *opt)
> }
>
> /*
> + * Sets caps in expected_levelling_cap, probes for the specified mask MSR, and
> + * set caps in levelling_caps if it is found. Processors prior to Fam 10h
> + * required a 32-bit password for masking MSRs. Reads the default value into
> + * msr_val.
> + */
> +static void __init __probe_mask_msr(unsigned int msr, uint64_t caps,
Please reduce the leading underscores to at most one.
> + uint64_t *msr_val)
> +{
> + unsigned int hi, lo;
> +
> + expected_levelling_cap |= caps;
Indentation.
> + if ((rdmsr_amd_safe(msr, &lo, &hi) == 0) &&
> + (wrmsr_amd_safe(msr, lo, hi) == 0))
> + levelling_caps |= caps;
> +
> + *msr_val = ((uint64_t)hi << 32) | lo;
> +}
Why can't this function, currently returning void, simply return the
value read?
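I.e. perhaps something along these lines (untested sketch only, also
with the leading underscores dropped as asked for above):

static uint64_t __init probe_mask_msr(unsigned int msr, uint64_t caps)
{
	unsigned int hi = 0, lo = 0;

	expected_levelling_cap |= caps;

	/* Probe whether the MSR exists and tolerates being written back. */
	if ((rdmsr_amd_safe(msr, &lo, &hi) == 0) &&
	    (wrmsr_amd_safe(msr, lo, hi) == 0))
		levelling_caps |= caps;

	/* Hand the current value back instead of using an out parameter. */
	return ((uint64_t)hi << 32) | lo;
}

The callers could then assign the result straight into the respective
cpuidmask_defaults field.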
> +/*
> + * Context switch levelling state to the next domain. A parameter of NULL is
> + * used to context switch to the default host state, and is used by the BSP/AP
> + * startup code.
> + */
> +static void amd_ctxt_switch_levelling(const struct domain *nextd)
> +{
> + struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
> + const struct cpuidmasks *masks = &cpuidmask_defaults;
May I suggest naming this "defaults", to aid clarity of the code
below?
> +#define LAZY(cap, msr, field)                                            \
> + ({ \
> + if (((levelling_caps & cap) == cap) && \
> + (these_masks->field != masks->field)) \
Perhaps worth swapping the operands of the && and wrapping the
then-left side (i.e. the inequality check) in unlikely(), to
hopefully make the most desirable route through this function a
branch-less one?
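I.e. something like this (sketch only, re-using the "defaults" name
suggested above, and guessing at the body - the wrmsr_amd() write and
cache update - since the quoted hunk is cut off here):

#define LAZY(cap, msr, field)                                          \
	({                                                              \
		if (unlikely(these_masks->field != defaults->field) &&  \
		    ((levelling_caps & cap) == cap)) {                  \
			/* Write MSR and update cache, as in your patch. */ \
			wrmsr_amd(msr, defaults->field);                \
			these_masks->field = defaults->field;           \
		}                                                       \
	})

That way the expected common case (masks already in place) evaluates
just the single comparison and falls straight through, without
needing to look at levelling_caps at all.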
> +static void __init noinline amd_init_levelling(void)
> {
> - static unsigned int feat_ecx, feat_edx;
> - static unsigned int extfeat_ecx, extfeat_edx;
> - static unsigned int l7s0_eax, l7s0_ebx;
> - static unsigned int thermal_ecx;
> - static bool_t skip_feat, skip_extfeat;
> - static bool_t skip_l7s0_eax_ebx, skip_thermal_ecx;
> - static enum { not_parsed, no_mask, set_mask } status;
> - unsigned int eax, ebx, ecx, edx;
> -
> - if (status == no_mask)
> - return;
> + const struct cpuidmask *m = NULL;
>
> - if (status == set_mask)
> - goto setmask;
> + probe_masking_msrs();
>
> - ASSERT((status == not_parsed) && (c == &boot_cpu_data));
> - status = no_mask;
> + if (*opt_famrev != '\0') {
> + m = get_cpuidmask(opt_famrev);
>
> - /* Fam11 doesn't support masking at all. */
> - if (c->x86 == 0x11)
> - return;
> + if (!m)
> + printk("Invalid processor string: %s\n", opt_famrev);
> + }
>
> - if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
> - opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
> - opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx &
> - opt_cpuid_mask_thermal_ecx)) {
> - feat_ecx = opt_cpuid_mask_ecx;
> - feat_edx = opt_cpuid_mask_edx;
> - extfeat_ecx = opt_cpuid_mask_ext_ecx;
> - extfeat_edx = opt_cpuid_mask_ext_edx;
> - l7s0_eax = opt_cpuid_mask_l7s0_eax;
> - l7s0_ebx = opt_cpuid_mask_l7s0_ebx;
> - thermal_ecx = opt_cpuid_mask_thermal_ecx;
> - } else if (*opt_famrev == '\0') {
> - return;
> - } else {
> - const struct cpuidmask *m = get_cpuidmask(opt_famrev);
> + if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
> + uint32_t ecx, edx, tmp;
>
> - if (!m) {
> - printk("Invalid processor string: %s\n", opt_famrev);
> - printk("CPUID will not be masked\n");
> - return;
> + cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
Didn't you collect raw CPUID output already?
> + if(~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
> + ecx &= opt_cpuid_mask_ecx;
> + edx &= opt_cpuid_mask_edx;
> + } else if ( m ) {
Partial Xen coding style slipped in here.
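(With this file following Linux style, presumably this wants to be

		} else if (m) {

and the if( a couple of lines up similarly wants a blank after the
keyword.)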
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel