Re: [Xen-devel] [PATCH 01/15] x86/cpu: Create Hygon Dhyana architecture support file
On 20/12/2018 13:12, Pu Wen wrote:
> diff --git a/xen/arch/x86/cpu/hygon.c b/xen/arch/x86/cpu/hygon.c
> new file mode 100644
> index 0000000..0728b4a
> --- /dev/null
> +++ b/xen/arch/x86/cpu/hygon.c
> @@ -0,0 +1,296 @@
> +#include <xen/init.h>
> +#include <asm/processor.h>
> +#include <asm/hvm/support.h>
> +#include <asm/spec_ctrl.h>
> +
> +#include "cpu.h"
> +
> +static unsigned int __initdata opt_cpuid_mask_l7s0_eax = ~0u;
> +integer_param("cpuid_mask_l7s0_eax", opt_cpuid_mask_l7s0_eax);
> +static unsigned int __initdata opt_cpuid_mask_l7s0_ebx = ~0u;
> +integer_param("cpuid_mask_l7s0_ebx", opt_cpuid_mask_l7s0_ebx);
These should be moved from the AMD-specific code into the common cpu
code (alongside the other masks) rather than duplicated here.
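i.e. something like this in xen/arch/x86/cpu/common.c, next to the
existing opt_cpuid_mask_* options (untested sketch; the matching extern
declarations would go in cpu.h so amd.c and hygon.c can both see them):

    /* Leaf 7 subleaf 0 masks, shared by the AMD and Hygon levelling code. */
    unsigned int __initdata opt_cpuid_mask_l7s0_eax = ~0u;
    integer_param("cpuid_mask_l7s0_eax", opt_cpuid_mask_l7s0_eax);
    unsigned int __initdata opt_cpuid_mask_l7s0_ebx = ~0u;
    integer_param("cpuid_mask_l7s0_ebx", opt_cpuid_mask_l7s0_ebx);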
> +
> +static inline int rdmsr_hygon_safe(unsigned int msr, unsigned int *lo,
> + unsigned int *hi)
> +{
> + int err;
> +
> + asm volatile("1: rdmsr\n2:\n"
> + ".section .fixup,\"ax\"\n"
> + "3: movl %6,%2\n"
> + " jmp 2b\n"
> + ".previous\n"
> + _ASM_EXTABLE(1b, 3b)
> + : "=a" (*lo), "=d" (*hi), "=r" (err)
> + : "c" (msr), "D" (0x9c5a203a), "2" (0), "i" (-EFAULT));
These rdmsr/wrmsr helpers with a password in %edi are only used on the
K8 processors. Since Hygon is a Zen derivative, you shouldn't need any
of these.
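With the password helpers dropped, _probe_mask_msr() can use the
generic accessors instead, e.g. (untested):

    static uint64_t __init _probe_mask_msr(unsigned int msr, uint64_t caps)
    {
        uint64_t val;

        expected_levelling_cap |= caps;

        /* The mask MSR exists if it can be read and written back. */
        if ((rdmsr_safe(msr, val) == 0) && (wrmsr_safe(msr, val) == 0))
            levelling_caps |= caps;

        return val;
    }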
> +
> + return err;
> +}
> +
> +static inline int wrmsr_hygon_safe(unsigned int msr, unsigned int lo,
> + unsigned int hi)
> +{
> + int err;
> +
> + asm volatile("1: wrmsr\n2:\n"
> + ".section .fixup,\"ax\"\n"
> + "3: movl %6,%0\n"
> + " jmp 2b\n"
> + ".previous\n"
> + _ASM_EXTABLE(1b, 3b)
> + : "=r" (err)
> + : "c" (msr), "a" (lo), "d" (hi), "D" (0x9c5a203a),
> + "0" (0), "i" (-EFAULT));
> +
> + return err;
> +}
> +
> +static void wrmsr_hygon(unsigned int msr, uint64_t val)
> +{
> + asm volatile("wrmsr" ::
> + "c" (msr), "a" ((uint32_t)val),
> + "d" (val >> 32), "D" (0x9c5a203a));
> +}
> +
> +/*
> + * Sets caps in expected_levelling_cap, probes for the specified mask MSR, and
> + * set caps in levelling_caps if it is found. Returns the default value.
> + */
> +static uint64_t __init _probe_mask_msr(unsigned int msr, uint64_t caps)
> +{
> + unsigned int hi, lo;
> +
> + expected_levelling_cap |= caps;
> +
> + if ((rdmsr_hygon_safe(msr, &lo, &hi) == 0) &&
> + (wrmsr_hygon_safe(msr, lo, hi) == 0))
> + levelling_caps |= caps;
> +
> + return ((uint64_t)hi << 32) | lo;
> +}
> +
> +/* Probe for the existence of the expected masking MSRs. */
> +static void __init noinline probe_masking_msrs(void)
> +{
> + const struct cpuinfo_x86 *c = &boot_cpu_data;
> +
> + /* Work out which masking MSRs we should have. */
> + cpuidmask_defaults._1cd =
> + _probe_mask_msr(MSR_K8_FEATURE_MASK, LCAP_1cd);
> + cpuidmask_defaults.e1cd =
> + _probe_mask_msr(MSR_K8_EXT_FEATURE_MASK, LCAP_e1cd);
> + if (c->cpuid_level >= 7)
> + cpuidmask_defaults._7ab0 =
> + _probe_mask_msr(MSR_AMD_L7S0_FEATURE_MASK, LCAP_7ab0);
> +}
> +
> +/*
> + * Context switch CPUID masking state to the next domain. Only called if
> + * CPUID Faulting isn't available, but masking MSRs have been detected. A
> + * parameter of NULL is used to context switch to the default host state (by
> + * the cpu bringup-code, crash path, etc).
> + */
> +static void hygon_ctxt_switch_masking(const struct vcpu *next)
> +{
> + struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
> + const struct domain *nextd = next ? next->domain : NULL;
> + const struct cpuidmasks *masks =
> + (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
> + ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
> +
> + if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
> + uint64_t val = masks->_1cd;
> +
> + /*
> + * OSXSAVE defaults to 1, which causes fast-forwarding of
> + * Xen's real setting. Clobber it if disabled by the guest
> + * kernel.
> + */
> + if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
> + !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
> + val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32);
> +
> + if (unlikely(these_masks->_1cd != val)) {
> + wrmsr_hygon(MSR_K8_FEATURE_MASK, val);
> + these_masks->_1cd = val;
> + }
> + }
> +
> +#define LAZY(cap, msr, field) \
> + ({ \
> + if (unlikely(these_masks->field != masks->field) && \
> + ((levelling_caps & cap) == cap)) { \
> + wrmsr_hygon(msr, masks->field); \
> + these_masks->field = masks->field; \
> + } \
> + })
> +
> + LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK, e1cd);
> + LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0);
> +#undef LAZY
> +}
> +
> +/*
> + * Mask the features and extended features returned by CPUID. Parameters are
> + * set from the boot line via user-defined masks.
> + */
> +static void __init noinline hygon_init_levelling(void)
> +{
> + probe_masking_msrs();
> +
> + if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
> + uint32_t ecx, edx, tmp;
> +
> + cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
> +
> + if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
> + ecx &= opt_cpuid_mask_ecx;
> + edx &= opt_cpuid_mask_edx;
> + }
> +
> + /* Fast-forward bits - Must be set. */
> + if (ecx & cpufeat_mask(X86_FEATURE_XSAVE))
> + ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
> + edx |= cpufeat_mask(X86_FEATURE_APIC);
> +
> + /* Allow the HYPERVISOR bit to be set via guest policy. */
> + ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
> +
> + cpuidmask_defaults._1cd = ((uint64_t)ecx << 32) | edx;
> + }
> +
> + if ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) {
> + uint32_t ecx, edx, tmp;
> +
> + cpuid(0x80000001, &tmp, &tmp, &ecx, &edx);
> +
> + if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
> + ecx &= opt_cpuid_mask_ext_ecx;
> + edx &= opt_cpuid_mask_ext_edx;
> + }
> +
> + /* Fast-forward bits - Must be set. */
> + edx |= cpufeat_mask(X86_FEATURE_APIC);
> +
> + cpuidmask_defaults.e1cd = ((uint64_t)ecx << 32) | edx;
> + }
> +
> + if ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) {
> + uint32_t eax, ebx, tmp;
> +
> + cpuid(0x00000007, &eax, &ebx, &tmp, &tmp);
> +
> + if (~(opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx)) {
> + eax &= opt_cpuid_mask_l7s0_eax;
> + ebx &= opt_cpuid_mask_l7s0_ebx;
> + }
> +
> + cpuidmask_defaults._7ab0 &= ((uint64_t)eax << 32) | ebx;
> + }
> +
> + if (opt_cpu_info) {
> + printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
> + printk(XENLOG_INFO
> + "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
> + "e1c 0x%08x, 7a0 0x%08x, 7b0 0x%08x\n",
> + (uint32_t)cpuidmask_defaults._1cd,
> + (uint32_t)(cpuidmask_defaults._1cd >> 32),
> + (uint32_t)cpuidmask_defaults.e1cd,
> + (uint32_t)(cpuidmask_defaults.e1cd >> 32),
> + (uint32_t)(cpuidmask_defaults._7ab0 >> 32),
> + (uint32_t)cpuidmask_defaults._7ab0);
> + }
> +
> + if (levelling_caps)
> + ctxt_switch_masking = hygon_ctxt_switch_masking;
> +}
> +
> +static void hygon_get_topology(struct cpuinfo_x86 *c)
> +{
> + u32 ebx;
> +
> + if (c->x86_max_cores <= 1)
> + return;
> +
> + /* Convert local APIC ID into the socket ID */
> + c->phys_proc_id >>= (cpuid_ecx(0x80000008) >> 12) & 0xf;
> +
> + ebx = cpuid_ebx(0x8000001e);
> + c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1;
> + c->x86_max_cores /= c->x86_num_siblings;
> + c->cpu_core_id = ebx & 0xff;
> +
> + if (opt_cpu_info)
> + printk("CPU %d(%d) -> Processor %d, Core %d\n",
> + smp_processor_id(), c->x86_max_cores,
> + c->phys_proc_id, c->cpu_core_id);
> +}
> +
> +static void early_init_hygon(struct cpuinfo_x86 *c)
> +{
> + if (c == &boot_cpu_data)
> + hygon_init_levelling();
> +
> + ctxt_switch_levelling(NULL);
> +}
> +
> +static void init_hygon(struct cpuinfo_x86 *c)
> +{
> + u32 l, h;
> + unsigned long long value;
> +
> + /* Attempt to set lfence to be Dispatch Serialising. */
> + if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
> + /* Unable to read. Assume the safer default. */
> + __clear_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);
> + else if (value & AMD64_DE_CFG_LFENCE_SERIALISE)
> + /* Already dispatch serialising. */
> + __set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);
> +
> + /*
> + * If the user has explicitly chosen to disable Memory Disambiguation
> + * to mitigate Speculative Store Bypass, poke the appropriate MSR.
> + */
> + if (opt_ssbd && !rdmsr_safe(MSR_AMD64_LS_CFG, value)) {
> + value |= 1ull << 10;
> + wrmsr_safe(MSR_AMD64_LS_CFG, value);
> + }
> +
> + display_cacheinfo(c);
> +
> + if (cpu_has(c, X86_FEATURE_ITSC)) {
> + __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
> + __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
> + __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
> + }
> +
> + c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
> +
> + hygon_get_topology(c);
> +
> + /* Hygon CPUs do not support SYSENTER outside of legacy mode. */
> + __clear_bit(X86_FEATURE_SEP, c->x86_capability);
> +
> + /* Hygon processors have APIC timer running in deep C states. */
> + if ( opt_arat )
> + __set_bit(X86_FEATURE_ARAT, c->x86_capability);
> +
> + if (cpu_has(c, X86_FEATURE_EFRO)) {
> + rdmsr(MSR_K7_HWCR, l, h);
> + l |= (1 << 27); /* Enable read-only APERF/MPERF bit */
> + wrmsr(MSR_K7_HWCR, l, h);
> + }
Is there anything which is actually unique to Hygon here? I ask,
because this looks like a lot of duplicate code, considering that the
processor base is the same.
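If it really is just Zen with a different vendor string, I'd expect
hygon.c to reduce to something like this (a sketch only - the amd_*
helpers named here are hypothetical refactorings split out of
init_amd(), and early_init_amd() would need exporting via cpu.h):

    #include <xen/init.h>
    #include <asm/processor.h>

    #include "cpu.h"

    static void init_hygon(struct cpuinfo_x86 *c)
    {
        /* Fam 18h is a Zen derivative, so reuse the Fam 17h logic. */
        amd_init_lfence(c);   /* hypothetical: LFENCE dispatch-serialising setup */
        amd_init_ssbd(c);     /* hypothetical: opt_ssbd handling via LS_CFG */

        display_cacheinfo(c);

        c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
        amd_get_topology(c);  /* hypothetical: shared topology parsing */
    }

    static const struct cpu_dev hygon_cpu_dev = {
        .c_vendor     = "Hygon",
        .c_ident      = { "HygonGenuine" },
        .c_early_init = early_init_amd,  /* reuse the AMD levelling setup */
        .c_init       = init_hygon,
    };

That would keep a single copy of the workaround logic, so future Zen
fixes don't need applying in two places.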
~Andrew
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel