
[Xen-devel] [PATCH 01/15] x86/cpu: Create Hygon Dhyana architecture support file



Add x86 architecture support for a new processor: Hygon Dhyana Family
18h. Carve the initialization code needed by Dhyana out of amd.c into
a separate file, hygon.c, dropping the code that does not apply and
making the Hygon initialization path clearer.

To identify Hygon Dhyana CPU, add a new vendor type X86_VENDOR_HYGON
for system recognition.
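
As an illustrative sketch (not part of the patch): a CPU is recognised
as Hygon when CPUID leaf 0 returns the vendor string "HygonGenuine",
assembled in the usual EBX:EDX:ECX order, e.g.:

	#include <stdint.h>
	#include <string.h>

	static void get_vendor_string(char vendor[13])
	{
		uint32_t eax = 0, ebx, ecx, edx;

		asm volatile ("cpuid"
			      : "+a" (eax), "=b" (ebx),
				"=c" (ecx), "=d" (edx));
		memcpy(&vendor[0], &ebx, 4);	/* vendor string is */
		memcpy(&vendor[4], &edx, 4);	/* EBX, then EDX,   */
		memcpy(&vendor[8], &ecx, 4);	/* then ECX         */
		vendor[12] = '\0';	/* "HygonGenuine" on Dhyana */
	}

The common vendor lookup compares this string against the new
hygon_cpu_dev.c_ident entry below.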

Signed-off-by: Pu Wen <puwen@xxxxxxxx>
---
 xen/arch/x86/cpu/Makefile         |   1 +
 xen/arch/x86/cpu/common.c         |   1 +
 xen/arch/x86/cpu/cpu.h            |   1 +
 xen/arch/x86/cpu/hygon.c          | 306 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/x86-vendors.h |   3 +-
 5 files changed, 311 insertions(+), 1 deletion(-)
 create mode 100644 xen/arch/x86/cpu/hygon.c
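
(Note, outside the commit message: the new cpuid_mask_l7s0_{eax,ebx}
command-line options parallel the existing cpuid_mask_{ecx,edx} ones
and AND into CPUID leaf 7 subleaf 0.  As a hypothetical example,
hiding BMI1 (leaf 7 EBX bit 3) from guests, assuming the masking MSR
is present:

	cpuid_mask_l7s0_ebx=0xfffffff7

on the Xen command line.)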

diff --git a/xen/arch/x86/cpu/Makefile b/xen/arch/x86/cpu/Makefile
index 34a01ca..1db7d88 100644
--- a/xen/arch/x86/cpu/Makefile
+++ b/xen/arch/x86/cpu/Makefile
@@ -8,4 +8,5 @@ obj-y += intel.o
 obj-y += intel_cacheinfo.o
 obj-y += mwait-idle.o
 obj-y += shanghai.o
+obj-y += hygon.o
 obj-y += vpmu.o vpmu_amd.o vpmu_intel.o
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index de6c5c9..ce48d4a 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -704,6 +704,7 @@ void __init early_cpu_init(void)
 {
        intel_cpu_init();
        amd_init_cpu();
+       hygon_init_cpu();
        centaur_init_cpu();
        shanghai_init_cpu();
        early_cpu_detect();
diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h
index 2fcb931..bcf3a1c 100644
--- a/xen/arch/x86/cpu/cpu.h
+++ b/xen/arch/x86/cpu/cpu.h
@@ -19,5 +19,6 @@ extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
 int intel_cpu_init(void);
 int amd_init_cpu(void);
+int hygon_init_cpu(void);
 int centaur_init_cpu(void);
 int shanghai_init_cpu(void);
diff --git a/xen/arch/x86/cpu/hygon.c b/xen/arch/x86/cpu/hygon.c
new file mode 100644
index 0000000..0728b4a
--- /dev/null
+++ b/xen/arch/x86/cpu/hygon.c
@@ -0,0 +1,306 @@
+#include <xen/init.h>
+#include <asm/processor.h>
+#include <asm/hvm/support.h>
+#include <asm/spec_ctrl.h>
+
+#include "cpu.h"
+
+static unsigned int __initdata opt_cpuid_mask_l7s0_eax = ~0u;
+integer_param("cpuid_mask_l7s0_eax", opt_cpuid_mask_l7s0_eax);
+static unsigned int __initdata opt_cpuid_mask_l7s0_ebx = ~0u;
+integer_param("cpuid_mask_l7s0_ebx", opt_cpuid_mask_l7s0_ebx);
+
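+/*
+ * The feature-mask MSRs used below only respond when the magic constant
+ * 0x9c5a203a is loaded into %edi, the same passcode the equivalent AMD
+ * K8 MSRs require; accesses without it fault, hence the _safe variants.
+ */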
+static inline int rdmsr_hygon_safe(unsigned int msr, unsigned int *lo,
+                                unsigned int *hi)
+{
+       int err;
+
+       asm volatile("1: rdmsr\n2:\n"
+                    ".section .fixup,\"ax\"\n"
+                    "3: movl %6,%2\n"
+                    "   jmp 2b\n"
+                    ".previous\n"
+                    _ASM_EXTABLE(1b, 3b)
+                    : "=a" (*lo), "=d" (*hi), "=r" (err)
+                    : "c" (msr), "D" (0x9c5a203a), "2" (0), "i" (-EFAULT));
+
+       return err;
+}
+
+static inline int wrmsr_hygon_safe(unsigned int msr, unsigned int lo,
+                                unsigned int hi)
+{
+       int err;
+
+       asm volatile("1: wrmsr\n2:\n"
+                    ".section .fixup,\"ax\"\n"
+                    "3: movl %6,%0\n"
+                    "   jmp 2b\n"
+                    ".previous\n"
+                    _ASM_EXTABLE(1b, 3b)
+                    : "=r" (err)
+                    : "c" (msr), "a" (lo), "d" (hi), "D" (0x9c5a203a),
+                      "0" (0), "i" (-EFAULT));
+
+       return err;
+}
+
+static void wrmsr_hygon(unsigned int msr, uint64_t val)
+{
+       asm volatile("wrmsr" ::
+                    "c" (msr), "a" ((uint32_t)val),
+                    "d" (val >> 32), "D" (0x9c5a203a));
+}
+
+/*
+ * Sets caps in expected_levelling_cap, probes for the specified mask MSR, and
+ * sets caps in levelling_caps if it is found.  Returns the default value.
+ */
+static uint64_t __init _probe_mask_msr(unsigned int msr, uint64_t caps)
+{
+       unsigned int hi, lo;
+
+       expected_levelling_cap |= caps;
+
+       if ((rdmsr_hygon_safe(msr, &lo, &hi) == 0) &&
+           (wrmsr_hygon_safe(msr, lo, hi) == 0))
+               levelling_caps |= caps;
+
+       return ((uint64_t)hi << 32) | lo;
+}
+
+/* Probe for the existence of the expected masking MSRs. */
+static void __init noinline probe_masking_msrs(void)
+{
+       const struct cpuinfo_x86 *c = &boot_cpu_data;
+
+       /* Work out which masking MSRs we should have. */
+       cpuidmask_defaults._1cd =
+               _probe_mask_msr(MSR_K8_FEATURE_MASK, LCAP_1cd);
+       cpuidmask_defaults.e1cd =
+               _probe_mask_msr(MSR_K8_EXT_FEATURE_MASK, LCAP_e1cd);
+       if (c->cpuid_level >= 7)
+               cpuidmask_defaults._7ab0 =
+                       _probe_mask_msr(MSR_AMD_L7S0_FEATURE_MASK, LCAP_7ab0);
+}
+
+/*
+ * Context switch CPUID masking state to the next domain.  Only called if
+ * CPUID Faulting isn't available, but masking MSRs have been detected.  A
+ * parameter of NULL is used to context switch to the default host state (by
+ * the cpu bringup-code, crash path, etc).
+ */
+static void hygon_ctxt_switch_masking(const struct vcpu *next)
+{
+       struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
+       const struct domain *nextd = next ? next->domain : NULL;
+       const struct cpuidmasks *masks =
+               (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
+               ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
+
+       if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
+               uint64_t val = masks->_1cd;
+
+               /*
+                * OSXSAVE defaults to 1, which causes fast-forwarding of
+                * Xen's real setting.  Clobber it if disabled by the guest
+                * kernel.
+                */
+               if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
+                   !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
+                       val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32);
+
+               if (unlikely(these_masks->_1cd != val)) {
+                       wrmsr_hygon(MSR_K8_FEATURE_MASK, val);
+                       these_masks->_1cd = val;
+               }
+       }
+
+#define LAZY(cap, msr, field)                                          \
+       ({                                                              \
+               if (unlikely(these_masks->field != masks->field) &&     \
+                   ((levelling_caps & cap) == cap)) {                  \
+                       wrmsr_hygon(msr, masks->field);                 \
+                       these_masks->field = masks->field;              \
+               }                                                       \
+       })
+
+       LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK,   e1cd);
+       LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0);
+#undef LAZY
+}
+
+/*
+ * Mask the features and extended features returned by CPUID.  Parameters are
+ * set from the boot line via user-defined masks.
+ */
+static void __init noinline hygon_init_levelling(void)
+{
+       probe_masking_msrs();
+
+       if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
+               uint32_t ecx, edx, tmp;
+
+               cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
+
+               if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
+                       ecx &= opt_cpuid_mask_ecx;
+                       edx &= opt_cpuid_mask_edx;
+               }
+
+               /* Fast-forward bits - Must be set. */
+               if (ecx & cpufeat_mask(X86_FEATURE_XSAVE))
+                       ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
+               edx |= cpufeat_mask(X86_FEATURE_APIC);
+
+               /* Allow the HYPERVISOR bit to be set via guest policy. */
+               ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
+
+               cpuidmask_defaults._1cd = ((uint64_t)ecx << 32) | edx;
+       }
+
+       if ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) {
+               uint32_t ecx, edx, tmp;
+
+               cpuid(0x80000001, &tmp, &tmp, &ecx, &edx);
+
+               if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
+                       ecx &= opt_cpuid_mask_ext_ecx;
+                       edx &= opt_cpuid_mask_ext_edx;
+               }
+
+               /* Fast-forward bits - Must be set. */
+               edx |= cpufeat_mask(X86_FEATURE_APIC);
+
+               cpuidmask_defaults.e1cd = ((uint64_t)ecx << 32) | edx;
+       }
+
+       if ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) {
+               uint32_t eax, ebx, tmp;
+
+               cpuid(0x00000007, &eax, &ebx, &tmp, &tmp);
+
+               if (~(opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx)) {
+                       eax &= opt_cpuid_mask_l7s0_eax;
+                       ebx &= opt_cpuid_mask_l7s0_ebx;
+               }
+
+               cpuidmask_defaults._7ab0 &= ((uint64_t)eax << 32) | ebx;
+       }
+
+       if (opt_cpu_info) {
+               printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
+               printk(XENLOG_INFO
+                      "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
+                      "e1c 0x%08x, 7a0 0x%08x, 7b0 0x%08x\n",
+                      (uint32_t)cpuidmask_defaults._1cd,
+                      (uint32_t)(cpuidmask_defaults._1cd >> 32),
+                      (uint32_t)cpuidmask_defaults.e1cd,
+                      (uint32_t)(cpuidmask_defaults.e1cd >> 32),
+                      (uint32_t)(cpuidmask_defaults._7ab0 >> 32),
+                      (uint32_t)cpuidmask_defaults._7ab0);
+       }
+
+       if (levelling_caps)
+               ctxt_switch_masking = hygon_ctxt_switch_masking;
+}
+
+static void hygon_get_topology(struct cpuinfo_x86 *c)
+{
+       u32 ebx;
+
+       if (c->x86_max_cores <= 1)
+               return;
+
+       /* Convert local APIC ID into the socket ID */
+       c->phys_proc_id >>= (cpuid_ecx(0x80000008) >> 12) & 0xf;
+
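+       /*
+        * Leaf 0x8000001e: EBX[7:0] is the core ID and EBX[15:8] holds
+        * (threads per core - 1).
+        */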
+       ebx = cpuid_ebx(0x8000001e);
+       c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1;
+       c->x86_max_cores /= c->x86_num_siblings;
+       c->cpu_core_id = ebx & 0xff;
+
+       if (opt_cpu_info)
+               printk("CPU %d(%d) -> Processor %d, Core %d\n",
+                       smp_processor_id(), c->x86_max_cores,
+                               c->phys_proc_id, c->cpu_core_id);
+}
+
+static void early_init_hygon(struct cpuinfo_x86 *c)
+{
+       if (c == &boot_cpu_data)
+               hygon_init_levelling();
+
+       ctxt_switch_levelling(NULL);
+}
+
+static void init_hygon(struct cpuinfo_x86 *c)
+{
+       u32 l, h;
+       unsigned long long value;
+
+       /* Attempt to set lfence to be Dispatch Serialising. */
+       if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
+               /* Unable to read.  Assume the safer default. */
+               __clear_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);
+       else if (value & AMD64_DE_CFG_LFENCE_SERIALISE)
+               /* Already dispatch serialising. */
+               __set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);
+
+       /*
+        * If the user has explicitly chosen to disable Memory Disambiguation
+        * to mitigate Speculative Store Bypass, poke the appropriate MSR.
+        */
+       if (opt_ssbd && !rdmsr_safe(MSR_AMD64_LS_CFG, value)) {
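+               /* Bit 10 of LS_CFG disables memory disambiguation. */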
+               value |= 1ull << 10;
+               wrmsr_safe(MSR_AMD64_LS_CFG, value);
+       }
+
+       display_cacheinfo(c);
+
+       if (cpu_has(c, X86_FEATURE_ITSC)) {
+               __set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+               __set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
+               __set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
+       }
+
+       c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+
+       hygon_get_topology(c);
+
+       /* Hygon CPUs do not support SYSENTER outside of legacy mode. */
+       __clear_bit(X86_FEATURE_SEP, c->x86_capability);
+
+       /* Hygon processors have APIC timer running in deep C states. */
+       if (opt_arat)
+               __set_bit(X86_FEATURE_ARAT, c->x86_capability);
+
+       if (cpu_has(c, X86_FEATURE_EFRO)) {
+               rdmsr(MSR_K7_HWCR, l, h);
+               l |= (1 << 27); /* Enable read-only APERF/MPERF bit */
+               wrmsr(MSR_K7_HWCR, l, h);
+       }
+}
+
+static const struct cpu_dev hygon_cpu_dev = {
+       .c_vendor       = "Hygon",
+       .c_ident        = { "HygonGenuine" },
+       .c_early_init   = early_init_hygon,
+       .c_init         = init_hygon,
+};
+
+int __init hygon_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_HYGON] = &hygon_cpu_dev;
+       return 0;
+}
diff --git a/xen/include/asm-x86/x86-vendors.h b/xen/include/asm-x86/x86-vendors.h
index 38a81c3..fa1cbb4 100644
--- a/xen/include/asm-x86/x86-vendors.h
+++ b/xen/include/asm-x86/x86-vendors.h
@@ -9,6 +9,7 @@
 #define X86_VENDOR_AMD 2
 #define X86_VENDOR_CENTAUR 3
 #define X86_VENDOR_SHANGHAI 4
-#define X86_VENDOR_NUM 5
+#define X86_VENDOR_HYGON 5
+#define X86_VENDOR_NUM 6
 
 #endif /* __XEN_X86_VENDORS_H__ */
-- 
2.7.4
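
For reference, outside the patch proper: the LAZY() macro in hygon.c
uses the usual lazy-MSR-update idiom.  A minimal standalone sketch of
the idea, with hypothetical names (shadow_val, lazy_wrmsr) and assuming
a wrmsr_hygon()-style raw writer:

	/* Write the MSR only when the wanted value differs from the
	 * per-CPU shadow copy; MSR writes are expensive while the
	 * compare is nearly free. */
	static uint64_t shadow_val;	/* cached current MSR value */

	static void lazy_wrmsr(unsigned int msr, uint64_t wanted)
	{
		if (shadow_val != wanted) {
			wrmsr_hygon(msr, wanted);
			shadow_val = wanted;
		}
	}

The masking MSRs are thus only rewritten on context switch when the
next domain's policy differs from what the CPU already has loaded.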

