
[Xen-devel] [PATCH] x86: enable VIA CPU support


  • To: "xen-devel" <xen-devel@xxxxxxxxxxxxx>
  • From: "Jan Beulich" <JBeulich@xxxxxxxx>
  • Date: Fri, 21 Sep 2012 12:40:56 +0100
  • Delivery-date: Fri, 21 Sep 2012 11:41:15 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

Newer VIA CPUs have both 64-bit and VMX support. Enable them to be
recognized for these purposes, at the same time stripping any bits that
only applied to 32-bit CPUs out of the respective CPU support file.

In particular, this means untying the VMX == Intel assumption in a few
places.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

---
Note that my testing of this functionality wasn't as extensive as I
would have hoped, since the box I was provided only survived the first
few days - by now it doesn't stay up long enough even to build the
hypervisor and tools. Therefore, further fixes to fully support these
CPUs may be needed once the VIA folks themselves get to test this code.
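
For readers following the VMX != Intel untying in the hunks below: the
recurring pattern is to key VMX-specific decisions off the name of the
active HVM implementation (hvm_funcs.name) rather than the CPU vendor.
A minimal standalone sketch of that pattern follows - this is not Xen
code; the fake_hvm_funcs struct only mimics Xen's hvm_funcs for
illustration, and the 2-vs-3 page choice mirrors the nestedhvm_setup()
change below.

#include <stdio.h>
#include <string.h>

struct fake_hvm_funcs {
    const char *name;        /* "VMX" on Intel/VIA, "SVM" on AMD in Xen */
};

static struct fake_hvm_funcs hvm_funcs = { .name = "VMX" };

static int using_vmx(void)
{
    /* Vendor-independent check: true whenever the VMX code is in use,
       which with this patch includes VIA (Centaur) CPUs. */
    return !strcmp(hvm_funcs.name, "VMX");
}

int main(void)
{
    /* VMX needs only 2 shadow I/O bitmap pages, other implementations 3. */
    printf("shadow io bitmap pages: %u\n", using_vmx() ? 2u : 3u);
    return 0;
}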

--- a/xen/arch/x86/acpi/suspend.c
+++ b/xen/arch/x86/acpi/suspend.c
@@ -32,7 +32,8 @@ void save_rest_processor_state(void)
     rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
     rdmsrl(MSR_CSTAR, saved_cstar);
     rdmsrl(MSR_LSTAR, saved_lstar);
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
         rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
@@ -59,7 +60,8 @@ void restore_rest_processor_state(void)
     wrmsrl(MSR_GS_BASE, saved_gs_base);
     wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
 
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         /* Recover sysenter MSRs */
         wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
--- a/xen/arch/x86/cpu/Makefile
+++ b/xen/arch/x86/cpu/Makefile
@@ -2,10 +2,8 @@ subdir-y += mcheck
 subdir-y += mtrr
 
 obj-y += amd.o
+obj-y += centaur.o
 obj-y += common.o
 obj-y += intel.o
 obj-y += intel_cacheinfo.o
 obj-y += mwait-idle.o
-
-# Keeping around for VIA support (JBeulich)
-# obj-$(x86_32) += centaur.o
--- a/xen/arch/x86/cpu/centaur.c
+++ b/xen/arch/x86/cpu/centaur.c
@@ -45,51 +45,25 @@ static void __init init_c3(struct cpuinf
                c->x86_capability[5] = cpuid_edx(0xC0000001);
        }
 
-       /* Cyrix III family needs CX8 & PGE explicity enabled. */
-       if (c->x86_model >=6 && c->x86_model <= 9) {
-               rdmsrl(MSR_VIA_FCR, msr_content);
-               wrmsrl(MSR_VIA_FCR, msr_content | (1ULL << 1 | 1ULL << 7));
-               set_bit(X86_FEATURE_CX8, c->x86_capability);
+       if (c->x86 == 0x6 && c->x86_model >= 0xf) {
+               c->x86_cache_alignment = c->x86_clflush_size * 2;
+               set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
        }
 
-       /* Before Nehemiah, the C3's had 3dNOW! */
-       if (c->x86_model >=6 && c->x86_model <9)
-               set_bit(X86_FEATURE_3DNOW, c->x86_capability);
-
        get_model_name(c);
        display_cacheinfo(c);
 }
 
 static void __init init_centaur(struct cpuinfo_x86 *c)
 {
-       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-       clear_bit(0*32+31, c->x86_capability);
-
        if (c->x86 == 6)
                init_c3(c);
 }
 
-static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
-{
-       /* VIA C3 CPUs (670-68F) need further shifting. */
-       if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
-               size >>= 8;
-
-       /* VIA also screwed up Nehemiah stepping 1, and made
-          it return '65KB' instead of '64KB'
-          - Note, it seems this may only be in engineering samples. */
-       if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
-               size -=1;
-
-       return size;
-}
-
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
        .c_vendor       = "Centaur",
        .c_ident        = { "CentaurHauls" },
        .c_init         = init_centaur,
-       .c_size_cache   = centaur_size_cache,
 };
 
 int __init centaur_init_cpu(void)
@@ -97,5 +71,3 @@ int __init centaur_init_cpu(void)
        cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
        return 0;
 }
-
-//early_arch_initcall(centaur_init_cpu);
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -567,6 +567,7 @@ void __init early_cpu_init(void)
 {
        intel_cpu_init();
        amd_init_cpu();
+       centaur_init_cpu();
        early_cpu_detect();
 }
 /*
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -114,6 +114,7 @@ static int __init hvm_enable(void)
     switch ( boot_cpu_data.x86_vendor )
     {
     case X86_VENDOR_INTEL:
+    case X86_VENDOR_CENTAUR:
         fns = start_vmx();
         break;
     case X86_VENDOR_AMD:
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -151,13 +151,15 @@ nestedhvm_is_n2(struct vcpu *v)
 static int __init
 nestedhvm_setup(void)
 {
-    /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
-    unsigned int nr = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ? 2 : 3;
-    unsigned int i, order = get_order_from_pages(nr);
+    unsigned int i, nr, order;
 
     if ( !hvm_funcs.name )
         return 0;
 
+    /* Same format and size as hvm_io_bitmap (VMX needs only 2 pages). */
+    nr = !strcmp(hvm_funcs.name, "VMX") ? 2 : 3;
+    order = get_order_from_pages(nr);
+
     /* shadow_io_bitmaps can't be declared static because
      *   they must fulfill hw requirements (page aligned section)
      *   and doing so triggers the ASSERT(va >= XEN_VIRT_START)
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -156,8 +156,7 @@ static void enable_hypercall_page(struct
     *(u32 *)(p + 1) = 0x80000000;
     *(u8  *)(p + 5) = 0x0f; /* vmcall/vmmcall */
     *(u8  *)(p + 6) = 0x01;
-    *(u8  *)(p + 7) = ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-                       ? 0xc1 : 0xd9);
+    *(u8  *)(p + 7) = (!strcmp(hvm_funcs.name, "VMX") ? 0xc1 : 0xd9);
     *(u8  *)(p + 8) = 0xc3; /* ret */
     memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */
 
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -608,7 +608,7 @@ int mem_event_domctl(struct domain *d, x
                 break;
 
             /* Currently only EPT is supported */
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+            if ( strcmp(hvm_funcs.name, "VMX") )
                 break;
 
             rc = mem_event_enable(d, mec, med, _VPF_mem_access, 
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -83,7 +83,7 @@ static void p2m_initialise(struct domain
 
     p2m->cr3 = CR3_EADDR;
 
-    if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
+    if ( hap_enabled(d) && !strcmp(hvm_funcs.name, "VMX") )
         ept_p2m_init(p2m);
     else
         p2m_pt_init(p2m);
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -399,7 +399,8 @@ void __devinit subarch_percpu_traps_init
     wrmsrl(MSR_LSTAR, (unsigned long)stack);
     stack += write_stack_trampoline(stack, stack_bottom, FLAT_KERNEL_CS64);
 
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         /* SYSENTER entry. */
         wrmsrl(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom);


Attachment: x86-Centaur.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
