
[Xen-changelog] This patch adds dual-core support to Xen and improves HT detection.



ChangeSet 1.1723, 2005/06/19 17:38:21+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        This patch adds dual-core support to Xen and improves HT detection.
        Adapted from Linux 2.6.12.
        Signed-off-by: Nguyen Anh Quynh <aquynh@xxxxxxxxx>
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>



 arch/x86/cpu/amd.c          |   29 ++++++++++++++++----------
 arch/x86/cpu/common.c       |   49 ++++++++++++++++++++++++--------------------
 arch/x86/cpu/cpu.h          |    1 -
 arch/x86/cpu/intel.c        |   23 ++++++++++++++++++++
 arch/x86/dom0_ops.c         |    4 +--
 arch/x86/setup.c            |    1 -
 arch/x86/smpboot.c          |   32 ++++++++++++++++++++++++++--
 common/dom0_ops.c           |    4 +--
 include/asm-x86/processor.h |    1 
 include/asm-x86/smp.h       |    2 +
 include/xen/smp.h           |    2 -
 11 files changed, 104 insertions(+), 44 deletions(-)


diff -Nru a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c    2005-06-19 14:06:51 -04:00
+++ b/xen/arch/x86/cpu/amd.c    2005-06-19 14:06:51 -04:00
@@ -193,23 +193,30 @@
        }
 
        display_cacheinfo(c);
-       detect_ht(c);
-
-#ifdef CONFIG_X86_HT
-       /* AMD dual core looks like HT but isn't really. Hide it from the
-          scheduler. This works around problems with the domain scheduler.
-          Also probably gives slightly better scheduling and disables
-          SMT nice which is harmful on dual core.
-          TBD tune the domain scheduler for dual core. */
-       if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
-               smp_num_siblings = 1;
-#endif
 
        if (cpuid_eax(0x80000000) >= 0x80000008) {
                c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
                if (c->x86_num_cores & (c->x86_num_cores - 1))
                        c->x86_num_cores = 1;
        }
+
+#ifdef CONFIG_X86_HT
+       /*
+        * On an AMD dual core setup the lower bits of the APIC id
+        * distinguish the cores.  Assumes the number of cores is a power
+        * of two.
+        */
+       if (c->x86_num_cores > 1) {
+               int cpu = smp_processor_id();
+               unsigned bits = 0;
+               while ((1 << bits) < c->x86_num_cores)
+                       bits++;
+               cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
+               phys_proc_id[cpu] >>= bits;
+               printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
+                      cpu, c->x86_num_cores, cpu_core_id[cpu]);
+       }
+#endif
 }
 
 static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
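
For reference, the APIC-ID split that the amd.c hunk above adds can be tried in
isolation.  The following is a minimal standalone sketch (hypothetical names,
not part of the patch), assuming the core count is a power of two, which the
code above also requires:

#include <stdio.h>

/* Split an initial APIC ID into package and core numbers. */
static void split_apic_id(unsigned int apic_id, unsigned int num_cores,
                          unsigned int *core, unsigned int *package)
{
        unsigned int bits = 0;

        while ((1u << bits) < num_cores)         /* bits = log2(num_cores) */
                bits++;
        *core    = apic_id & ((1u << bits) - 1); /* low bits select the core */
        *package = apic_id >> bits;              /* high bits select the package */
}

int main(void)
{
        unsigned int core, package;

        split_apic_id(3, 2, &core, &package);    /* APIC ID 3 on a dual core */
        printf("package %u, core %u\n", package, core);  /* package 1, core 1 */
        return 0;
}
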
diff -Nru a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c 2005-06-19 14:06:51 -04:00
+++ b/xen/arch/x86/cpu/common.c 2005-06-19 14:06:51 -04:00
@@ -186,7 +186,7 @@
 
 
 /* Probe for the CPUID instruction */
-int __init have_cpuid_p(void)
+static int __init have_cpuid_p(void)
 {
        return flag_is_changeable_p(X86_EFLAGS_ID);
 }
@@ -194,7 +194,7 @@
 /* Do minimum CPU detection early.
    Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
    The others are not touched to avoid unwanted side effects. */
-void __init early_cpu_detect(void)
+static void __init early_cpu_detect(void)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
 
@@ -228,6 +228,10 @@
        }
 
        early_intel_workaround(c);
+
+#ifdef CONFIG_X86_HT
+       phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+#endif
 }
 
 void __init generic_identify(struct cpuinfo_x86 * c)
@@ -416,25 +420,15 @@
        mcheck_init(c);
 #endif
 }
-/*
- *     Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
- */
- 
-void __init dodgy_tsc(void)
-{
-       if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
-           ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC   ))
-               cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
-}
 
 #ifdef CONFIG_X86_HT
 void __init detect_ht(struct cpuinfo_x86 *c)
 {
        u32     eax, ebx, ecx, edx;
-       int     index_lsb, index_msb, tmp;
+       int     index_msb, tmp;
        int     cpu = smp_processor_id();
 
-       if (!cpu_has(c, X86_FEATURE_HT))
+       if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return;
 
        cpuid(1, &eax, &ebx, &ecx, &edx);
@@ -443,7 +437,6 @@
        if (smp_num_siblings == 1) {
                printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1 ) {
-               index_lsb = 0;
                index_msb = 31;
 
                if (smp_num_siblings > NR_CPUS) {
@@ -452,21 +445,34 @@
                        return;
                }
                tmp = smp_num_siblings;
-               while ((tmp & 1) == 0) {
-                       tmp >>=1 ;
-                       index_lsb++;
-               }
-               tmp = smp_num_siblings;
                while ((tmp & 0x80000000 ) == 0) {
                        tmp <<=1 ;
                        index_msb--;
                }
-               if (index_lsb != index_msb )
+               if (smp_num_siblings & (smp_num_siblings - 1))
                        index_msb++;
                phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
 
                printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
                       phys_proc_id[cpu]);
+
+               smp_num_siblings = smp_num_siblings / c->x86_num_cores;
+
+               tmp = smp_num_siblings;
+               index_msb = 31;
+               while ((tmp & 0x80000000) == 0) {
+                       tmp <<=1 ;
+                       index_msb--;
+               }
+
+               if (smp_num_siblings & (smp_num_siblings - 1))
+                       index_msb++;
+
+               cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+
+               if (c->x86_num_cores > 1)
+                       printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
+                              cpu_core_id[cpu]);
        }
 }
 #endif
@@ -511,7 +517,6 @@
 extern int centaur_init_cpu(void);
 extern int transmeta_init_cpu(void);
 extern int rise_init_cpu(void);
-void early_cpu_detect(void);
 
 void __init early_cpu_init(void)
 {
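
The detect_ht() changes in the common.c hunks above amount to taking
ceil(log2(n)) APIC-ID bits for a sibling count n: phys_proc_id uses the shift
for the full sibling count, and cpu_core_id repeats the computation after
smp_num_siblings has been divided by x86_num_cores, so only the SMT bits
remain.  A standalone sketch of that rounding step (hypothetical names, not
part of the patch):

#include <stdio.h>

/* Return ceil(log2(count)) for count >= 1, the shift detect_ht() computes. */
static unsigned int apic_id_shift(unsigned int count)
{
        unsigned int index_msb = 31;
        unsigned int tmp = count;

        while ((tmp & 0x80000000u) == 0) {      /* find the most significant set bit */
                tmp <<= 1;
                index_msb--;
        }
        if (count & (count - 1))                /* not a power of two: round up */
                index_msb++;
        return index_msb;
}

int main(void)
{
        /* 2 siblings -> 1 bit, 3 -> 2 bits, 4 -> 2 bits */
        printf("%u %u %u\n", apic_id_shift(2), apic_id_shift(3), apic_id_shift(4));
        return 0;
}

In the patch this shift is passed, together with the initial APIC ID
(EBX[31:24] of CPUID leaf 1, as stored by early_cpu_detect() above), to
phys_pkg_id() to derive phys_proc_id and cpu_core_id.
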
diff -Nru a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h
--- a/xen/arch/x86/cpu/cpu.h    2005-06-19 14:06:51 -04:00
+++ b/xen/arch/x86/cpu/cpu.h    2005-06-19 14:06:51 -04:00
@@ -25,7 +25,6 @@
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
 extern void generic_identify(struct cpuinfo_x86 * c);
-extern int have_cpuid_p(void);
 
 extern void early_intel_workaround(struct cpuinfo_x86 *c);
 
diff -Nru a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
--- a/xen/arch/x86/cpu/intel.c  2005-06-19 14:06:51 -04:00
+++ b/xen/arch/x86/cpu/intel.c  2005-06-19 14:06:51 -04:00
@@ -74,6 +74,27 @@
 }
 
 
+/*
+ * find out the number of processor cores on the die
+ */
+static int __init num_cpu_cores(struct cpuinfo_x86 *c)
+{
+       unsigned int eax;
+
+       if (c->cpuid_level < 4)
+               return 1;
+
+       __asm__("cpuid"
+               : "=a" (eax)
+               : "0" (4), "c" (0)
+               : "bx", "dx");
+
+       if (eax & 0x1f)
+               return ((eax >> 26) + 1);
+       else
+               return 1;
+}
+
 static void __init init_intel(struct cpuinfo_x86 *c)
 {
        unsigned int l2 = 0;
@@ -136,6 +157,8 @@
        if ( p )
                strcpy(c->x86_model_id, p);
        
+       c->x86_num_cores = num_cpu_cores(c);
+
        detect_ht(c);
 
        /* Work around errata */
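
The num_cpu_cores() helper added to intel.c above queries CPUID leaf 4 with
ECX=0; when EAX[4:0] is non-zero, EAX[31:26] holds the maximum number of cores
per package minus one.  Below is a hypothetical user-space probe of the same
leaf, a sketch using GCC's <cpuid.h> helpers instead of the kernel's inline
asm (not part of the patch):

#include <stdio.h>
#include <cpuid.h>

static unsigned int cores_per_package(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (__get_cpuid_max(0, NULL) < 4)
                return 1;                        /* CPUID leaf 4 not supported */

        __cpuid_count(4, 0, eax, ebx, ecx, edx); /* leaf 4, sub-leaf 0 */
        if ((eax & 0x1f) == 0)                   /* no cache info reported */
                return 1;
        return (eax >> 26) + 1;                  /* EAX[31:26] = max cores - 1 */
}

int main(void)
{
        printf("cores per package: %u\n", cores_per_package());
        return 0;
}
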
diff -Nru a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c   2005-06-19 14:06:51 -04:00
+++ b/xen/arch/x86/dom0_ops.c   2005-06-19 14:06:51 -04:00
@@ -179,8 +179,8 @@
     {
         dom0_physinfo_t *pi = &op->u.physinfo;
 
-        pi->ht_per_core = ht_per_core;
-        pi->cores       = num_online_cpus() / ht_per_core;
+        pi->ht_per_core = smp_num_siblings;
+        pi->cores       = boot_cpu_data.x86_num_cores;
         pi->total_pages = max_page;
         pi->free_pages  = avail_domheap_pages();
         pi->cpu_khz     = cpu_khz;
diff -Nru a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      2005-06-19 14:06:51 -04:00
+++ b/xen/arch/x86/setup.c      2005-06-19 14:06:51 -04:00
@@ -66,7 +66,6 @@
 
 int early_boot = 1;
 
-int ht_per_core = 1;
 cpumask_t cpu_present_map;
 
 /* Limits of Xen heap, used to initialise the allocator. */
diff -Nru a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    2005-06-19 14:06:51 -04:00
+++ b/xen/arch/x86/smpboot.c    2005-06-19 14:06:51 -04:00
@@ -62,6 +62,8 @@
 int smp_num_siblings = 1;
 int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
 EXPORT_SYMBOL(phys_proc_id);
+int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
+EXPORT_SYMBOL(cpu_core_id);
 
 /* bitmap of online cpus */
 cpumask_t cpu_online_map;
