
[Xen-changelog] [xen-unstable] Fix misc issues related to allowing support of more CPUs



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1222090651 -3600
# Node ID c0db74e416626f34cf91b0eefe659bcfe8b43a35
# Parent  ae24b533dc9d0d5ce05b34a1ef72917589b4e63d
Fix misc issues related to allowing support of more CPUs

This mainly means removing on-stack variables whose size depends (or
should depend) on NR_CPUS, other than cpumask_t ones, and adjusting
certain array sizes.
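The pattern applied throughout is roughly the following (a minimal
sketch based on the domctl.c hunk below; error handling abbreviated):

    /* Before: on-stack array sized by the compile-time CPU limit. */
    unsigned int cnt[NR_CPUS] = { 0 };

    /* After: heap array sized by the CPUs that can actually exist. */
    unsigned int nr_cpus = last_cpu(cpu_possible_map) + 1;
    unsigned int *cnt = xmalloc_array(unsigned int, nr_cpus);
    if ( cnt )
        memset(cnt, 0, nr_cpus * sizeof(*cnt));
    /* ... use cnt where available, tolerating allocation failure ... */
    xfree(cnt);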

There's at least one open tools issue: the 'xm vcpu-pin' path assumes
a maximum of 64 CPUs in many places.
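For illustration only (hypothetical code, not from this patch), such a
64-CPU assumption typically takes the form of a fixed-width mask that
breaks silently on larger systems:

    uint64_t cpumap = 0;
    cpumap |= 1ULL << cpu;  /* undefined behaviour once cpu >= 64 */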

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/nmi.c                |    2 +-
 xen/arch/x86/smpboot.c            |    2 +-
 xen/arch/x86/x86_32/domain_page.c |    3 +++
 xen/common/domctl.c               |   25 +++++++++++++++++--------
 xen/common/sched_credit.c         |   13 ++++++++-----
 xen/common/sched_sedf.c           |   23 +++++++++++++++++++----
 6 files changed, 49 insertions(+), 19 deletions(-)

diff -r ae24b533dc9d -r c0db74e41662 xen/arch/x86/nmi.c
--- a/xen/arch/x86/nmi.c        Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/arch/x86/nmi.c        Mon Sep 22 14:37:31 2008 +0100
@@ -96,7 +96,7 @@ int nmi_active;
 
 int __init check_nmi_watchdog (void)
 {
-    unsigned int prev_nmi_count[NR_CPUS];
+    static unsigned int __initdata prev_nmi_count[NR_CPUS];
     int cpu;
     
     if ( !nmi_watchdog )
diff -r ae24b533dc9d -r c0db74e41662 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/arch/x86/smpboot.c    Mon Sep 22 14:37:31 2008 +0100
@@ -1121,7 +1121,7 @@ static void __init smp_boot_cpus(unsigne
        Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
 
        kicked = 1;
-       for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
+       for (bit = 0; kicked < NR_CPUS && bit < NR_CPUS; bit++) {
                apicid = cpu_present_to_apicid(bit);
                /*
                 * Don't even attempt to start the boot CPU!
diff -r ae24b533dc9d -r c0db74e41662 xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c Mon Sep 22 14:37:31 2008 +0100
@@ -201,6 +201,9 @@ void *map_domain_page_global(unsigned lo
 
     ASSERT(!in_irq() && local_irq_is_enabled());
 
+    /* At least half the ioremap space should be available to us. */
+    BUILD_BUG_ON(IOREMAP_VIRT_START + (IOREMAP_MBYTES << 19) >= FIXADDR_START);
+
     spin_lock(&globalmap_lock);
 
     idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
diff -r ae24b533dc9d -r c0db74e41662 xen/common/domctl.c
--- a/xen/common/domctl.c       Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/common/domctl.c       Mon Sep 22 14:37:31 2008 +0100
@@ -145,16 +145,23 @@ static unsigned int default_vcpu0_locati
 {
     struct domain *d;
     struct vcpu   *v;
-    unsigned int   i, cpu, cnt[NR_CPUS] = { 0 };
+    unsigned int   i, cpu, nr_cpus, *cnt;
     cpumask_t      cpu_exclude_map;
 
     /* Do an initial CPU placement. Pick the least-populated CPU. */
-    rcu_read_lock(&domlist_read_lock);
-    for_each_domain ( d )
-        for_each_vcpu ( d, v )
-        if ( !test_bit(_VPF_down, &v->pause_flags) )
-            cnt[v->processor]++;
-    rcu_read_unlock(&domlist_read_lock);
+    nr_cpus = last_cpu(cpu_possible_map) + 1;
+    cnt = xmalloc_array(unsigned int, nr_cpus);
+    if ( cnt )
+    {
+        memset(cnt, 0, nr_cpus * sizeof(*cnt));
+
+        rcu_read_lock(&domlist_read_lock);
+        for_each_domain ( d )
+            for_each_vcpu ( d, v )
+                if ( !test_bit(_VPF_down, &v->pause_flags) )
+                    cnt[v->processor]++;
+        rcu_read_unlock(&domlist_read_lock);
+    }
 
     /*
      * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
@@ -172,9 +179,11 @@ static unsigned int default_vcpu0_locati
              (cpus_weight(cpu_sibling_map[i]) > 1) )
             continue;
         cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
-        if ( cnt[i] <= cnt[cpu] )
+        if ( !cnt || cnt[i] <= cnt[cpu] )
             cpu = i;
     }
+
+    xfree(cnt);
 
     return cpu;
 }
diff -r ae24b533dc9d -r c0db74e41662 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/common/sched_credit.c Mon Sep 22 14:37:31 2008 +0100
@@ -1258,14 +1258,15 @@ csched_dump_pcpu(int cpu)
     struct csched_pcpu *spc;
     struct csched_vcpu *svc;
     int loop;
+    char cpustr[100];
 
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
 
-    printk(" sort=%d, sibling=0x%lx, core=0x%lx\n",
-            spc->runq_sort_last,
-            cpu_sibling_map[cpu].bits[0],
-            cpu_core_map[cpu].bits[0]);
+    cpumask_scnprintf(cpustr, sizeof(cpustr), cpu_sibling_map[cpu]);
+    printk(" sort=%d, sibling=%s, ", spc->runq_sort_last, cpustr);
+    cpumask_scnprintf(cpustr, sizeof(cpustr), cpu_core_map[cpu]);
+    printk("core=%s\n", cpustr);
 
     /* current VCPU */
     svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
@@ -1292,6 +1293,7 @@ csched_dump(void)
 {
     struct list_head *iter_sdom, *iter_svc;
     int loop;
+    char idlers_buf[100];
 
     printk("info:\n"
            "\tncpus              = %u\n"
@@ -1317,7 +1319,8 @@ csched_dump(void)
            CSCHED_TICKS_PER_TSLICE,
            CSCHED_TICKS_PER_ACCT);
 
-    printk("idlers: 0x%lx\n", csched_priv.idlers.bits[0]);
+    cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), csched_priv.idlers);
+    printk("idlers: %s\n", idlers_buf);
 
     CSCHED_STATS_PRINTK();
 
diff -r ae24b533dc9d -r c0db74e41662 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Mon Sep 22 14:04:27 2008 +0100
+++ b/xen/common/sched_sedf.c   Mon Sep 22 14:37:31 2008 +0100
@@ -1298,8 +1298,18 @@ static int sedf_adjust_weights(struct xe
 {
     struct vcpu *p;
     struct domain      *d;
-    int                 sumw[NR_CPUS] = { 0 };
-    s_time_t            sumt[NR_CPUS] = { 0 };
+    unsigned int        nr_cpus = last_cpu(cpu_possible_map) + 1;
+    int                *sumw = xmalloc_array(int, nr_cpus);
+    s_time_t           *sumt = xmalloc_array(s_time_t, nr_cpus);
+
+    if ( !sumw || !sumt )
+    {
+        xfree(sumt);
+        xfree(sumw);
+        return -ENOMEM;
+    }
+    memset(sumw, 0, nr_cpus * sizeof(*sumw));
+    memset(sumt, 0, nr_cpus * sizeof(*sumt));
 
     /* Sum across all weights. */
     rcu_read_lock(&domlist_read_lock);
@@ -1348,6 +1358,9 @@ static int sedf_adjust_weights(struct xe
     }
     rcu_read_unlock(&domlist_read_lock);
 
+    xfree(sumt);
+    xfree(sumw);
+
     return 0;
 }
 
@@ -1356,6 +1369,7 @@ static int sedf_adjust(struct domain *p,
 static int sedf_adjust(struct domain *p, struct xen_domctl_scheduler_op *op)
 {
     struct vcpu *v;
+    int rc;
 
     PRINT(2,"sedf_adjust was called, domain-id %i new period %"PRIu64" "
           "new slice %"PRIu64"\nlatency %"PRIu64" extra:%s\n",
@@ -1411,8 +1425,9 @@ static int sedf_adjust(struct domain *p,
             }
         }
 
-        if ( sedf_adjust_weights(op) )
-            return -EINVAL;
+        rc = sedf_adjust_weights(op);
+        if ( rc )
+            return rc;
 
         for_each_vcpu ( p, v )
         {
