[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] allocate CPU sibling and core maps dynamically



# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1319181785 -7200
# Node ID 253073b522f8fcde6de44db9955c89257cde84d8
# Parent  eef4641d6726982d4d93ec12d407c3ce2180e290
allocate CPU sibling and core maps dynamically

... thus reducing the per-CPU data area size back to one page even when
building for large NR_CPUS.

At once eliminate the old __cpu{mask,list}_scnprintf() helpers.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---


diff -r eef4641d6726 -r 253073b522f8 xen/arch/ia64/linux-xen/setup.c
--- a/xen/arch/ia64/linux-xen/setup.c   Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/arch/ia64/linux-xen/setup.c   Fri Oct 21 09:23:05 2011 +0200
@@ -577,8 +577,12 @@
 
        cpu_physical_id(0) = hard_smp_processor_id();
 
-       cpu_set(0, per_cpu(cpu_sibling_map, 0));
-       cpu_set(0, per_cpu(cpu_core_map, 0));
+       if (!zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, 0)) ||
+            !zalloc_cpumask_var(&per_cpu(cpu_core_mask, 0)))
+               panic("No memory for boot CPU sibling/core maps\n");
+
+       cpumask_set_cpu(0, per_cpu(cpu_sibling_mask, 0));
+       cpumask_set_cpu(0, per_cpu(cpu_core_mask, 0));
 
        check_for_logical_procs();
        if (smp_num_cpucores > 1)
diff -r eef4641d6726 -r 253073b522f8 xen/arch/ia64/linux-xen/smpboot.c
--- a/xen/arch/ia64/linux-xen/smpboot.c Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/arch/ia64/linux-xen/smpboot.c Fri Oct 21 09:23:05 2011 +0200
@@ -144,8 +144,8 @@
 cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);
 
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_t, cpu_core_map);
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_t, cpu_sibling_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_mask);
 int smp_num_siblings = 1;
 int smp_num_cpucores = 1;
 
@@ -687,13 +687,13 @@
 {
        int i;
 
-       for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-               cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
-       for_each_cpu_mask(i, per_cpu(cpu_core_map, cpu))
-               cpumask_clear_cpu(cpu, &per_cpu(cpu_core_map, i));
+       for_each_cpu_mask(i, *per_cpu(cpu_sibling_mask, cpu))
+               cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, i));
+       for_each_cpu_mask(i, *per_cpu(cpu_core_mask, cpu))
+               cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, i));
 
-       cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
-       cpumask_clear(&per_cpu(cpu_core_map, cpu));
+       cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
+       cpumask_clear(per_cpu(cpu_core_mask, cpu));
 }
 
 static void
@@ -703,12 +703,12 @@
 
        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
-               cpu_clear(cpu, per_cpu(cpu_core_map, cpu));
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
+               cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, cpu));
+               cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
                return;
        }
 
-       last = (cpus_weight(per_cpu(cpu_core_map, cpu)) == 1);
+       last = (cpumask_weight(per_cpu(cpu_core_mask, cpu)) == 1);
 
        /* remove it from all sibling map's */
        clear_cpu_sibling_map(cpu);
@@ -794,11 +794,11 @@
 
        for_each_online_cpu(i) {
                if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
-                       cpu_set(i, per_cpu(cpu_core_map, cpu));
-                       cpu_set(cpu, per_cpu(cpu_core_map, i));
+                       cpumask_set_cpu(i, per_cpu(cpu_core_mask, cpu));
+                       cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, i));
                        if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+                               cpumask_set_cpu(i, per_cpu(cpu_sibling_mask, cpu));
+                               cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, i));
                        }
                }
        }
@@ -821,6 +821,14 @@
        if (cpu_isset(cpu, cpu_callin_map))
                return -EINVAL;
 
+       if (!per_cpu(cpu_sibling_mask, cpu) &&
+            !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)))
+               return -ENOMEM;
+
+       if (!per_cpu(cpu_core_mask, cpu) &&
+            !zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)))
+               return -ENOMEM;
+
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        /* Processor goes to start_secondary(), sets online flag */
        ret = do_boot_cpu(sapicid, cpu);
@@ -829,8 +837,8 @@
 
        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
-               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-               cpu_set(cpu, per_cpu(cpu_core_map, cpu));
+               cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
+               cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, cpu));
                return 0;
        }
 
diff -r eef4641d6726 -r 253073b522f8 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/arch/ia64/xen/dom0_ops.c      Fri Oct 21 09:23:05 2011 +0200
@@ -594,9 +594,9 @@
         xen_sysctl_physinfo_t *pi = &op->u.physinfo;
 
         memset(pi, 0, sizeof(*pi));
-        pi->threads_per_core = cpus_weight(per_cpu(cpu_sibling_map, 0));
+        pi->threads_per_core = cpumask_weight(per_cpu(cpu_sibling_mask, 0));
         pi->cores_per_socket =
-            cpus_weight(per_cpu(cpu_core_map, 0)) / pi->threads_per_core;
+            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
         pi->nr_nodes         = (u32)num_online_nodes();
         pi->nr_cpus          = (u32)num_online_cpus();
         pi->total_pages      = total_pages; 
diff -r eef4641d6726 -r 253073b522f8 xen/arch/ia64/xen/tlb_track.c
--- a/xen/arch/ia64/xen/tlb_track.c     Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/arch/ia64/xen/tlb_track.c     Fri Oct 21 09:23:05 2011 +0200
@@ -504,7 +504,7 @@
     char pcpumask_buf[NR_CPUS + 1];
     char vcpumask_buf[MAX_VIRT_CPUS + 1];
     cpumask_scnprintf(pcpumask_buf, sizeof(pcpumask_buf),
-                      entry->pcpu_dirty_mask);
+                      &entry->pcpu_dirty_mask);
     vcpumask_scnprintf(vcpumask_buf, sizeof(vcpumask_buf),
                        entry->vcpu_dirty_mask);
     printk("%s:%d\n"
diff -r eef4641d6726 -r 253073b522f8 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Fri Oct 21 09:23:05 2011 +0200
@@ -867,7 +867,7 @@
         {
             char *ebufp, ebuf[96] = "MCE: Fatal error happened on CPUs ";
             ebufp = ebuf + strlen(ebuf);
-            cpumask_scnprintf(ebufp, 95 - strlen(ebuf), mce_fatal_cpus);
+            cpumask_scnprintf(ebufp, 95 - strlen(ebuf), &mce_fatal_cpus);
             mc_panic(ebuf);
         }
         atomic_set(&found_error, 0);
diff -r eef4641d6726 -r 253073b522f8 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/arch/x86/irq.c        Fri Oct 21 09:23:05 2011 +0200
@@ -1998,7 +1998,7 @@
         spin_lock_irqsave(&desc->lock, flags);
 
         cpumask_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
-                          desc->affinity);
+                          &desc->affinity);
         printk("   IRQ:%4d affinity:%s vec:%02x type=%-15s"
                " status=%08x ",
                irq, keyhandler_scratch, desc->arch.vector,
diff -r eef4641d6726 -r 253073b522f8 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/arch/x86/mm.c Fri Oct 21 09:23:05 2011 +0200
@@ -3201,7 +3201,7 @@
                 cpumask_clear(&mask);
                 for_each_online_cpu(cpu)
                     if ( !cpumask_intersects(&mask,
-                                             &per_cpu(cpu_sibling_map, cpu)) )
+                                             per_cpu(cpu_sibling_mask, cpu)) )
                         cpumask_set_cpu(cpu, &mask);
                 flush_mask(&mask, FLUSH_CACHE);
             }
diff -r eef4641d6726 -r 253073b522f8 xen/arch/x86/oprofile/op_model_p4.c
--- a/xen/arch/x86/oprofile/op_model_p4.c       Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/arch/x86/oprofile/op_model_p4.c       Fri Oct 21 09:23:05 2011 +0200
@@ -385,7 +385,7 @@
 {
 #ifdef CONFIG_SMP
        int cpu = smp_processor_id();
-       return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
+       return (cpu != cpumask_first(per_cpu(cpu_sibling_mask, cpu)));
 #endif 
        return 0;
 }
diff -r eef4641d6726 -r 253073b522f8 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/arch/x86/smpboot.c    Fri Oct 21 09:23:05 2011 +0200
@@ -51,9 +51,9 @@
 unsigned long __read_mostly trampoline_phys;
 
 /* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_t, cpu_sibling_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_mask);
 /* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_t, cpu_core_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);
 
 cpumask_t cpu_online_map __read_mostly;
 EXPORT_SYMBOL(cpu_online_map);
@@ -233,10 +233,10 @@
 
 static void link_thread_siblings(int cpu1, int cpu2)
 {
-    cpu_set(cpu1, per_cpu(cpu_sibling_map, cpu2));
-    cpu_set(cpu2, per_cpu(cpu_sibling_map, cpu1));
-    cpu_set(cpu1, per_cpu(cpu_core_map, cpu2));
-    cpu_set(cpu2, per_cpu(cpu_core_map, cpu1));
+    cpumask_set_cpu(cpu1, per_cpu(cpu_sibling_mask, cpu2));
+    cpumask_set_cpu(cpu2, per_cpu(cpu_sibling_mask, cpu1));
+    cpumask_set_cpu(cpu1, per_cpu(cpu_core_mask, cpu2));
+    cpumask_set_cpu(cpu2, per_cpu(cpu_core_mask, cpu1));
 }
 
 static void set_cpu_sibling_map(int cpu)
@@ -262,13 +262,13 @@
     }
     else
     {
-        cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+        cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
     }
 
     if ( c[cpu].x86_max_cores == 1 )
     {
-        cpumask_copy(&per_cpu(cpu_core_map, cpu),
-                     &per_cpu(cpu_sibling_map, cpu));
+        cpumask_copy(per_cpu(cpu_core_mask, cpu),
+                     per_cpu(cpu_sibling_mask, cpu));
         c[cpu].booted_cores = 1;
         return;
     }
@@ -277,18 +277,18 @@
     {
         if ( c[cpu].phys_proc_id == c[i].phys_proc_id )
         {
-            cpu_set(i, per_cpu(cpu_core_map, cpu));
-            cpu_set(cpu, per_cpu(cpu_core_map, i));
+            cpumask_set_cpu(i, per_cpu(cpu_core_mask, cpu));
+            cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, i));
             /*
              *  Does this new cpu bringup a new core?
              */
-            if ( cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1 )
+            if ( cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) == 1 )
             {
                 /*
                  * for each core in package, increment
                  * the booted_cores for this new cpu
                  */
-                if ( first_cpu(per_cpu(cpu_sibling_map, i)) == i )
+                if ( cpumask_first(per_cpu(cpu_sibling_mask, i)) == i )
                     c[cpu].booted_cores++;
                 /*
                  * increment the core count for all
@@ -641,13 +641,14 @@
 {
     unsigned int order;
 
+    free_cpumask_var(per_cpu(cpu_sibling_mask, cpu));
+    free_cpumask_var(per_cpu(cpu_core_mask, cpu));
+
     order = get_order_from_pages(NR_RESERVED_GDT_PAGES);
     free_xenheap_pages(per_cpu(gdt_table, cpu), order);
-    per_cpu(gdt_table, cpu) = NULL;
 
 #ifdef __x86_64__
     free_xenheap_pages(per_cpu(compat_gdt_table, cpu), order);
-    per_cpu(compat_gdt_table, cpu) = NULL;
 #endif
 
     order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
@@ -696,7 +697,9 @@
         goto oom;
     memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES * sizeof(idt_entry_t));
 
-    return 0;
+    if ( zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) &&
+         zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
+        return 0;
 
  oom:
     cpu_smpboot_free(cpu);
@@ -744,6 +747,10 @@
 
     stack_base[0] = stack_start.esp;
 
+    if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, 0)) ||
+         !zalloc_cpumask_var(&per_cpu(cpu_core_mask, 0)) )
+        panic("No memory for boot CPU sibling/core maps\n");
+
     set_cpu_sibling_map(0);
 
     /*
@@ -760,8 +767,6 @@
             printk(KERN_NOTICE "Local APIC not detected."
                    " Using dummy APIC emulation.\n");
         map_cpu_to_logical_apicid();
-        cpu_set(0, per_cpu(cpu_sibling_map, 0));
-        cpu_set(0, per_cpu(cpu_core_map, 0));
         return;
     }
 
@@ -792,13 +797,6 @@
     setup_local_APIC();
     map_cpu_to_logical_apicid();
 
-    /*
-     * construct cpu_sibling_map, so that we can tell sibling CPUs
-     * efficiently.
-     */
-    cpu_set(0, per_cpu(cpu_sibling_map, 0));
-    cpu_set(0, per_cpu(cpu_core_map, 0));
-
     smpboot_setup_io_apic();
 
     setup_boot_APIC_clock();
@@ -816,18 +814,18 @@
     int sibling;
     struct cpuinfo_x86 *c = cpu_data;
 
-    for_each_cpu_mask ( sibling, per_cpu(cpu_core_map, cpu) )
+    for_each_cpu_mask ( sibling, *per_cpu(cpu_core_mask, cpu) )
     {
-        cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+        cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, sibling));
         /* Last thread sibling in this cpu core going down. */
-        if ( cpumask_weight(&per_cpu(cpu_sibling_map, cpu)) == 1 )
+        if ( cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) == 1 )
             c[sibling].booted_cores--;
     }
    
-    for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-        cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, sibling));
-    cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
-    cpumask_clear(&per_cpu(cpu_core_map, cpu));
+    for_each_cpu_mask(sibling, *per_cpu(cpu_sibling_mask, cpu))
+        cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, sibling));
+    cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
+    cpumask_clear(per_cpu(cpu_core_mask, cpu));
     c[cpu].phys_proc_id = BAD_APICID;
     c[cpu].cpu_core_id = BAD_APICID;
     c[cpu].compute_unit_id = BAD_APICID;
diff -r eef4641d6726 -r 253073b522f8 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c     Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/arch/x86/sysctl.c     Fri Oct 21 09:23:05 2011 +0200
@@ -76,9 +76,9 @@
 
         memset(pi, 0, sizeof(*pi));
         pi->threads_per_core =
-            cpus_weight(per_cpu(cpu_sibling_map, 0));
+            cpumask_weight(per_cpu(cpu_sibling_mask, 0));
         pi->cores_per_socket =
-            cpus_weight(per_cpu(cpu_core_map, 0)) / pi->threads_per_core;
+            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
         pi->nr_cpus = num_online_cpus();
         pi->nr_nodes = num_online_nodes();
         pi->max_node_id = MAX_NUMNODES-1;
diff -r eef4641d6726 -r 253073b522f8 xen/common/domctl.c
--- a/xen/common/domctl.c       Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/common/domctl.c       Fri Oct 21 09:23:05 2011 +0200
@@ -171,7 +171,7 @@
      * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
      * favour high numbered CPUs in the event of a tie.
      */
-    cpumask_copy(&cpu_exclude_map, &per_cpu(cpu_sibling_map, 0));
+    cpumask_copy(&cpu_exclude_map, per_cpu(cpu_sibling_mask, 0));
     cpu = cpumask_first(&cpu_exclude_map);
     if ( cpumask_weight(&cpu_exclude_map) > 1 )
         cpu = cpumask_next(cpu, &cpu_exclude_map);
@@ -179,11 +179,11 @@
     {
         if ( cpumask_test_cpu(i, &cpu_exclude_map) )
             continue;
-        if ( (i == cpumask_first(&per_cpu(cpu_sibling_map, i))) &&
-             (cpumask_weight(&per_cpu(cpu_sibling_map, i)) > 1) )
+        if ( (i == cpumask_first(per_cpu(cpu_sibling_mask, i))) &&
+             (cpumask_weight(per_cpu(cpu_sibling_mask, i)) > 1) )
             continue;
         cpumask_or(&cpu_exclude_map, &cpu_exclude_map,
-                   &per_cpu(cpu_sibling_map, i));
+                   per_cpu(cpu_sibling_mask, i));
         if ( !cnt || cnt[i] <= cnt[cpu] )
             cpu = i;
     }
diff -r eef4641d6726 -r 253073b522f8 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c   Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/common/keyhandler.c   Fri Oct 21 09:23:05 2011 +0200
@@ -210,7 +210,7 @@
 static void cpuset_print(char *set, int size, const cpumask_t *mask)
 {
     *set++ = '{';
-    set += cpulist_scnprintf(set, size-2, *mask);
+    set += cpulist_scnprintf(set, size-2, mask);
     *set++ = '}';
     *set++ = '\0';
 }
diff -r eef4641d6726 -r 253073b522f8 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/common/sched_credit.c Fri Oct 21 09:23:05 2011 +0200
@@ -502,23 +502,23 @@
 
         nxt = cpumask_cycle(cpu, &cpus);
 
-        if ( cpumask_test_cpu(cpu, &per_cpu(cpu_core_map, nxt)) )
+        if ( cpumask_test_cpu(cpu, per_cpu(cpu_core_mask, nxt)) )
         {
             /* We're on the same socket, so check the busy-ness of threads.
              * Migrate if # of idlers is less at all */
-            ASSERT( cpumask_test_cpu(nxt, &per_cpu(cpu_core_map, cpu)) );
+            ASSERT( cpumask_test_cpu(nxt, per_cpu(cpu_core_mask, cpu)) );
             migrate_factor = 1;
-            cpumask_and(&cpu_idlers, &idlers, &per_cpu(cpu_sibling_map, cpu));
-            cpumask_and(&nxt_idlers, &idlers, &per_cpu(cpu_sibling_map, nxt));
+            cpumask_and(&cpu_idlers, &idlers, per_cpu(cpu_sibling_mask, cpu));
+            cpumask_and(&nxt_idlers, &idlers, per_cpu(cpu_sibling_mask, nxt));
         }
         else
         {
             /* We're on different sockets, so check the busy-ness of cores.
              * Migrate only if the other core is twice as idle */
-            ASSERT( !cpumask_test_cpu(nxt, &per_cpu(cpu_core_map, cpu)) );
+            ASSERT( !cpumask_test_cpu(nxt, per_cpu(cpu_core_mask, cpu)) );
             migrate_factor = 2;
-            cpumask_and(&cpu_idlers, &idlers, &per_cpu(cpu_core_map, cpu));
-            cpumask_and(&nxt_idlers, &idlers, &per_cpu(cpu_core_map, nxt));
+            cpumask_and(&cpu_idlers, &idlers, per_cpu(cpu_core_mask, cpu));
+            cpumask_and(&nxt_idlers, &idlers, per_cpu(cpu_core_mask, nxt));
         }
 
         weight_cpu = cpumask_weight(&cpu_idlers);
@@ -531,7 +531,7 @@
             cpumask_and(&nxt_idlers, &cpus, &nxt_idlers);
             spc = CSCHED_PCPU(nxt);
             cpu = cpumask_cycle(spc->idle_bias, &nxt_idlers);
-            cpumask_andnot(&cpus, &cpus, &per_cpu(cpu_sibling_map, cpu));
+            cpumask_andnot(&cpus, &cpus, per_cpu(cpu_sibling_mask, cpu));
         }
         else
         {
@@ -1419,9 +1419,9 @@
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_map, cpu));
+    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
     printk(" sort=%d, sibling=%s, ", spc->runq_sort_last, cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_map, cpu));
+    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
     printk("core=%s\n", cpustr);
 
     /* current VCPU */
@@ -1481,7 +1481,7 @@
            prv->ticks_per_tslice,
            vcpu_migration_delay);
 
-    cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), prv->idlers);
+    cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), &prv->idlers);
     printk("idlers: %s\n", idlers_buf);
 
     printk("active vcpus:\n");
diff -r eef4641d6726 -r 253073b522f8 xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c        Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/common/sched_credit2.c        Fri Oct 21 09:23:05 2011 +0200
@@ -1767,9 +1767,9 @@
 
     runq = &RQD(ops, cpu)->runq;
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_map,cpu));
+    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
     printk(" sibling=%s, ", cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_map,cpu));
+    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
     printk("core=%s\n", cpustr);
 
     /* current VCPU */
diff -r eef4641d6726 -r 253073b522f8 xen/include/asm-ia64/linux-xen/asm/smp.h
--- a/xen/include/asm-ia64/linux-xen/asm/smp.h  Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/include/asm-ia64/linux-xen/asm/smp.h  Fri Oct 21 09:23:05 2011 +0200
@@ -62,8 +62,8 @@
 extern cpumask_t cpu_online_map;
 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
 
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
 extern int smp_num_siblings;
 extern int smp_num_cpucores;
 extern void __iomem *ipi_base_addr;
diff -r eef4641d6726 -r 253073b522f8 xen/include/asm-x86/smp.h
--- a/xen/include/asm-x86/smp.h Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/include/asm-x86/smp.h Fri Oct 21 09:23:05 2011 +0200
@@ -25,8 +25,8 @@
  */
  
 extern void smp_alloc_memory(void);
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
 
 void smp_send_nmi_allbutself(void);
 
diff -r eef4641d6726 -r 253073b522f8 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Fri Oct 21 09:22:02 2011 +0200
+++ b/xen/include/xen/cpumask.h Fri Oct 21 09:23:05 2011 +0200
@@ -320,20 +320,16 @@
 #define cpus_addr(src) ((src).bits)
 #define cpumask_bits(maskp) ((maskp)->bits)
 
-#define cpumask_scnprintf(buf, len, src) \
-       __cpumask_scnprintf((buf), (len), &(src), nr_cpu_ids)
-static inline int __cpumask_scnprintf(char *buf, int len,
-                                       const cpumask_t *srcp, int nbits)
+static inline int cpumask_scnprintf(char *buf, int len,
+                                   const cpumask_t *srcp)
 {
-       return bitmap_scnprintf(buf, len, srcp->bits, nbits);
+       return bitmap_scnprintf(buf, len, srcp->bits, nr_cpu_ids);
 }
 
-#define cpulist_scnprintf(buf, len, src) \
-       __cpulist_scnprintf((buf), (len), &(src), nr_cpu_ids)
-static inline int __cpulist_scnprintf(char *buf, int len,
-                                       const cpumask_t *srcp, int nbits)
+static inline int cpulist_scnprintf(char *buf, int len,
+                                   const cpumask_t *srcp)
 {
-       return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
+       return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpu_ids);
 }
 
 /*

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.