[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] Remove many uses of cpu_possible_map and iterators over NR_CPUS.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1273865822 -3600
# Node ID 6348b2ab5c391114593c63b491b42fc16cce0dab
# Parent  44f25f7250bea0f14d90f4174f7cd337bc6215bf
Remove many uses of cpu_possible_map and iterators over NR_CPUS.

The significant remaining culprits for x86 are credit2, hpet, and
percpu-area subsystems. To be dealt with in a separate patch.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c          |    1 
 xen/arch/x86/nmi.c              |    2 
 xen/arch/x86/oprofile/nmi_int.c |    5 --
 xen/arch/x86/setup.c            |    2 
 xen/arch/x86/smpboot.c          |    5 --
 xen/arch/x86/time.c             |    7 +-
 xen/common/domctl.c             |    7 +-
 xen/common/perfc.c              |   28 ++++++-----
 xen/common/radix-tree.c         |    5 +-
 xen/common/rcupdate.c           |    2 
 xen/common/sched_sedf.c         |   14 +++--
 xen/common/schedule.c           |   41 +++++++++++++---
 xen/common/stop_machine.c       |   25 ++++++++--
 xen/common/tasklet.c            |   11 ++--
 xen/common/timer.c              |   31 +++++++++---
 xen/common/tmem.c               |    9 ++-
 xen/common/tmem_xen.c           |   98 ++++++++++++++++++++++++++++++----------
 xen/include/asm-ia64/perfc.h    |    4 -
 xen/include/asm-x86/perfc.h     |    4 -
 xen/include/asm-x86/setup.h     |    1 
 xen/include/asm-x86/smp.h       |    1 
 xen/include/xen/tmem.h          |    1 
 xen/xsm/flask/flask_op.c        |    8 ---
 23 files changed, 209 insertions(+), 103 deletions(-)

diff -r 44f25f7250be -r 6348b2ab5c39 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri May 14 18:39:02 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri May 14 20:37:02 2010 +0100
@@ -85,6 +85,7 @@ static int cpu_callback(
     case CPU_DYING:
         hvm_cpu_down();
         break;
+    case CPU_UP_CANCELED:
     case CPU_DEAD:
         hvm_funcs.cpu_dead(cpu);
         break;
diff -r 44f25f7250be -r 6348b2ab5c39 xen/arch/x86/nmi.c
--- a/xen/arch/x86/nmi.c        Fri May 14 18:39:02 2010 +0100
+++ b/xen/arch/x86/nmi.c        Fri May 14 20:37:02 2010 +0100
@@ -468,7 +468,7 @@ static void do_nmi_stats(unsigned char k
     struct vcpu *v;
 
     printk("CPU\tNMI\n");
-    for_each_possible_cpu ( i )
+    for_each_online_cpu ( i )
         printk("%3d\t%3d\n", i, nmi_count(i));
 
     if ( ((d = dom0) == NULL) || (d->vcpu == NULL) ||
diff -r 44f25f7250be -r 6348b2ab5c39 xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c   Fri May 14 18:39:02 2010 +0100
+++ b/xen/arch/x86/oprofile/nmi_int.c   Fri May 14 20:37:02 2010 +0100
@@ -150,10 +150,7 @@ static int allocate_msrs(void)
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;
 
        int i;
-       for (i = 0; i < NR_CPUS; ++i) {
-               if (!test_bit(i, &cpu_online_map))
-                       continue;
-
+       for_each_online_cpu (i) {
                cpu_msrs[i].counters = xmalloc_bytes(counters_size);
                if (!cpu_msrs[i].counters) {
                        success = 0;
diff -r 44f25f7250be -r 6348b2ab5c39 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Fri May 14 18:39:02 2010 +0100
+++ b/xen/arch/x86/setup.c      Fri May 14 20:37:02 2010 +0100
@@ -1158,8 +1158,6 @@ void __init __start_xen(unsigned long mb
 
     init_trace_bufs();
 
-    init_tmem();
-
     console_endboot();
 
     /* Hide UART from DOM0 if we're using it */
diff -r 44f25f7250be -r 6348b2ab5c39 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Fri May 14 18:39:02 2010 +0100
+++ b/xen/arch/x86/smpboot.c    Fri May 14 20:37:02 2010 +0100
@@ -1198,11 +1198,6 @@ static void __init smp_boot_cpus(unsigne
         * construct cpu_sibling_map, so that we can tell sibling CPUs
         * efficiently.
         */
-       for_each_possible_cpu(cpu) {
-               cpus_clear(per_cpu(cpu_sibling_map, cpu));
-               cpus_clear(per_cpu(cpu_core_map, cpu));
-       }
-
        cpu_set(0, per_cpu(cpu_sibling_map, 0));
        cpu_set(0, per_cpu(cpu_core_map, 0));
 
diff -r 44f25f7250be -r 6348b2ab5c39 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Fri May 14 18:39:02 2010 +0100
+++ b/xen/arch/x86/time.c       Fri May 14 20:37:02 2010 +0100
@@ -1401,6 +1401,10 @@ void init_percpu_time(void)
     unsigned long flags;
     s_time_t now;
 
+    /* If we have constant-rate TSCs then scale factor can be shared. */
+    if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
+        this_cpu(cpu_time).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
+
     local_irq_save(flags);
     rdtscll(t->local_tsc_stamp);
     now = read_platform_stime();
@@ -1435,9 +1439,6 @@ int __init init_xen_time(void)
     /* If we have constant-rate TSCs then scale factor can be shared. */
     if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
     {
-        int cpu;
-        for_each_possible_cpu ( cpu )
-            per_cpu(cpu_time, cpu).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
         /* If TSCs are not marked as 'reliable', re-sync during rendezvous. */
         if ( !boot_cpu_has(X86_FEATURE_TSC_RELIABLE) )
             time_calibration_rendezvous_fn = time_calibration_tsc_rendezvous;
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/domctl.c
--- a/xen/common/domctl.c       Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/domctl.c       Fri May 14 20:37:02 2010 +0100
@@ -155,7 +155,7 @@ static unsigned int default_vcpu0_locati
     cpumask_t      cpu_exclude_map;
 
     /* Do an initial CPU placement. Pick the least-populated CPU. */
-    nr_cpus = last_cpu(cpu_possible_map) + 1;
+    nr_cpus = last_cpu(cpu_online_map) + 1;
     cnt = xmalloc_array(unsigned int, nr_cpus);
     if ( cnt )
     {
@@ -164,8 +164,9 @@ static unsigned int default_vcpu0_locati
         rcu_read_lock(&domlist_read_lock);
         for_each_domain ( d )
             for_each_vcpu ( d, v )
-                if ( !test_bit(_VPF_down, &v->pause_flags) )
-                    cnt[v->processor]++;
+                if ( !test_bit(_VPF_down, &v->pause_flags)
+                     && ((cpu = v->processor) < nr_cpus) )
+                    cnt[cpu]++;
         rcu_read_unlock(&domlist_read_lock);
     }
 
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/perfc.c
--- a/xen/common/perfc.c        Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/perfc.c        Fri May 14 20:37:02 2010 +0100
@@ -114,8 +114,6 @@ void perfc_printall(unsigned char key)
         }
         printk("\n");
     }
-
-    arch_perfc_printall();
 }
 
 void perfc_reset(unsigned char key)
@@ -136,13 +134,13 @@ void perfc_reset(unsigned char key)
         switch ( perfc_info[i].type )
         {
         case TYPE_SINGLE:
-            for_each_possible_cpu ( cpu )
+            for_each_online_cpu ( cpu )
                 per_cpu(perfcounters, cpu)[j] = 0;
         case TYPE_S_SINGLE:
             ++j;
             break;
         case TYPE_ARRAY:
-            for_each_possible_cpu ( cpu )
+            for_each_online_cpu ( cpu )
                 memset(per_cpu(perfcounters, cpu) + j, 0,
                        perfc_info[i].nr_elements * sizeof(perfc_t));
         case TYPE_S_ARRAY:
@@ -157,15 +155,22 @@ static xen_sysctl_perfc_desc_t perfc_d[N
 static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS];
 static xen_sysctl_perfc_val_t *perfc_vals;
 static unsigned int      perfc_nbr_vals;
-static int               perfc_init = 0;
+static cpumask_t         perfc_cpumap;
+
 static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
                            XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val)
 {
     unsigned int i, j, v;
 
     /* We only copy the name and array-size information once. */
-    if ( !perfc_init ) 
-    {
+    if ( !cpus_equal(cpu_online_map, perfc_cpumap) )
+    {
+        unsigned int nr_cpus;
+        perfc_cpumap = cpu_online_map;
+        nr_cpus = cpus_weight(perfc_cpumap);
+
+        perfc_nbr_vals = 0;
+
         for ( i = 0; i < NR_PERFCTRS; i++ )
         {
             safe_strcpy(perfc_d[i].name, perfc_info[i].name);
@@ -174,7 +179,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
             {
             case TYPE_SINGLE:
             case TYPE_S_SINGLE:
-                perfc_d[i].nr_vals = num_possible_cpus();
+                perfc_d[i].nr_vals = nr_cpus;
                 break;
             case TYPE_ARRAY:
             case TYPE_S_ARRAY:
@@ -183,8 +188,9 @@ static int perfc_copy_info(XEN_GUEST_HAN
             }
             perfc_nbr_vals += perfc_d[i].nr_vals;
         }
+
+        xfree(perfc_vals);
         perfc_vals = xmalloc_array(xen_sysctl_perfc_val_t, perfc_nbr_vals);
-        perfc_init = 1;
     }
 
     if ( guest_handle_is_null(desc) )
@@ -205,14 +211,14 @@ static int perfc_copy_info(XEN_GUEST_HAN
         {
         case TYPE_SINGLE:
         case TYPE_S_SINGLE:
-            for_each_possible_cpu ( cpu )
+            for_each_cpu_mask ( cpu, perfc_cpumap )
                 perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
             ++j;
             break;
         case TYPE_ARRAY:
         case TYPE_S_ARRAY:
             memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
-            for_each_possible_cpu ( cpu )
+            for_each_cpu_mask ( cpu, perfc_cpumap )
             {
                 perfc_t *counters = per_cpu(perfcounters, cpu) + j;
                 unsigned int k;
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/radix-tree.c
--- a/xen/common/radix-tree.c   Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/radix-tree.c   Fri May 14 20:37:02 2010 +0100
@@ -31,6 +31,7 @@
  */
 
 #include <xen/config.h>
+#include <xen/init.h>
 #include <xen/lib.h>
 #include <xen/types.h>
 #include <xen/errno.h>
@@ -429,7 +430,7 @@ void radix_tree_destroy(struct radix_tre
 }
 EXPORT_SYMBOL(radix_tree_destroy);
 
-static /*__init*/ unsigned long __maxindex(unsigned int height)
+static unsigned long __init __maxindex(unsigned int height)
 {
     unsigned int tmp = height * RADIX_TREE_MAP_SHIFT;
     unsigned long index = (~0UL >> (RADIX_TREE_INDEX_BITS - tmp - 1)) >> 1;
@@ -439,7 +440,7 @@ static /*__init*/ unsigned long __maxind
     return index;
 }
 
-/*__init*/ void radix_tree_init(void)
+void __init radix_tree_init(void)
 {
     unsigned int i;
 
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/rcupdate.c
--- a/xen/common/rcupdate.c     Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/rcupdate.c     Fri May 14 20:37:02 2010 +0100
@@ -361,7 +361,7 @@ void __init rcu_init(void)
 void __init rcu_init(void)
 {
     void *cpu = (void *)(long)smp_processor_id();
-    cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
+    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
     register_cpu_notifier(&cpu_nfb);
     open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/sched_sedf.c   Fri May 14 20:37:02 2010 +0100
@@ -1333,7 +1333,7 @@ static int sedf_adjust_weights(struct cp
 {
     struct vcpu *p;
     struct domain      *d;
-    unsigned int        nr_cpus = last_cpu(cpu_possible_map) + 1;
+    unsigned int        cpu, nr_cpus = last_cpu(cpu_online_map) + 1;
     int                *sumw = xmalloc_array(int, nr_cpus);
     s_time_t           *sumt = xmalloc_array(s_time_t, nr_cpus);
 
@@ -1354,9 +1354,12 @@ static int sedf_adjust_weights(struct cp
             continue;
         for_each_vcpu( d, p )
         {
+            if ( (cpu = p->processor) >= nr_cpus )
+                continue;
+
             if ( EDOM_INFO(p)->weight )
             {
-                sumw[p->processor] += EDOM_INFO(p)->weight;
+                sumw[cpu] += EDOM_INFO(p)->weight;
             }
             else
             {
@@ -1367,7 +1370,7 @@ static int sedf_adjust_weights(struct cp
                 /*check for overflows*/
                 ASSERT((WEIGHT_PERIOD < ULONG_MAX) 
                        && (EDOM_INFO(p)->slice_orig < ULONG_MAX));
-                sumt[p->processor] += 
+                sumt[cpu] += 
                     (WEIGHT_PERIOD * EDOM_INFO(p)->slice_orig) / 
                     EDOM_INFO(p)->period_orig;
             }
@@ -1381,6 +1384,8 @@ static int sedf_adjust_weights(struct cp
     {
         for_each_vcpu ( d, p )
         {
+            if ( (cpu = p->processor) >= nr_cpus )
+                continue;
             if ( EDOM_INFO(p)->weight )
             {
                 EDOM_INFO(p)->period_orig = 
@@ -1388,8 +1393,7 @@ static int sedf_adjust_weights(struct cp
                 EDOM_INFO(p)->slice_orig  =
                     EDOM_INFO(p)->slice   = 
                     (EDOM_INFO(p)->weight *
-                     (WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[p->processor])) / 
-                    sumw[p->processor];
+                     (WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[cpu])) / sumw[cpu];
             }
         }
     }
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/schedule.c
--- a/xen/common/schedule.c     Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/schedule.c     Fri May 14 20:37:02 2010 +0100
@@ -31,6 +31,7 @@
 #include <xen/errno.h>
 #include <xen/guest_access.h>
 #include <xen/multicall.h>
+#include <xen/cpu.h>
 #include <public/sched.h>
 #include <xsm/xsm.h>
 
@@ -1089,9 +1090,39 @@ const struct scheduler *scheduler_get_by
     return NULL;
 }
 
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+
+    switch ( action )
+    {
+    case CPU_UP_PREPARE:
+        per_cpu(scheduler, cpu) = &ops;
+        spin_lock_init(&per_cpu(schedule_data, cpu)._lock);
+        per_cpu(schedule_data, cpu).schedule_lock
+            = &per_cpu(schedule_data, cpu)._lock;
+        init_timer(&per_cpu(schedule_data, cpu).s_timer,
+                   s_timer_fn, NULL, cpu);
+        break;
+    case CPU_DEAD:
+        kill_timer(&per_cpu(schedule_data, cpu).s_timer);
+        break;
+    default:
+        break;
+    }
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
+
 /* Initialise the data structures. */
 void __init scheduler_init(void)
 {
+    void *hcpu = (void *)(long)smp_processor_id();
     int i;
 
     open_softirq(SCHEDULE_SOFTIRQ, schedule);
@@ -1109,14 +1140,8 @@ void __init scheduler_init(void)
         ops = *schedulers[0];
     }
 
-    for_each_possible_cpu ( i )
-    {
-        per_cpu(scheduler, i) = &ops;
-        spin_lock_init(&per_cpu(schedule_data, i)._lock);
-        per_cpu(schedule_data, i).schedule_lock
-            = &per_cpu(schedule_data, i)._lock;
-        init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
-    }
+    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+    register_cpu_notifier(&cpu_nfb);
 
     printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
     if ( SCHED_OP(&ops, init, 1) )
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/stop_machine.c
--- a/xen/common/stop_machine.c Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/stop_machine.c Fri May 14 20:37:02 2010 +0100
@@ -155,12 +155,31 @@ static void stopmachine_action(unsigned 
     local_irq_enable();
 }
 
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+
+    if ( action == CPU_UP_PREPARE )
+        tasklet_init(&per_cpu(stopmachine_tasklet, cpu),
+                     stopmachine_action, cpu);
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
+
 static int __init cpu_stopmachine_init(void)
 {
     unsigned int cpu;
-    for_each_possible_cpu ( cpu )
-        tasklet_init(&per_cpu(stopmachine_tasklet, cpu),
-                     stopmachine_action, cpu);
+    for_each_online_cpu ( cpu )
+    {
+        void *hcpu = (void *)(long)cpu;
+        cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+    }
+    register_cpu_notifier(&cpu_nfb);
     return 0;
 }
 __initcall(cpu_stopmachine_init);
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/tasklet.c
--- a/xen/common/tasklet.c      Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/tasklet.c      Fri May 14 20:37:02 2010 +0100
@@ -19,7 +19,7 @@
 #include <xen/cpu.h>
 
 /* Some subsystems call into us before we are initialised. We ignore them. */
-static cpumask_t tasklets_initialised;
+static bool_t tasklets_initialised;
 
 DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);
 
@@ -44,7 +44,7 @@ void tasklet_schedule_on_cpu(struct task
 
     spin_lock_irqsave(&tasklet_lock, flags);
 
-    if ( cpu_isset(cpu, tasklets_initialised) && !t->is_dead )
+    if ( tasklets_initialised && !t->is_dead )
     {
         t->scheduled_on = cpu;
         if ( !t->is_running )
@@ -174,9 +174,9 @@ static int cpu_callback(
     switch ( action )
     {
     case CPU_UP_PREPARE:
-        if ( !cpu_test_and_set(cpu, tasklets_initialised) )
-            INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
-        break;
+        INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
+        break;
+    case CPU_UP_CANCELED:
     case CPU_DEAD:
         migrate_tasklets_from_cpu(cpu);
         break;
@@ -196,6 +196,7 @@ void __init tasklet_subsys_init(void)
     void *hcpu = (void *)(long)smp_processor_id();
     cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
     register_cpu_notifier(&cpu_nfb);
+    tasklets_initialised = 1;
 }
 
 /*
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/timer.c
--- a/xen/common/timer.c        Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/timer.c        Fri May 14 20:37:02 2010 +0100
@@ -18,6 +18,7 @@
 #include <xen/timer.h>
 #include <xen/keyhandler.h>
 #include <xen/percpu.h>
+#include <xen/cpu.h>
 #include <asm/system.h>
 #include <asm/desc.h>
 
@@ -514,10 +515,29 @@ static struct keyhandler dump_timerq_key
     .desc = "dump timer queues"
 };
 
+static struct timer *dummy_heap;
+
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+
+    if ( action == CPU_UP_PREPARE )
+    {
+        spin_lock_init(&per_cpu(timers, cpu).lock);
+        per_cpu(timers, cpu).heap = &dummy_heap;
+    }
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
+
 void __init timer_init(void)
 {
-    static struct timer *dummy_heap;
-    int i;
+    void *cpu = (void *)(long)smp_processor_id();
 
     open_softirq(TIMER_SOFTIRQ, timer_softirq_action);
 
@@ -528,11 +548,8 @@ void __init timer_init(void)
     SET_HEAP_SIZE(&dummy_heap, 0);
     SET_HEAP_LIMIT(&dummy_heap, 0);
 
-    for_each_possible_cpu ( i )
-    {
-        spin_lock_init(&per_cpu(timers, i).lock);
-        per_cpu(timers, i).heap = &dummy_heap;
-    }
+    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+    register_cpu_notifier(&cpu_nfb);
 
     register_keyhandler('a', &dump_timerq_keyhandler);
 }
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/tmem.c
--- a/xen/common/tmem.c Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/tmem.c Fri May 14 20:37:02 2010 +0100
@@ -2877,11 +2877,11 @@ EXPORT void *tmem_relinquish_pages(unsig
 }
 
 /* called at hypervisor startup */
-EXPORT void init_tmem(void)
+static int __init init_tmem(void)
 {
     int i;
     if ( !tmh_enabled() )
-        return;
+        return 0;
 
     radix_tree_init();
     if ( tmh_dedup_enabled() )
@@ -2905,7 +2905,10 @@ EXPORT void init_tmem(void)
     }
     else
         printk("tmem: initialization FAILED\n");
-}
+
+    return 0;
+}
+__initcall(init_tmem);
 
 /*
  * Local variables:
diff -r 44f25f7250be -r 6348b2ab5c39 xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c     Fri May 14 18:39:02 2010 +0100
+++ b/xen/common/tmem_xen.c     Fri May 14 20:37:02 2010 +0100
@@ -11,6 +11,7 @@
 #include <xen/lzo.h> /* compression code */
 #include <xen/paging.h>
 #include <xen/domain_page.h>
+#include <xen/cpu.h>
 
 #define EXPORT /* indicates code other modules are dependent upon */
 
@@ -277,7 +278,7 @@ static void tmh_mempool_page_put(void *p
     tmh_free_page(virt_to_page(page_va));
 }
 
-static int tmh_mempool_init(void)
+static int __init tmh_mempool_init(void)
 {
     tmh_mempool = xmem_pool_create("tmem", tmh_mempool_page_get,
         tmh_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
@@ -347,32 +348,83 @@ EXPORT void tmh_client_destroy(tmh_clien
 
 /******************  XEN-SPECIFIC HOST INITIALIZATION ********************/
 
-EXPORT int tmh_init(void)
-{
 #ifndef __i386__
-    int dstmem_order, workmem_order;
-    bool_t bad_alloc = 0;
-    struct page_info *pi;
-    unsigned char *p1, *p2;
-    int cpu;
+
+static int dstmem_order, workmem_order;
+
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+
+    switch ( action )
+    {
+    case CPU_UP_PREPARE: {
+        if ( per_cpu(dstmem, cpu) == NULL )
+        {
+            struct page_info *p = alloc_domheap_pages(0, dstmem_order, 0);
+            per_cpu(dstmem, cpu) = p ? page_to_virt(p) : NULL;
+        }
+        if ( per_cpu(workmem, cpu) == NULL )
+        {
+            struct page_info *p = alloc_domheap_pages(0, workmem_order, 0);
+            per_cpu(workmem, cpu) = p ? page_to_virt(p) : NULL;
+        }
+        break;
+    }
+    case CPU_DEAD:
+    case CPU_UP_CANCELED: {
+        if ( per_cpu(dstmem, cpu) != NULL )
+        {
+            struct page_info *p = virt_to_page(per_cpu(dstmem, cpu));
+            free_domheap_pages(p, dstmem_order);
+            per_cpu(dstmem, cpu) = NULL;
+        }
+        if ( per_cpu(workmem, cpu) != NULL )
+        {
+            struct page_info *p = virt_to_page(per_cpu(workmem, cpu));
+            free_domheap_pages(p, workmem_order);
+            per_cpu(workmem, cpu) = NULL;
+        }
+        break;
+    }
+    default:
+        break;
+    }
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
+
+EXPORT int __init tmh_init(void)
+{
+    unsigned int cpu;
 
     if ( !tmh_mempool_init() )
         return 0;
 
     dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
     workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
-    for_each_possible_cpu ( cpu )
-    {
-        pi = alloc_domheap_pages(0,dstmem_order,0);
-        per_cpu(dstmem, cpu) = p1 = ((pi == NULL) ? NULL : page_to_virt(pi));
-        pi = alloc_domheap_pages(0,workmem_order,0);
-        per_cpu(workmem, cpu) = p2 = ((pi == NULL) ? NULL : page_to_virt(pi));
-        if ( (p1 == NULL) || (p2 == NULL) )
-            bad_alloc++;
-    }
-    if ( bad_alloc )
-        printk("tmem: can't allocate compression buffers for %d cpus\n",
-               bad_alloc);
-#endif
-    return 1;
-}
+
+    for_each_online_cpu ( cpu )
+    {
+        void *hcpu = (void *)(long)cpu;
+        cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+    }
+
+    register_cpu_notifier(&cpu_nfb);
+
+    return 1;
+}
+
+#else
+
+EXPORT int __init tmh_init(void)
+{
+    return 1;
+}
+
+#endif
diff -r 44f25f7250be -r 6348b2ab5c39 xen/include/asm-ia64/perfc.h
--- a/xen/include/asm-ia64/perfc.h      Fri May 14 18:39:02 2010 +0100
+++ b/xen/include/asm-ia64/perfc.h      Fri May 14 20:37:02 2010 +0100
@@ -3,10 +3,6 @@
 
 #include <asm/vhpt.h>
 #include <asm/privop_stat.h>
-
-static inline void arch_perfc_printall(void)
-{
-}
 
 static inline void arch_perfc_reset(void)
 {
diff -r 44f25f7250be -r 6348b2ab5c39 xen/include/asm-x86/perfc.h
--- a/xen/include/asm-x86/perfc.h       Fri May 14 18:39:02 2010 +0100
+++ b/xen/include/asm-x86/perfc.h       Fri May 14 20:37:02 2010 +0100
@@ -1,9 +1,5 @@
 #ifndef __ASM_PERFC_H__
 #define __ASM_PERFC_H__
-
-static inline void arch_perfc_printall(void)
-{
-}
 
 static inline void arch_perfc_reset(void)
 {
diff -r 44f25f7250be -r 6348b2ab5c39 xen/include/asm-x86/setup.h
--- a/xen/include/asm-x86/setup.h       Fri May 14 18:39:02 2010 +0100
+++ b/xen/include/asm-x86/setup.h       Fri May 14 20:37:02 2010 +0100
@@ -22,7 +22,6 @@ void subarch_init_memory(void);
 void subarch_init_memory(void);
 
 void init_IRQ(void);
-void init_tmem(void);
 void vesa_init(void);
 void vesa_mtrr_init(void);
 
diff -r 44f25f7250be -r 6348b2ab5c39 xen/include/asm-x86/smp.h
--- a/xen/include/asm-x86/smp.h Fri May 14 18:39:02 2010 +0100
+++ b/xen/include/asm-x86/smp.h Fri May 14 20:37:02 2010 +0100
@@ -69,7 +69,6 @@ int cpu_add(uint32_t apic_id, uint32_t a
 
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_callin_map;
-/* cpu_possible_map declared in <xen/cpumask.h> */
 
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
diff -r 44f25f7250be -r 6348b2ab5c39 xen/include/xen/tmem.h
--- a/xen/include/xen/tmem.h    Fri May 14 18:39:02 2010 +0100
+++ b/xen/include/xen/tmem.h    Fri May 14 20:37:02 2010 +0100
@@ -9,7 +9,6 @@
 #ifndef __XEN_TMEM_H__
 #define __XEN_TMEM_H__
 
-extern void init_tmem(void);
 extern void tmem_destroy(void *);
 extern void *tmem_relinquish_pages(unsigned int, unsigned int);
 extern int  opt_tmem;
diff -r 44f25f7250be -r 6348b2ab5c39 xen/xsm/flask/flask_op.c
--- a/xen/xsm/flask/flask_op.c  Fri May 14 18:39:02 2010 +0100
+++ b/xen/xsm/flask/flask_op.c  Fri May 14 20:37:02 2010 +0100
@@ -680,7 +680,6 @@ static int flask_security_avc_cachestats
     char *page = NULL;
     int len = 0;
     int length = 0;
-    long long idx = 0;
     int cpu;
     struct avc_cache_stats *st;
 
@@ -701,11 +700,8 @@ static int flask_security_avc_cachestats
     length += len;
     count -= len;
 
-    for ( cpu = idx; cpu < NR_CPUS; ++cpu )
-    {
-        if ( !cpu_possible(cpu) )
-            continue;
-        idx = cpu + 1;
+    for_each_online_cpu ( cpu )
+    {
         st = &per_cpu(avc_cache_stats, cpu);
 
         len = snprintf(page, PAGE_SIZE, "%u %u %u %u %u %u\n", st->lookups,

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.