[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] rename for_each_cpu() to for_each_possible_cpu()



... to be more precise in naming, and also to match Linux.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

--- 2009-07-10.orig/xen/arch/ia64/linux-xen/perfmon.c   2009-05-27 13:54:05.000000000 +0200
+++ 2009-07-10/xen/arch/ia64/linux-xen/perfmon.c        2009-07-15 10:02:08.000000000 +0200
@@ -7313,7 +7313,7 @@ xenpfm_context_create(XEN_GUEST_HANDLE(p
                goto out;
 
        /* XXX fmt */
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                ctx[cpu] = pfm_context_create(&kreq);
                if (ctx[cpu] == NULL) {
                        error = -ENOMEM;
@@ -7325,20 +7325,20 @@ xenpfm_context_create(XEN_GUEST_HANDLE(p
 
        BUG_ON(in_irq());
        spin_lock(&xenpfm_context_lock);
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (per_cpu(xenpfm_context, cpu) != NULL) {
                        error = -EBUSY;
                        break;
                }
        }
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                per_cpu(xenpfm_context, cpu) = ctx[cpu];
                ctx[cpu] = NULL;
        }
        spin_unlock(&xenpfm_context_lock);
 
 out:
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (ctx[cpu] != NULL)
                        pfm_context_free(ctx[cpu]);
        }
@@ -7358,7 +7358,7 @@ again:
        need_unload = 0;
        BUG_ON(in_irq());
        spin_lock_irqsave(&xenpfm_context_lock, flags);
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                ctx = per_cpu(xenpfm_context, cpu);
                if (ctx == NULL) {
                        error = -EINVAL;
@@ -7369,7 +7369,7 @@ again:
                        need_unload = 1;
        }
        if (error) {
-               for_each_cpu(cpu) {
+               for_each_possible_cpu(cpu) {
                        ctx = per_cpu(xenpfm_context, cpu);
                        if (ctx == NULL)
                                break;
@@ -7378,7 +7378,7 @@ again:
                goto out;
        }
        if (need_unload) {
-               for_each_cpu(cpu)
+               for_each_possible_cpu(cpu)
                        UNPROTECT_CTX_NOIRQ(per_cpu(xenpfm_context, cpu));
                spin_unlock_irqrestore(&xenpfm_context_lock, flags);
 
@@ -7388,7 +7388,7 @@ again:
                goto again;
        }
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
                per_cpu(xenpfm_context, cpu) = NULL;
 
@@ -7740,7 +7740,7 @@ xenpfm_start_stop_locked(int is_start)
        arg.is_start = is_start;
        atomic_set(&arg.started, 1); /* 1 for this cpu */
        atomic_set(&arg.finished, 0);
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                arg.error[cpu] = 0;
 
        BUG_ON(!spin_is_locked(&xenpfm_context_lock));
--- 2009-07-10.orig/xen/arch/ia64/linux-xen/smpboot.c   2009-07-10 13:57:41.000000000 +0200
+++ 2009-07-10/xen/arch/ia64/linux-xen/smpboot.c        2009-07-15 10:02:26.000000000 +0200
@@ -675,7 +675,7 @@ remove_from_mtinfo(int cpu)
 {
        int i;
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                if (mt_info[i].valid &&  mt_info[i].socket_id ==
                                                cpu_data(cpu)->socket_id)
                        mt_info[i].valid = 0;
@@ -874,7 +874,7 @@ check_for_mtinfo_index(void)
 {
        int i;
        
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                if (!mt_info[i].valid)
                        return i;
 
@@ -892,7 +892,7 @@ check_for_new_socket(__u16 logical_addre
        int i;
        __u32 sid = c->socket_id;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
                    && mt_info[i].socket_id == sid) {
                        c->core_id = mt_info[i].core_id;
--- 2009-07-10.orig/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c 2009-05-27 13:54:05.000000000 +0200
+++ 2009-07-10/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c      2009-07-15 10:01:39.000000000 +0200
@@ -211,7 +211,7 @@ sn2_global_tlb_purge(unsigned long start
        spin_lock(&sn2_ptcg_lock2);
        node_set(cpu_to_node(smp_processor_id()), nodes_flushed);
        i = 0;
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                cnode = cpu_to_node(cpu);
                if (!node_isset(cnode, nodes_flushed)) {
                        cpu_set(cpu, selected_cpus);
@@ -269,7 +269,7 @@ sn2_global_tlb_purge(struct mm_struct *m
                i++;
        }
 #else
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                cnode = cpu_to_node(cpu);
                node_set(cnode, nodes_flushed);
                lcpu = cpu;
--- 2009-07-10.orig/xen/arch/ia64/xen/privop_stat.c     2009-07-03 10:20:56.000000000 +0200
+++ 2009-07-10/xen/arch/ia64/xen/privop_stat.c  2009-07-15 10:00:01.000000000 +0200
@@ -65,7 +65,7 @@ void gather_privop_addrs(void)
 {
        unsigned int cpu;
 
-       for_each_cpu ( cpu ) {
+       for_each_possible_cpu ( cpu ) {
                perfc_t *perfcounters = per_cpu(perfcounters, cpu);
                struct privop_addr_count *s = per_cpu(privop_addr_counter, cpu);
                int i, j;
@@ -92,7 +92,7 @@ void reset_privop_addrs(void)
 {
        unsigned int cpu;
 
-       for_each_cpu ( cpu ) {
+       for_each_possible_cpu ( cpu ) {
                struct privop_addr_count *v = per_cpu(privop_addr_counter, cpu);
                int i, j;
 
--- 2009-07-10.orig/xen/arch/ia64/xen/xensetup.c        2009-07-10 08:51:30.000000000 +0200
+++ 2009-07-10/xen/arch/ia64/xen/xensetup.c     2009-07-15 10:01:13.000000000 +0200
@@ -606,8 +606,7 @@ skip_move:
     smp_prepare_cpus(max_cpus);
 
     /* We aren't hotplug-capable yet. */
-    for_each_cpu ( i )
-        cpu_set(i, cpu_present_map);
+    cpus_or(cpu_present_map, cpu_present_map, cpu_possible_map);
 
     /*  Enable IRQ to receive IPI (needed for ITC sync).  */
     local_irq_enable();
--- 2009-07-10.orig/xen/arch/x86/hpet.c 2009-05-27 13:54:05.000000000 +0200
+++ 2009-07-10/xen/arch/x86/hpet.c      2009-07-15 09:58:49.000000000 +0200
@@ -590,7 +590,7 @@ void hpet_broadcast_init(void)
     legacy_hpet_event.flags = 0;
     spin_lock_init(&legacy_hpet_event.lock);
 
-    for_each_cpu(i)
+    for_each_possible_cpu(i)
         per_cpu(cpu_bc_channel, i) = &legacy_hpet_event;
 
     if ( !force_hpet_broadcast )
--- 2009-07-10.orig/xen/arch/x86/nmi.c  2009-06-19 11:11:23.000000000 +0200
+++ 2009-07-10/xen/arch/x86/nmi.c       2009-07-15 09:58:16.000000000 +0200
@@ -460,7 +460,7 @@ static void do_nmi_stats(unsigned char k
     struct vcpu *v;
 
     printk("CPU\tNMI\n");
-    for_each_cpu ( i )
+    for_each_possible_cpu ( i )
         printk("%3d\t%3d\n", i, nmi_count(i));
 
     if ( ((d = dom0) == NULL) || (d->vcpu == NULL) ||
--- 2009-07-10.orig/xen/arch/x86/smpboot.c      2009-07-15 09:36:05.000000000 +0200
+++ 2009-07-10/xen/arch/x86/smpboot.c   2009-07-15 09:58:36.000000000 +0200
@@ -1163,7 +1163,7 @@ static void __init smp_boot_cpus(unsigne
         * construct cpu_sibling_map, so that we can tell sibling CPUs
         * efficiently.
         */
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                cpus_clear(per_cpu(cpu_sibling_map, cpu));
                cpus_clear(per_cpu(cpu_core_map, cpu));
        }
--- 2009-07-10.orig/xen/arch/x86/time.c 2009-05-27 13:54:05.000000000 +0200
+++ 2009-07-10/xen/arch/x86/time.c      2009-07-15 09:59:20.000000000 +0200
@@ -1227,7 +1227,7 @@ int __init init_xen_time(void)
     if ( opt_consistent_tscs )
     {
         int cpu;
-        for_each_cpu ( cpu )
+        for_each_possible_cpu ( cpu )
             per_cpu(cpu_time, cpu).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
     }
 
--- 2009-07-10.orig/xen/common/perfc.c  2007-06-21 09:23:10.000000000 +0200
+++ 2009-07-10/xen/common/perfc.c       2009-07-15 10:03:02.000000000 +0200
@@ -136,13 +136,13 @@ void perfc_reset(unsigned char key)
         switch ( perfc_info[i].type )
         {
         case TYPE_SINGLE:
-            for_each_cpu ( cpu )
+            for_each_possible_cpu ( cpu )
                 per_cpu(perfcounters, cpu)[j] = 0;
         case TYPE_S_SINGLE:
             ++j;
             break;
         case TYPE_ARRAY:
-            for_each_cpu ( cpu )
+            for_each_possible_cpu ( cpu )
                 memset(per_cpu(perfcounters, cpu) + j, 0,
                        perfc_info[i].nr_elements * sizeof(perfc_t));
         case TYPE_S_ARRAY:
@@ -205,14 +205,14 @@ static int perfc_copy_info(XEN_GUEST_HAN
         {
         case TYPE_SINGLE:
         case TYPE_S_SINGLE:
-            for_each_cpu ( cpu )
+            for_each_possible_cpu ( cpu )
                 perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
             ++j;
             break;
         case TYPE_ARRAY:
         case TYPE_S_ARRAY:
             memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
-            for_each_cpu ( cpu )
+            for_each_possible_cpu ( cpu )
             {
                 perfc_t *counters = per_cpu(perfcounters, cpu) + j;
                 unsigned int k;
--- 2009-07-10.orig/xen/common/schedule.c       2009-04-09 14:05:36.000000000 +0200
+++ 2009-07-10/xen/common/schedule.c    2009-07-15 10:03:11.000000000 +0200
@@ -918,7 +918,7 @@ void __init scheduler_init(void)
 
     open_softirq(SCHEDULE_SOFTIRQ, schedule);
 
-    for_each_cpu ( i )
+    for_each_possible_cpu ( i )
     {
         spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
         init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
--- 2009-07-10.orig/xen/common/timer.c  2009-02-18 17:45:17.000000000 +0100
+++ 2009-07-10/xen/common/timer.c       2009-07-15 10:02:46.000000000 +0200
@@ -529,7 +529,7 @@ void __init timer_init(void)
     SET_HEAP_SIZE(&dummy_heap, 0);
     SET_HEAP_LIMIT(&dummy_heap, 0);
 
-    for_each_cpu ( i )
+    for_each_possible_cpu ( i )
     {
         spin_lock_init(&per_cpu(timers, i).lock);
         per_cpu(timers, i).heap = &dummy_heap;
--- 2009-07-10.orig/xen/common/tmem_xen.c       2009-07-10 13:57:36.000000000 +0200
+++ 2009-07-10/xen/common/tmem_xen.c    2009-07-15 10:02:38.000000000 +0200
@@ -317,7 +317,7 @@ EXPORT int tmh_init(void)
 
     dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
     workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
-    for_each_cpu ( cpu )
+    for_each_possible_cpu ( cpu )
     {
         pi = alloc_domheap_pages(0,dstmem_order,0);
         per_cpu(dstmem, cpu) = p1 = ((pi == NULL) ? NULL : page_to_virt(pi));
--- 2009-07-10.orig/xen/include/xen/cpumask.h   2009-05-27 13:54:07.000000000 +0200
+++ 2009-07-10/xen/include/xen/cpumask.h        2009-07-15 10:04:01.000000000 +0200
@@ -61,7 +61,7 @@
  *
  * int any_online_cpu(mask)            First online cpu in mask, or NR_CPUS
  *
- * for_each_cpu(cpu)                   for-loop cpu over cpu_possible_map
+ * for_each_possible_cpu(cpu)          for-loop cpu over cpu_possible_map
  * for_each_online_cpu(cpu)            for-loop cpu over cpu_online_map
  * for_each_present_cpu(cpu)           for-loop cpu over cpu_present_map
  *
@@ -402,9 +402,9 @@ extern cpumask_t cpu_present_map;
        cpu;                                    \
 })
 
-#define for_each_cpu(cpu)        for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
+#define for_each_possible_cpu(cpu) for_each_cpu_mask(cpu, cpu_possible_map)
+#define for_each_online_cpu(cpu)   for_each_cpu_mask(cpu, cpu_online_map)
+#define for_each_present_cpu(cpu)  for_each_cpu_mask(cpu, cpu_present_map)
 
 /* Copy to/from cpumap provided by control tools. */
 struct xenctl_cpumap;



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.