
[Xen-changelog] [xen-unstable] eliminate first_cpu() etc



# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1320744970 -3600
# Node ID d89f1b65b5121b7ae1d915d8ffdcda7605894c66
# Parent  52d242adc94edea36049fa93f668222f93d8518f
eliminate first_cpu() etc

This includes the conversion from for_each_cpu_mask() to for_each_cpu().
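
The old interfaces took a cpumask_t by value; their cpumask_*()
replacements take a const cpumask_t pointer, so call sites gain a '&'
(or drop a '*'). A minimal sketch of the conversion, with do_something()
standing in as a hypothetical per-CPU helper:

    unsigned int cpu;

    /* Old: value-based macros. */
    for_each_cpu_mask(cpu, cpu_online_map)
        do_something(cpu);
    cpu = first_cpu(cpu_online_map);      /* lowest set bit, or NR_CPUS */

    /* New: pointer-based equivalents. */
    for_each_cpu(cpu, &cpu_online_map)
        do_something(cpu);
    cpu = cpumask_first(&cpu_online_map); /* same semantics */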

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---


diff -r 52d242adc94e -r d89f1b65b512 xen/arch/ia64/linux-xen/iosapic.c
--- a/xen/arch/ia64/linux-xen/iosapic.c Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/ia64/linux-xen/iosapic.c Tue Nov 08 10:36:10 2011 +0100
@@ -704,7 +704,7 @@
 
                cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
 
-               for_each_cpu_mask(numa_cpu, cpu_mask) {
+               for_each_cpu(numa_cpu, &cpu_mask) {
                        if (!cpu_online(numa_cpu))
                                cpumask_clear_cpu(numa_cpu, &cpu_mask);
                }
@@ -717,8 +717,8 @@
                /* Use vector assigment to distribute across cpus in node */
                cpu_index = vector % num_cpus;
 
-               for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-                       numa_cpu = next_cpu(numa_cpu, cpu_mask);
+               for (numa_cpu = cpumask_first(&cpu_mask) ; i < cpu_index ; i++)
+                       numa_cpu = cpumask_next(numa_cpu, &cpu_mask);
 
                if (numa_cpu != NR_CPUS)
                        return cpu_physical_id(numa_cpu);
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/ia64/linux-xen/mca.c
--- a/xen/arch/ia64/linux-xen/mca.c     Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/ia64/linux-xen/mca.c     Tue Nov 08 10:36:10 2011 +0100
@@ -1415,7 +1415,7 @@
 #endif
 {
        /* Trigger a CMC interrupt cascade  */
-       platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+       platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
 /*
@@ -1505,7 +1505,7 @@
 #endif
 {
        /* Trigger a CPE interrupt cascade  */
-       platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+       platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
 #endif /* CONFIG_ACPI */
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c     Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/ia64/linux-xen/smp.c     Tue Nov 08 10:36:10 2011 +0100
@@ -462,7 +462,7 @@
        call_data = &data;
        wmb();
 
-       for_each_cpu_mask(cpu, *selected)
+       for_each_cpu(cpu, selected)
                send_IPI_single(cpu, IPI_CALL_FUNC);
 
        while (atomic_read(wait ? &data.finished : &data.started) != nr_cpus)
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/ia64/linux-xen/smpboot.c
--- a/xen/arch/ia64/linux-xen/smpboot.c Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/ia64/linux-xen/smpboot.c Tue Nov 08 10:36:10 2011 +0100
@@ -687,9 +687,9 @@
 {
        int i;
 
-       for_each_cpu_mask(i, *per_cpu(cpu_sibling_mask, cpu))
+       for_each_cpu(i, per_cpu(cpu_sibling_mask, cpu))
                cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, i));
-       for_each_cpu_mask(i, *per_cpu(cpu_core_mask, cpu))
+       for_each_cpu(i, per_cpu(cpu_core_mask, cpu))
                cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, i));
 
        cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/ia64/vmx/vacpi.c
--- a/xen/arch/ia64/vmx/vacpi.c Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/ia64/vmx/vacpi.c Tue Nov 08 10:36:10 2011 +0100
@@ -191,7 +191,7 @@
        s->last_gtime = NOW();
 
        /* Set up callback to fire SCIs when the MSB of TMR_VAL changes */
-       init_timer(&s->timer, pmt_timer_callback, d, first_cpu(cpu_online_map));
+       init_timer(&s->timer, pmt_timer_callback, d, cpumask_first(&cpu_online_map));
        pmt_timer_callback(d);
 }
 
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/ia64/xen/dom0_ops.c      Tue Nov 08 10:36:10 2011 +0100
@@ -618,7 +618,7 @@
         XEN_GUEST_HANDLE_64(uint32) arr;
         uint32_t i, val, max_array_ent = ti->max_cpu_index;
 
-        ti->max_cpu_index = last_cpu(cpu_online_map);
+        ti->max_cpu_index = cpumask_last(&cpu_online_map);
         max_array_ent = min(max_array_ent, ti->max_cpu_index);
 
         arr = ti->cpu_to_core;
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/ia64/xen/domain.c        Tue Nov 08 10:36:10 2011 +0100
@@ -501,7 +501,7 @@
 
        if (!VMX_DOMAIN(v))
                init_timer(&v->arch.hlt_timer, hlt_timer_fn, v,
-                          first_cpu(cpu_online_map));
+                          cpumask_any(&cpu_online_map));
 
        return 0;
 }
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/ia64/xen/vhpt.c  Tue Nov 08 10:36:10 2011 +0100
@@ -463,7 +463,7 @@
                                local_purge = 0;
                }
        } else {
-               for_each_cpu_mask(cpu, entry->pcpu_dirty_mask) {
+               for_each_cpu(cpu, &entry->pcpu_dirty_mask) {
                        /* Invalidate VHPT entries.  */
                        cpu_flush_vhpt_range(cpu, vaddr, 1L << ps);
 
@@ -559,7 +559,7 @@
     if (cpumask_subset(mask, cpumask_of(cpu)))
         return;
 
-    for_each_cpu_mask (cpu, *mask)
+    for_each_cpu (cpu, mask)
         if (cpu != smp_processor_id())
             smp_call_function_single
                 (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1);
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c      Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c      Tue Nov 08 10:36:10 2011 +0100
@@ -251,7 +251,7 @@
     cpumask_and(&target, mask, &cpuidle_mwait_flags);
 
     /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
-    for_each_cpu_mask(cpu, target)
+    for_each_cpu(cpu, &target)
         mwait_wakeup(cpu) = 0;
 
     cpumask_andnot(mask, mask, &target);
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/acpi/cpufreq/cpufreq.c
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c       Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c       Tue Nov 08 10:36:10 2011 +0100
@@ -487,7 +487,7 @@
         return -EAGAIN;
     }
 
-    for_each_cpu_mask(j, online_policy_cpus)
+    for_each_cpu(j, &online_policy_cpus)
         cpufreq_statistic_update(j, perf->state, next_perf_state);
 
     perf->state = next_perf_state;
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/acpi/cpufreq/powernow.c
--- a/xen/arch/x86/acpi/cpufreq/powernow.c      Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c      Tue Nov 08 10:36:10 2011 +0100
@@ -130,7 +130,7 @@
 
     on_selected_cpus(cmd.mask, transition_pstate, &cmd, 1);
 
-    for_each_cpu_mask(j, online_policy_cpus)
+    for_each_cpu(j, &online_policy_cpus)
         cpufreq_statistic_update(j, perf->state, next_perf_state);
 
     perf->state = next_perf_state;
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/genapic/x2apic.c
--- a/xen/arch/x86/genapic/x2apic.c     Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/genapic/x2apic.c     Tue Nov 08 10:36:10 2011 +0100
@@ -72,7 +72,7 @@
 
     local_irq_save(flags);
 
-    for_each_cpu_mask ( cpu, *cpumask )
+    for_each_cpu ( cpu, cpumask )
     {
         if ( !cpu_online(cpu) || (cpu == smp_processor_id()) )
             continue;
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/hpet.c       Tue Nov 08 10:36:10 2011 +0100
@@ -182,7 +182,7 @@
     now = NOW();
 
     /* find all expired events */
-    for_each_cpu_mask(cpu, *ch->cpumask)
+    for_each_cpu(cpu, ch->cpumask)
     {
         s_time_t deadline;
 
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/irq.c        Tue Nov 08 10:36:10 2011 +0100
@@ -125,7 +125,7 @@
     if ( desc->arch.vector != IRQ_VECTOR_UNASSIGNED )
         return -EBUSY;
     trace_irq_mask(TRC_HW_IRQ_BIND_VECTOR, irq, vector, &online_mask);
-    for_each_cpu_mask(cpu, online_mask)
+    for_each_cpu(cpu, &online_mask)
         per_cpu(vector_irq, cpu)[vector] = irq;
     desc->arch.vector = vector;
     cpumask_copy(desc->arch.cpu_mask, &online_mask);
@@ -223,7 +223,7 @@
     vector = desc->arch.vector;
     cpumask_and(&tmp_mask, desc->arch.cpu_mask, &cpu_online_map);
 
-    for_each_cpu_mask(cpu, tmp_mask) {
+    for_each_cpu(cpu, &tmp_mask) {
         ASSERT( per_cpu(vector_irq, cpu)[vector] == irq );
         per_cpu(vector_irq, cpu)[vector] = -1;
     }
@@ -248,7 +248,7 @@
     old_vector = desc->arch.old_vector;
     cpumask_and(&tmp_mask, desc->arch.old_cpu_mask, &cpu_online_map);
 
-    for_each_cpu_mask(cpu, tmp_mask) {
+    for_each_cpu(cpu, &tmp_mask) {
         ASSERT( per_cpu(vector_irq, cpu)[old_vector] == irq );
         TRACE_3D(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu);
         per_cpu(vector_irq, cpu)[old_vector] = -1;
@@ -451,7 +451,7 @@
     else
         irq_used_vectors = irq_get_used_vector_mask(irq);
 
-    for_each_cpu_mask(cpu, *mask) {
+    for_each_cpu(cpu, mask) {
         int new_cpu;
         int vector, offset;
 
@@ -481,7 +481,7 @@
             && test_bit(vector, irq_used_vectors) )
             goto next;
 
-        for_each_cpu_mask(new_cpu, tmp_mask)
+        for_each_cpu(new_cpu, &tmp_mask)
             if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                 goto next;
         /* Found one! */
@@ -493,7 +493,7 @@
             desc->arch.old_vector = desc->arch.vector;
         }
         trace_irq_mask(TRC_HW_IRQ_ASSIGN_VECTOR, irq, vector, &tmp_mask);
-        for_each_cpu_mask(new_cpu, tmp_mask)
+        for_each_cpu(new_cpu, &tmp_mask)
             per_cpu(vector_irq, new_cpu)[vector] = irq;
         desc->arch.vector = vector;
         cpumask_copy(desc->arch.cpu_mask, &tmp_mask);
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/microcode.c
--- a/xen/arch/x86/microcode.c  Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/microcode.c  Tue Nov 08 10:36:10 2011 +0100
@@ -125,7 +125,7 @@
     if ( error )
         info->error = error;
 
-    info->cpu = next_cpu(info->cpu, cpu_online_map);
+    info->cpu = cpumask_next(info->cpu, &cpu_online_map);
     if ( info->cpu < nr_cpu_ids )
         return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);
 
@@ -158,7 +158,7 @@
 
     info->buffer_size = len;
     info->error = 0;
-    info->cpu = first_cpu(cpu_online_map);
+    info->cpu = cpumask_first(&cpu_online_map);
 
     return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);
 }
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/platform_hypercall.c
--- a/xen/arch/x86/platform_hypercall.c Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/platform_hypercall.c Tue Nov 08 10:36:10 2011 +0100
@@ -366,7 +366,7 @@
             goto out;
         guest_from_compat_handle(idletimes, op->u.getidletime.idletime);
 
-        for_each_cpu_mask ( cpu, *cpumap )
+        for_each_cpu ( cpu, cpumap )
         {
             if ( idle_vcpu[cpu] == NULL )
                 cpumask_clear_cpu(cpu, cpumap);
@@ -460,7 +460,7 @@
                 g_info->flags |= XEN_PCPU_FLAGS_ONLINE;
         }
 
-        g_info->max_present = last_cpu(cpu_present_map);
+        g_info->max_present = cpumask_last(&cpu_present_map);
 
         put_cpu_maps();
 
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/setup.c      Tue Nov 08 10:36:10 2011 +0100
@@ -229,9 +229,9 @@
          * Find remaining CPU with longest-prefix match on APIC ID.
          * Among identical longest-prefix matches, pick the smallest APIC ID.
          */
-        for ( j = next_cpu(i, cpu_present_map);
+        for ( j = cpumask_next(i, &cpu_present_map);
               j < nr_cpu_ids;
-              j = next_cpu(j, cpu_present_map) )
+              j = cpumask_next(j, &cpu_present_map) )
         {
             diff = x86_cpu_to_apicid[j] ^ apicid;
             while ( diff & (diff-1) )
@@ -248,12 +248,12 @@
         /* If no match then there must be no CPUs remaining to consider. */
         if ( min_cpu >= nr_cpu_ids )
         {
-            BUG_ON(next_cpu(i, cpu_present_map) < nr_cpu_ids);
+            BUG_ON(cpumask_next(i, &cpu_present_map) < nr_cpu_ids);
             break;
         }
 
         /* Switch the best-matching CPU with the next CPU in logical order. */
-        j = next_cpu(i, cpu_present_map);
+        j = cpumask_next(i, &cpu_present_map);
         apicid = x86_cpu_to_apicid[min_cpu];
         x86_cpu_to_apicid[min_cpu] = x86_cpu_to_apicid[j];
         x86_cpu_to_apicid[j] = apicid;
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/smp.c        Tue Nov 08 10:36:10 2011 +0100
@@ -182,7 +182,7 @@
 
     local_irq_save(flags);
 
-    for_each_cpu_mask ( query_cpu, *mask )
+    for_each_cpu ( query_cpu, mask )
     {
         if ( !cpu_online(query_cpu) || (query_cpu == smp_processor_id()) )
             continue;
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/smpboot.c    Tue Nov 08 10:36:10 2011 +0100
@@ -248,7 +248,7 @@
 
     if ( c[cpu].x86_num_siblings > 1 )
     {
-        for_each_cpu_mask ( i, cpu_sibling_setup_map )
+        for_each_cpu ( i, &cpu_sibling_setup_map )
         {
             if ( cpu_has(c, X86_FEATURE_TOPOEXT) ) {
                 if ( (c[cpu].phys_proc_id == c[i].phys_proc_id) &&
@@ -273,7 +273,7 @@
         return;
     }
 
-    for_each_cpu_mask ( i, cpu_sibling_setup_map )
+    for_each_cpu ( i, &cpu_sibling_setup_map )
     {
         if ( c[cpu].phys_proc_id == c[i].phys_proc_id )
         {
@@ -814,7 +814,7 @@
     int sibling;
     struct cpuinfo_x86 *c = cpu_data;
 
-    for_each_cpu_mask ( sibling, *per_cpu(cpu_core_mask, cpu) )
+    for_each_cpu ( sibling, per_cpu(cpu_core_mask, cpu) )
     {
         cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, sibling));
         /* Last thread sibling in this cpu core going down. */
@@ -822,7 +822,7 @@
             c[sibling].booted_cores--;
     }
    
-    for_each_cpu_mask(sibling, *per_cpu(cpu_sibling_mask, cpu))
+    for_each_cpu(sibling, per_cpu(cpu_sibling_mask, cpu))
         cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, sibling));
     cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
     cpumask_clear(per_cpu(cpu_core_mask, cpu));
diff -r 52d242adc94e -r d89f1b65b512 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c     Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/arch/x86/sysctl.c     Tue Nov 08 10:36:10 2011 +0100
@@ -103,7 +103,7 @@
         uint32_t i, max_cpu_index, last_online_cpu;
         xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
 
-        last_online_cpu = last_cpu(cpu_online_map);
+        last_online_cpu = cpumask_last(&cpu_online_map);
         max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
         ti->max_cpu_index = last_online_cpu;
 
diff -r 52d242adc94e -r d89f1b65b512 xen/common/cpu.c
--- a/xen/common/cpu.c  Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/cpu.c  Tue Nov 08 10:36:10 2011 +0100
@@ -205,7 +205,7 @@
 
     printk("Enabling non-boot CPUs  ...\n");
 
-    for_each_cpu_mask ( cpu, frozen_cpus )
+    for_each_cpu ( cpu, &frozen_cpus )
     {
         if ( (error = cpu_up(cpu)) )
         {
diff -r 52d242adc94e -r d89f1b65b512 xen/common/cpupool.c
--- a/xen/common/cpupool.c      Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/cpupool.c      Tue Nov 08 10:36:10 2011 +0100
@@ -494,7 +494,7 @@
                         op->cpupool_id, cpu);
         spin_lock(&cpupool_lock);
         if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
-            cpu = first_cpu(cpupool_free_cpus);
+            cpu = cpumask_first(&cpupool_free_cpus);
         ret = -EINVAL;
         if ( cpu >= nr_cpu_ids )
             goto addcpu_out;
diff -r 52d242adc94e -r d89f1b65b512 xen/common/domctl.c
--- a/xen/common/domctl.c       Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/domctl.c       Tue Nov 08 10:36:10 2011 +0100
@@ -190,7 +190,7 @@
     cpu = cpumask_first(&cpu_exclude_map);
     if ( cpumask_weight(&cpu_exclude_map) > 1 )
         cpu = cpumask_next(cpu, &cpu_exclude_map);
-    for_each_cpu_mask(i, *online)
+    for_each_cpu(i, online)
     {
         if ( cpumask_test_cpu(i, &cpu_exclude_map) )
             continue;
@@ -541,7 +541,7 @@
 
             cpu = (i == 0) ?
                 default_vcpu0_location(online) :
-                cycle_cpu(d->vcpu[i-1]->processor, *online);
+                cpumask_cycle(d->vcpu[i-1]->processor, online);
 
             if ( alloc_vcpu(d, i, cpu) == NULL )
                 goto maxvcpu_out;
diff -r 52d242adc94e -r d89f1b65b512 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c   Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/keyhandler.c   Tue Nov 08 10:36:10 2011 +0100
@@ -128,7 +128,7 @@
         return;
 
     /* Normal handling: synchronously dump the remaining CPUs' states. */
-    for_each_cpu_mask ( cpu, dump_execstate_mask )
+    for_each_cpu ( cpu, &dump_execstate_mask )
     {
         smp_send_state_dump(cpu);
         while ( cpumask_test_cpu(cpu, &dump_execstate_mask) )
diff -r 52d242adc94e -r d89f1b65b512 xen/common/perfc.c
--- a/xen/common/perfc.c        Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/perfc.c        Tue Nov 08 10:36:10 2011 +0100
@@ -211,14 +211,14 @@
         {
         case TYPE_SINGLE:
         case TYPE_S_SINGLE:
-            for_each_cpu_mask ( cpu, perfc_cpumap )
+            for_each_cpu ( cpu, &perfc_cpumap )
                 perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
             ++j;
             break;
         case TYPE_ARRAY:
         case TYPE_S_ARRAY:
            memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
-            for_each_cpu_mask ( cpu, perfc_cpumap )
+            for_each_cpu ( cpu, &perfc_cpumap )
             {
                 perfc_t *counters = per_cpu(perfcounters, cpu) + j;
                 unsigned int k;
diff -r 52d242adc94e -r d89f1b65b512 xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c        Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/sched_credit2.c        Tue Nov 08 10:36:10 2011 +0100
@@ -521,7 +521,7 @@
     cpumask_andnot(&mask, &rqd->active, &rqd->idle);
     cpumask_andnot(&mask, &mask, &rqd->tickled);
 
-    for_each_cpu_mask(i, mask)
+    for_each_cpu(i, &mask)
     {
         struct csched_vcpu * cur;
 
@@ -1051,7 +1051,7 @@
         else
         {
             d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, 
svc->vcpu->vcpu_id);
-            new_cpu = first_cpu(svc->migrate_rqd->active);
+            new_cpu = cpumask_first(&svc->migrate_rqd->active);
             goto out_up;
         }
     }
@@ -1061,7 +1061,7 @@
     min_avgload = MAX_LOAD;
 
     /* Find the runqueue with the lowest instantaneous load */
-    for_each_cpu_mask(i, prv->active_queues)
+    for_each_cpu(i, &prv->active_queues)
     {
         struct csched_runqueue_data *rqd;
         s_time_t rqd_avgload;
@@ -1099,7 +1099,7 @@
     else
     {
         BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active));
-        new_cpu = first_cpu(prv->rqd[min_rqi].active);
+        new_cpu = cpumask_first(&prv->rqd[min_rqi].active);
     }
 
 out_up:
@@ -1179,7 +1179,7 @@
             on_runq=1;
         }
         __runq_deassign(svc);
-        svc->vcpu->processor = first_cpu(trqd->active);
+        svc->vcpu->processor = cpumask_first(&trqd->active);
         __runq_assign(svc, trqd);
         if ( on_runq )
         {
@@ -1219,7 +1219,7 @@
 
     st.load_delta = 0;
 
-    for_each_cpu_mask(i, prv->active_queues)
+    for_each_cpu(i, &prv->active_queues)
     {
         s_time_t delta;
         
@@ -1618,7 +1618,7 @@
         {
             int rq;
             other_rqi = -2;
-            for_each_cpu_mask ( rq, CSCHED_PRIV(ops)->active_queues )
+            for_each_cpu ( rq, &CSCHED_PRIV(ops)->active_queues )
             {
                 if ( scurr->rqd == &CSCHED_PRIV(ops)->rqd[rq] )
                 {
@@ -1803,7 +1803,7 @@
            "\tdefault-weight     = %d\n",
            cpumask_weight(&prv->active_queues),
            CSCHED_DEFAULT_WEIGHT);
-    for_each_cpu_mask(i, prv->active_queues)
+    for_each_cpu(i, &prv->active_queues)
     {
         s_time_t fraction;
         
diff -r 52d242adc94e -r d89f1b65b512 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/sched_sedf.c   Tue Nov 08 10:36:10 2011 +0100
@@ -442,7 +442,7 @@
 
     online = SEDF_CPUONLINE(v->domain->cpupool);
     cpumask_and(&online_affinity, v->cpu_affinity, online);
-    return first_cpu(online_affinity);
+    return cpumask_first(&online_affinity);
 }
 
 /*
@@ -1322,7 +1322,7 @@
 {
     struct vcpu *p;
     struct domain      *d;
-    unsigned int        cpu, nr_cpus = last_cpu(cpu_online_map) + 1;
+    unsigned int        cpu, nr_cpus = cpumask_last(&cpu_online_map) + 1;
     int                *sumw = xzalloc_array(int, nr_cpus);
     s_time_t           *sumt = xzalloc_array(s_time_t, nr_cpus);
 
diff -r 52d242adc94e -r d89f1b65b512 xen/common/schedule.c
--- a/xen/common/schedule.c     Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/schedule.c     Tue Nov 08 10:36:10 2011 +0100
@@ -1450,7 +1450,7 @@
     printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
     SCHED_OP(sched, dump_settings);
 
-    for_each_cpu_mask (i, *cpus)
+    for_each_cpu (i, cpus)
     {
         pcpu_schedule_lock(i);
         printk("CPU[%02d] ", i);
diff -r 52d242adc94e -r d89f1b65b512 xen/common/softirq.c
--- a/xen/common/softirq.c      Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/softirq.c      Tue Nov 08 10:36:10 2011 +0100
@@ -74,7 +74,7 @@
     cpumask_t send_mask;
 
     cpumask_clear(&send_mask);
-    for_each_cpu_mask(cpu, *mask)
+    for_each_cpu(cpu, mask)
         if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
             cpumask_set_cpu(cpu, &send_mask);
 
diff -r 52d242adc94e -r d89f1b65b512 xen/common/stop_machine.c
--- a/xen/common/stop_machine.c Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/stop_machine.c Tue Nov 08 10:36:10 2011 +0100
@@ -101,7 +101,7 @@
 
     smp_wmb();
 
-    for_each_cpu_mask ( i, allbutself )
+    for_each_cpu ( i, &allbutself )
         tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);
 
     stopmachine_set_state(STOPMACHINE_PREPARE);
diff -r 52d242adc94e -r d89f1b65b512 xen/common/timer.c
--- a/xen/common/timer.c        Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/common/timer.c        Tue Nov 08 10:36:10 2011 +0100
@@ -548,7 +548,7 @@
 
 static void migrate_timers_from_cpu(unsigned int old_cpu)
 {
-    unsigned int new_cpu = first_cpu(cpu_online_map);
+    unsigned int new_cpu = cpumask_any(&cpu_online_map);
     struct timers *old_ts, *new_ts;
     struct timer *t;
     bool_t notify = 0;
diff -r 52d242adc94e -r d89f1b65b512 xen/drivers/acpi/pmstat.c
--- a/xen/drivers/acpi/pmstat.c Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/drivers/acpi/pmstat.c Tue Nov 08 10:36:10 2011 +0100
@@ -223,7 +223,7 @@
 
     if ( !(affected_cpus = xzalloc_array(uint32_t, op->u.get_para.cpu_num)) )
         return -ENOMEM;
-    for_each_cpu_mask(cpu, *policy->cpus)
+    for_each_cpu(cpu, policy->cpus)
         affected_cpus[j++] = cpu;
     ret = copy_to_guest(op->u.get_para.affected_cpus,
                        affected_cpus, op->u.get_para.cpu_num);
diff -r 52d242adc94e -r d89f1b65b512 xen/drivers/cpufreq/cpufreq_ondemand.c
--- a/xen/drivers/cpufreq/cpufreq_ondemand.c    Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/drivers/cpufreq/cpufreq_ondemand.c    Tue Nov 08 10:36:10 2011 +0100
@@ -122,7 +122,7 @@
         return;
 
     /* Get Idle Time */
-    for_each_cpu_mask(j, *policy->cpus) {
+    for_each_cpu(j, policy->cpus) {
         uint64_t idle_ns, total_idle_ns;
         uint64_t load, load_freq, freq_avg;
         struct cpu_dbs_info_s *j_dbs_info;
@@ -233,7 +233,7 @@
 
         dbs_enable++;
 
-        for_each_cpu_mask(j, *policy->cpus) {
+        for_each_cpu(j, policy->cpus) {
             struct cpu_dbs_info_s *j_dbs_info;
             j_dbs_info = &per_cpu(cpu_dbs_info, j);
             j_dbs_info->cur_policy = policy;
diff -r 52d242adc94e -r d89f1b65b512 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Tue Nov 08 10:36:10 2011 +0100
@@ -1033,7 +1033,7 @@
     msg.address_lo = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
     msg.address_lo |= MSI_PHYSICAL_MODE << 2;
     msg.address_lo |= MSI_REDIRECTION_HINT_MODE << 3;
-    dest = cpu_physical_id(first_cpu(mask));
+    dest = cpu_physical_id(cpumask_first(mask));
     msg.address_lo |= dest << MSI_TARGET_CPU_SHIFT;
 #endif
 
diff -r 52d242adc94e -r d89f1b65b512 xen/include/asm-ia64/linux-xen/asm/acpi.h
--- a/xen/include/asm-ia64/linux-xen/asm/acpi.h Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/acpi.h Tue Nov 08 10:36:10 2011 +0100
@@ -139,7 +139,7 @@
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)  \
-       for_each_cpu_mask((cpu), early_cpu_possible_map)
+       for_each_cpu(cpu, &early_cpu_possible_map)
 
 static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
 {
diff -r 52d242adc94e -r d89f1b65b512 xen/include/asm-x86/flushtlb.h
--- a/xen/include/asm-x86/flushtlb.h    Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/include/asm-x86/flushtlb.h    Tue Nov 08 10:36:10 2011 +0100
@@ -52,7 +52,7 @@
 #define tlbflush_filter(mask, page_timestamp)                           \
 do {                                                                    \
     unsigned int cpu;                                                   \
-    for_each_cpu_mask ( cpu, mask )                                     \
+    for_each_cpu ( cpu, &(mask) )                                       \
         if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) ) \
             cpumask_clear_cpu(cpu, &(mask));                            \
 } while ( 0 )
diff -r 52d242adc94e -r d89f1b65b512 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Tue Nov 08 10:35:12 2011 +0100
+++ b/xen/include/xen/cpumask.h Tue Nov 08 10:36:10 2011 +0100
@@ -37,18 +37,19 @@
  * void cpumask_shift_right(dst, src, n) Shift right
  * void cpumask_shift_left(dst, src, n)        Shift left
  *
- * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
- * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
- * int last_cpu(mask)                  Number highest set bit, or NR_CPUS
- * int cycle_cpu(cpu, mask)            Next cpu cycling from 'cpu', or NR_CPUS
+ * int cpumask_first(mask)             Number lowest set bit, or NR_CPUS
+ * int cpumask_next(cpu, mask)         Next cpu past 'cpu', or NR_CPUS
+ * int cpumask_last(mask)              Number highest set bit, or NR_CPUS
+ * int cpumask_any(mask)               Any cpu in mask, or NR_CPUS
+ * int cpumask_cycle(cpu, mask)                Next cpu cycling from 'cpu', or NR_CPUS
  *
- * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
+ * const cpumask_t *cpumask_of(cpu)    Return cpumask with bit 'cpu' set
  * unsigned long *cpumask_bits(mask)   Array of unsigned long's in mask
  *
  * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
  * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
  *
- * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
+ * for_each_cpu(cpu, mask)             for-loop cpu over mask
  *
  * int num_online_cpus()               Number of online CPUs
  * int num_possible_cpus()             Number of all possible CPUs
@@ -210,42 +211,43 @@
        bitmap_shift_left(dstp->bits, srcp->bits, n, nr_cpumask_bits);
 }
 
-#define cpumask_first(src) __first_cpu(src, nr_cpu_ids)
-#define first_cpu(src) __first_cpu(&(src), nr_cpu_ids)
-static inline int __first_cpu(const cpumask_t *srcp, int nbits)
+static inline int cpumask_first(const cpumask_t *srcp)
 {
-       return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+       return min_t(int, nr_cpu_ids, find_first_bit(srcp->bits, nr_cpu_ids));
 }
 
-#define cpumask_next(n, src) __next_cpu(n, src, nr_cpu_ids)
-#define next_cpu(n, src) __next_cpu((n), &(src), nr_cpu_ids)
-static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
+static inline int cpumask_next(int n, const cpumask_t *srcp)
 {
-       return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+       /* -1 is a legal arg here. */
+       if (n != -1)
+               cpumask_check(n);
+
+       return min_t(int, nr_cpu_ids,
+                     find_next_bit(srcp->bits, nr_cpu_ids, n + 1));
 }
 
-#define cpumask_last(src) __last_cpu(src, nr_cpu_ids)
-#define last_cpu(src) __last_cpu(&(src), nr_cpu_ids)
-static inline int __last_cpu(const cpumask_t *srcp, int nbits)
+static inline int cpumask_last(const cpumask_t *srcp)
 {
-       int cpu, pcpu = nbits;
-       for (cpu = __first_cpu(srcp, nbits);
-            cpu < nbits;
-            cpu = __next_cpu(cpu, srcp, nbits))
+       int cpu, pcpu = nr_cpu_ids;
+
+       for (cpu = cpumask_first(srcp);
+            cpu < nr_cpu_ids;
+            cpu = cpumask_next(cpu, srcp))
                pcpu = cpu;
        return pcpu;
 }
 
-#define cpumask_cycle(n, src) __cycle_cpu(n, src, nr_cpu_ids)
-#define cycle_cpu(n, src) __cycle_cpu((n), &(src), nr_cpu_ids)
-static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
+static inline int cpumask_cycle(int n, const cpumask_t *srcp)
 {
-    int nxt = __next_cpu(n, srcp, nbits);
-    if (nxt == nbits)
-        nxt = __first_cpu(srcp, nbits);
+    int nxt = cpumask_next(n, srcp);
+
+    if (nxt == nr_cpu_ids)
+        nxt = cpumask_first(srcp);
     return nxt;
 }
 
+#define cpumask_any(srcp) cpumask_first(srcp)
+
 /*
  * Special-case data structure for "single bit set only" constant CPU masks.
  *
@@ -262,8 +264,6 @@
        return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
 }
 
-#define cpumask_of_cpu(cpu) (*cpumask_of(cpu))
-
 #if defined(__ia64__) /* XXX needs cleanup */
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
@@ -366,12 +366,13 @@
 #endif
 
 #if NR_CPUS > 1
-#define for_each_cpu_mask(cpu, mask)           \
-       for ((cpu) = first_cpu(mask);           \
-               (cpu) < nr_cpu_ids;             \
-               (cpu) = next_cpu((cpu), (mask)))
+#define for_each_cpu(cpu, mask)                        \
+       for ((cpu) = cpumask_first(mask);       \
+            (cpu) < nr_cpu_ids;                \
+            (cpu) = cpumask_next(cpu, mask))
 #else /* NR_CPUS == 1 */
-#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_cpu(cpu, mask)                        \
+       for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)(mask))
 #endif /* NR_CPUS */
 
 /*
@@ -450,18 +451,9 @@
 #define cpu_present(cpu)       ((cpu) == 0)
 #endif
 
-#define any_online_cpu(mask)                   \
-({                                             \
-       int cpu;                                \
-       for_each_cpu_mask(cpu, (mask))          \
-               if (cpu_online(cpu))            \
-                       break;                  \
-       cpu;                                    \
-})
-
-#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)   for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu)  for_each_cpu_mask((cpu), cpu_present_map)
+#define for_each_possible_cpu(cpu) for_each_cpu(cpu, &cpu_possible_map)
+#define for_each_online_cpu(cpu)   for_each_cpu(cpu, &cpu_online_map)
+#define for_each_present_cpu(cpu)  for_each_cpu(cpu, &cpu_present_map)
 
 /* Copy to/from cpumap provided by control tools. */
 struct xenctl_cpumap;
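
For reference, a usage sketch of the consolidated interface introduced
above (process_cpu() is a hypothetical per-CPU helper; the explicit loop
mirrors the NR_CPUS > 1 expansion of for_each_cpu()):

    unsigned int cpu, next, any;

    /* Explicit walk, equivalent to for_each_cpu(cpu, &cpu_online_map). */
    for (cpu = cpumask_first(&cpu_online_map);
         cpu < nr_cpu_ids;
         cpu = cpumask_next(cpu, &cpu_online_map))
        process_cpu(cpu);

    /* cpumask_cycle() wraps past the last set bit back to the first,
       which suits round-robin placement (cf. the domctl.c hunk). */
    next = cpumask_cycle(smp_processor_id(), &cpu_online_map);

    /* cpumask_any() is an alias for cpumask_first() in this implementation. */
    any = cpumask_any(&cpu_online_map);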
