[Xen-changelog] [xen-unstable] eliminate cpu_set()



# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1320744873 -3600
# Node ID 1f3056be6c8399b6b20157d40a45ecb02a35bfcc
# Parent  f9c4494e77c812e9da1d0576a486dfdca5919904
eliminate cpu_set()

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
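The conversion is mechanical: cpu_set(cpu, mask) was a thin macro around
cpumask_set_cpu(cpu, &(mask)), so each caller now passes a pointer to the
mask explicitly and the macro itself is dropped from xen/include/xen/cpumask.h.
A minimal before/after sketch of the pattern (illustrative only, using a
hypothetical example_mask variable; not part of the patch):

    cpumask_t example_mask;                  /* hypothetical mask for illustration */
    int cpu = smp_processor_id();

    /* old style, via the compatibility macro */
    cpu_set(cpu, example_mask);              /* expanded to cpumask_set_cpu(cpu, &example_mask) */

    /* new style, calling the cpumask accessors directly */
    cpumask_set_cpu(cpu, &example_mask);
    cpumask_clear_cpu(cpu, &example_mask);   /* cpu_clear() call sites are converted the same way */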


diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/ia64/linux-xen/acpi.c
--- a/xen/arch/ia64/linux-xen/acpi.c    Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/ia64/linux-xen/acpi.c    Tue Nov 08 10:34:33 2011 +0100
@@ -557,7 +557,7 @@
            (pa->apic_id << 8) | (pa->local_sapic_eid);
        /* nid should be overridden as logical node id later */
        node_cpuid[srat_num_cpus].nid = pxm;
-       cpu_set(srat_num_cpus, early_cpu_possible_map);
+       cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
        srat_num_cpus++;
 }
 
@@ -917,7 +917,7 @@
                possible, max((possible - available_cpus), 0));
 
        for (i = 0; i < possible; i++)
-               cpu_set(i, cpu_possible_map);
+               cpumask_set_cpu(i, &cpu_possible_map);
 }
 
 #ifndef XEN
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/ia64/linux-xen/setup.c
--- a/xen/arch/ia64/linux-xen/setup.c   Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/ia64/linux-xen/setup.c   Tue Nov 08 10:34:33 2011 +0100
@@ -463,7 +463,7 @@
 {
 #ifdef CONFIG_SMP
        /* If we register an early console, allow CPU 0 to printk */
-       cpu_set(smp_processor_id(), cpu_online_map);
+       cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
 #endif
 }
 
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/ia64/linux-xen/smpboot.c
--- a/xen/arch/ia64/linux-xen/smpboot.c Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/ia64/linux-xen/smpboot.c Tue Nov 08 10:34:33 2011 +0100
@@ -392,7 +392,7 @@
 #else
        lock_ipi_calllock();
 #endif
-       cpu_set(cpuid, cpu_online_map);
+       cpumask_set_cpu(cpuid, &cpu_online_map);
 #ifdef XEN
        unlock_ipi_calllock(flags);
 #else
@@ -437,7 +437,7 @@
        /*
         * Allow the master to continue.
         */
-       cpu_set(cpuid, cpu_callin_map);
+       cpumask_set_cpu(cpuid, &cpu_callin_map);
        Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
 }
 
@@ -625,8 +625,8 @@
        /*
         * We have the boot CPU online for sure.
         */
-       cpu_set(0, cpu_online_map);
-       cpu_set(0, cpu_callin_map);
+       cpumask_set_cpu(0, &cpu_online_map);
+       cpumask_set_cpu(0, &cpu_callin_map);
 
        local_cpu_data->loops_per_jiffy = loops_per_jiffy;
        ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -652,8 +652,8 @@
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-       cpu_set(smp_processor_id(), cpu_online_map);
-       cpu_set(smp_processor_id(), cpu_callin_map);
+       cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
+       cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 }
 
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
--- a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c       Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c       Tue Nov 08 10:34:33 2011 +0100
@@ -214,7 +214,7 @@
        for_each_possible_cpu(cpu) {
                cnode = cpu_to_node(cpu);
                if (!node_isset(cnode, nodes_flushed)) {
-                       cpu_set(cpu, selected_cpus);
+                       cpumask_set_cpu(cpu, &selected_cpus);
                        i++;
                }
                node_set(cnode, nodes_flushed);
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/ia64/xen/mm_init.c
--- a/xen/arch/ia64/xen/mm_init.c       Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/ia64/xen/mm_init.c       Tue Nov 08 10:34:33 2011 +0100
@@ -38,7 +38,7 @@
        ia64_set_psr(psr);
        ia64_srlz_i();
 #ifdef XEN
-       cpu_set(cpu, percpu_set);
+       cpumask_set_cpu(cpu, &percpu_set);
 #endif
 
        /*
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/ia64/xen/tlb_track.c
--- a/xen/arch/ia64/xen/tlb_track.c     Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/ia64/xen/tlb_track.c     Tue Nov 08 10:34:33 2011 +0100
@@ -389,7 +389,7 @@
 
  found:
     BUG_ON(v->processor >= NR_CPUS);
-    cpu_set(v->processor, entry->pcpu_dirty_mask);
+    cpumask_set_cpu(v->processor, &entry->pcpu_dirty_mask);
     BUG_ON(v->vcpu_id >= NR_CPUS);
     vcpu_set(v->vcpu_id, entry->vcpu_dirty_mask);
     perfc_incr(tlb_track_iod_dirtied);
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c      Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c      Tue Nov 08 10:34:33 2011 +0100
@@ -271,9 +271,9 @@
      */
     if ( expires > NOW() || expires == 0 )
     {
-        cpu_set(cpu, cpuidle_mwait_flags);
+        cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
         __mwait(eax, ecx);
-        cpu_clear(cpu, cpuidle_mwait_flags);
+        cpumask_clear_cpu(cpu, &cpuidle_mwait_flags);
     }
 
     if ( expires <= NOW() && expires > 0 )
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Tue Nov 08 10:34:33 2011 +0100
@@ -828,7 +828,7 @@
              * (the MSRs are sticky)
              */
             if (bs.pcc || !bs.recoverable)
-                cpu_set(smp_processor_id(), mce_fatal_cpus);
+                cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
         } else {
             if (mctc != NULL)
                 mctelem_commit(mctc);
@@ -849,7 +849,7 @@
 
     mce_barrier_enter(&mce_trap_bar);
     if ( mctc != NULL && mce_urgent_action(regs, mctc))
-        cpu_set(smp_processor_id(), mce_fatal_cpus);
+        cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
     mce_barrier_exit(&mce_trap_bar);
     /*
      * Wait until everybody has processed the trap.
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/x86/mpparse.c
--- a/xen/arch/x86/mpparse.c    Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/x86/mpparse.c    Tue Nov 08 10:34:33 2011 +0100
@@ -161,7 +161,7 @@
                        return cpu;
                }
                x86_cpu_to_apicid[cpu] = apicid;
-               cpu_set(cpu, cpu_present_map);
+               cpumask_set_cpu(cpu, &cpu_present_map);
        }
 
        if (++num_processors > 8) {
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/x86/numa.c
--- a/xen/arch/x86/numa.c       Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/x86/numa.c       Tue Nov 08 10:34:33 2011 +0100
@@ -288,7 +288,7 @@
 
 __cpuinit void numa_add_cpu(int cpu)
 {
-       cpu_set(cpu, node_to_cpumask[cpu_to_node(cpu)]);
+       cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
 } 
 
 void __cpuinit numa_set_node(int cpu, int node)
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/x86/smpboot.c    Tue Nov 08 10:34:33 2011 +0100
@@ -244,7 +244,7 @@
     int i;
     struct cpuinfo_x86 *c = cpu_data;
 
-    cpu_set(cpu, cpu_sibling_setup_map);
+    cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
 
     if ( c[cpu].x86_num_siblings > 1 )
     {
@@ -380,7 +380,7 @@
      */
     lock_vector_lock();
     __setup_vector_irq(cpu);
-    cpu_set(cpu, cpu_online_map);
+    cpumask_set_cpu(cpu, &cpu_online_map);
     unlock_vector_lock();
 
     init_percpu_time();
@@ -804,8 +804,8 @@
 
 void __init smp_prepare_boot_cpu(void)
 {
-    cpu_set(smp_processor_id(), cpu_online_map);
-    cpu_set(smp_processor_id(), cpu_present_map);
+    cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
+    cpumask_set_cpu(smp_processor_id(), &cpu_present_map);
 }
 
 static void
@@ -933,7 +933,7 @@
                    "break assumed cross-CPU TSC coherency.\n"
                    " ** Consider using boot parameter \"tsc=skewed\" "
                    "which forces TSC emulation where appropriate.\n", cpu);
-        cpu_set(cpu, tsc_sync_cpu_mask);
+        cpumask_set_cpu(cpu, &tsc_sync_cpu_mask);
     }
 
     srat_detect_node(cpu);
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/arch/x86/time.c       Tue Nov 08 10:34:33 2011 +0100
@@ -1573,7 +1573,7 @@
 
 void pit_broadcast_enter(void)
 {
-    cpu_set(smp_processor_id(), pit_broadcast_mask);
+    cpumask_set_cpu(smp_processor_id(), &pit_broadcast_mask);
 }
 
 void pit_broadcast_exit(void)
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/common/cpupool.c
--- a/xen/common/cpupool.c      Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/common/cpupool.c      Tue Nov 08 10:34:33 2011 +0100
@@ -253,7 +253,7 @@
 
     spin_lock(&cpupool_lock);
     ret = cpu_disable_scheduler(cpu);
-    cpu_set(cpu, cpupool_free_cpus);
+    cpumask_set_cpu(cpu, &cpupool_free_cpus);
     if ( !ret )
     {
         ret = schedule_cpu_switch(cpu, NULL);
@@ -409,8 +409,8 @@
 static void cpupool_cpu_add(unsigned int cpu)
 {
     spin_lock(&cpupool_lock);
-    cpu_clear(cpu, cpupool_locked_cpus);
-    cpu_set(cpu, cpupool_free_cpus);
+    cpumask_clear_cpu(cpu, &cpupool_locked_cpus);
+    cpumask_set_cpu(cpu, &cpupool_free_cpus);
     cpupool_assign_cpu_locked(cpupool0, cpu);
     spin_unlock(&cpupool_lock);
 }
@@ -428,7 +428,7 @@
     if ( !cpumask_test_cpu(cpu, cpupool0->cpu_valid))
         ret = -EBUSY;
     else
-        cpu_set(cpu, cpupool_locked_cpus);
+        cpumask_set_cpu(cpu, &cpupool_locked_cpus);
     spin_unlock(&cpupool_lock);
 
     return ret;
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c        Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/common/sched_credit2.c        Tue Nov 08 10:34:33 2011 +0100
@@ -1725,7 +1725,7 @@
     {
         /* Update the idle mask if necessary */
         if ( !cpumask_test_cpu(cpu, &rqd->idle) )
-            cpu_set(cpu, rqd->idle);
+            cpumask_set_cpu(cpu, &rqd->idle);
         /* Make sure avgload gets updated periodically even
          * if there's no activity */
         update_load(ops, rqd, NULL, 0, now);
@@ -1860,7 +1860,7 @@
     INIT_LIST_HEAD(&rqd->runq);
     spin_lock_init(&rqd->lock);
 
-    cpu_set(rqi, prv->active_queues);
+    cpumask_set_cpu(rqi, &prv->active_queues);
 }
 
 static void deactivate_runqueue(struct csched_private *prv, int rqi)
@@ -1927,12 +1927,12 @@
     /* Set the runqueue map */
     prv->runq_map[cpu]=rqi;
     
-    cpu_set(cpu, rqd->idle);
-    cpu_set(cpu, rqd->active);
+    cpumask_set_cpu(cpu, &rqd->idle);
+    cpumask_set_cpu(cpu, &rqd->active);
 
     spin_unlock(old_lock);
 
-    cpu_set(cpu, prv->initialized);
+    cpumask_set_cpu(cpu, &prv->initialized);
 
     spin_unlock_irqrestore(&prv->lock, flags);
 
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/include/asm-ia64/linux-xen/asm/acpi.h
--- a/xen/include/asm-ia64/linux-xen/asm/acpi.h Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/acpi.h Tue Nov 08 10:34:33 2011 +0100
@@ -153,7 +153,7 @@
        high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
 
        for (cpu = low_cpu; cpu < high_cpu; cpu++) {
-               cpu_set(cpu, early_cpu_possible_map);
+               cpumask_set_cpu(cpu, &early_cpu_possible_map);
                if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
                        node_cpuid[cpu].nid = next_nid;
                        next_nid++;
diff -r f9c4494e77c8 -r 1f3056be6c83 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Tue Nov 08 10:33:02 2011 +0100
+++ b/xen/include/xen/cpumask.h Tue Nov 08 10:34:33 2011 +0100
@@ -97,7 +97,6 @@
        return cpu;
 }
 
-#define cpu_set(cpu, dst) cpumask_set_cpu(cpu, &(dst))
 static inline void cpumask_set_cpu(int cpu, volatile cpumask_t *dstp)
 {
        set_bit(cpumask_check(cpu), dstp->bits);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

