[Xen-devel] [PATCH 2/3] nodemask: Remove implicit addressof from the API

The nodemask API differs from the cpumask API in that each wrapper around
the bitmap operations is further wrapped by a macro which takes the address
of the nodemask objects.

This results in code which is slightly confusing to read, as it doesn't
follow C's calling conventions, and it prohibits the use of more
complicated constructs for specifying parameters.
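
As an illustration (a sketch using a hypothetical local 'mask'; not code
taken from this patch), the old API hides the address-of inside the macro
layer:

    /* Before: the macro inserts the '&' on the caller's behalf. */
    #define node_set(node, dst) __node_set((node), &(dst))

    nodemask_t mask;
    node_set(0, mask);    /* reads like pass-by-value, yet modifies 'mask' */

whereas after this change the indirection is written explicitly at the call
site, as for any other C function taking a pointer:

    node_set(0, &mask);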

Drop all wrapping macros, rename the nodemask static inline functions to drop
the double underscores, and feed MAX_NUMNODES into appropriate locations.
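
Taking nodes_and() as a representative example of the transformation (the
full change is in the nodemask.h hunks below):

    /* Before: a macro wrapper plus a double-underscore helper. */
    #define nodes_and(dst, src1, src2) \
                        __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
    static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
                                   const nodemask_t *src2p, int nbits)
    {
        bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
    }

    /* After: a single function, with MAX_NUMNODES folded in. */
    static inline void nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
                                 const nodemask_t *src2p)
    {
        bitmap_and(dstp->bits, src1p->bits, src2p->bits, MAX_NUMNODES);
    }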

Take the opportunity to drop a compiler workaround for node_isset() (GCC
3.3.2 is long out of support) and implement it as a static inline function.
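
For reference, the old form and its replacement (both visible in the
nodemask.h hunk below):

    /* Before: a bare #define, kept to avoid worse code from GCC 3.3.2. */
    #define node_isset(node, nodemask) test_bit((node), (nodemask).bits)

    /* After: a typed static inline, consistent with the rest of the API. */
    static inline int node_isset(int node, const nodemask_t *src)
    {
        return test_bit(node, src->bits);
    }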

Update all callers to use the correct indirection themselves.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>

v2:
 * New
---
 xen/arch/x86/dom0_build.c  |  12 +--
 xen/arch/x86/numa.c        |   8 +-
 xen/arch/x86/srat.c        |  15 ++--
 xen/common/domain.c        |   8 +-
 xen/common/page_alloc.c    |  28 +++----
 xen/common/sched_credit.c  |   2 +-
 xen/common/sysctl.c        |   2 +-
 xen/include/xen/nodemask.h | 181 ++++++++++++++++++---------------------------
 8 files changed, 110 insertions(+), 146 deletions(-)

diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index c69570920c..4af2ee0091 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -231,7 +231,7 @@ unsigned int __init dom0_max_vcpus(void)
 
     if ( pv_shim )
     {
-        nodes_setall(dom0_nodes);
+        nodes_setall(&dom0_nodes);
 
         /*
          * When booting in shim mode APs are not started until the guest brings
@@ -246,11 +246,11 @@ unsigned int __init dom0_max_vcpus(void)
 
     for ( i = 0; i < dom0_nr_pxms; ++i )
         if ( (node = pxm_to_node(dom0_pxms[i])) != NUMA_NO_NODE )
-            node_set(node, dom0_nodes);
-    nodes_and(dom0_nodes, dom0_nodes, node_online_map);
-    if ( nodes_empty(dom0_nodes) )
+            node_set(node, &dom0_nodes);
+    nodes_and(&dom0_nodes, &dom0_nodes, &node_online_map);
+    if ( nodes_empty(&dom0_nodes) )
         dom0_nodes = node_online_map;
-    for_each_node_mask ( node, dom0_nodes )
+    for_each_node_mask ( node, &dom0_nodes )
         cpumask_or(&dom0_cpus, &dom0_cpus, &node_to_cpumask(node));
     cpumask_and(&dom0_cpus, &dom0_cpus, cpupool0->cpu_valid);
     if ( cpumask_empty(&dom0_cpus) )
@@ -344,7 +344,7 @@ unsigned long __init dom0_compute_nr_pages(
     if ( !dom0_mem_set && CONFIG_DOM0_MEM[0] )
         parse_dom0_mem(CONFIG_DOM0_MEM);
 
-    for_each_node_mask ( node, dom0_nodes )
+    for_each_node_mask ( node, &dom0_nodes )
         avail += avail_domheap_pages_region(node, 0, 0) +
                  initial_images_nrpages(node);
 
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index b3c9c12d7f..c36c69e842 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -186,15 +186,15 @@ void __init numa_init_array(void)
        mapping. To avoid this fill in the mapping for all possible
        CPUs, as the number of CPUs is not known yet.
        We round robin the existing nodes. */
-    rr = first_node(node_online_map);
+    rr = first_node(&node_online_map);
     for ( i = 0; i < nr_cpu_ids; i++ )
     {
         if ( cpu_to_node[i] != NUMA_NO_NODE )
             continue;
         numa_set_node(i, rr);
-        rr = next_node(rr, node_online_map);
+        rr = next_node(rr, &node_online_map);
         if ( rr == MAX_NUMNODES )
-            rr = first_node(node_online_map);
+            rr = first_node(&node_online_map);
     }
 }
 
@@ -271,7 +271,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
     /* setup dummy node covering all memory */
     memnode_shift = BITS_PER_LONG - 1;
     memnodemap = _memnodemap;
-    nodes_clear(node_online_map);
+    nodes_clear(&node_online_map);
     node_set_online(0);
     for ( i = 0; i < nr_cpu_ids; i++ )
         numa_set_node(i, 0);
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 47a4267220..348bcfea73 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -228,7 +228,7 @@ acpi_numa_x2apic_affinity_init(const struct acpi_srat_x2apic_cpu_affinity *pa)
        }
 
        apicid_to_node[pa->apic_id] = node;
-       node_set(node, processor_nodes_parsed);
+       node_set(node, &processor_nodes_parsed);
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC %08x -> Node %u\n",
               pxm, pa->apic_id, node);
@@ -261,7 +261,7 @@ acpi_numa_processor_affinity_init(const struct acpi_srat_cpu_affinity *pa)
                return;
        }
        apicid_to_node[pa->apic_id] = node;
-       node_set(node, processor_nodes_parsed);
+       node_set(node, &processor_nodes_parsed);
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC %02x -> Node %u\n",
               pxm, pa->apic_id, node);
@@ -332,7 +332,7 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
        if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
                struct node *nd = &nodes[node];
 
-               if (!node_test_and_set(node, memory_nodes_parsed)) {
+               if (!node_test_and_set(node, &memory_nodes_parsed)) {
                        nd->start = start;
                        nd->end = end;
                } else {
@@ -376,7 +376,7 @@ static int __init nodes_cover_memory(void)
 
                do {
                        found = 0;
-                       for_each_node_mask(j, memory_nodes_parsed)
+                       for_each_node_mask( j, &memory_nodes_parsed )
                                if (start < nodes[j].end
                                    && end > nodes[j].start) {
                                        if (start >= nodes[j].start) {
@@ -480,10 +480,11 @@ int __init acpi_scan_nodes(u64 start, u64 end)
                return -1;
        }
 
-       nodes_or(all_nodes_parsed, memory_nodes_parsed, processor_nodes_parsed);
+       nodes_or(&all_nodes_parsed, &memory_nodes_parsed,
+                &processor_nodes_parsed);
 
        /* Finally register nodes */
-       for_each_node_mask(i, all_nodes_parsed)
+       for_each_node_mask( i, &all_nodes_parsed )
        {
                u64 size = nodes[i].end - nodes[i].start;
                if ( size == 0 )
@@ -495,7 +496,7 @@ int __init acpi_scan_nodes(u64 start, u64 end)
        for (i = 0; i < nr_cpu_ids; i++) {
                if (cpu_to_node[i] == NUMA_NO_NODE)
                        continue;
-               if (!node_isset(cpu_to_node[i], processor_nodes_parsed))
+               if (!node_isset(cpu_to_node[i], &processor_nodes_parsed))
                        numa_set_node(i, NUMA_NO_NODE);
        }
        numa_init_array();
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 2308588052..cf2a963687 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -613,9 +613,9 @@ void domain_update_node_affinity(struct domain *d)
         dom_affinity = cpumask_empty(dom_cpumask_soft) ?
                            dom_cpumask : dom_cpumask_soft;
 
-        nodes_clear(d->node_affinity);
+        nodes_clear(&d->node_affinity);
         for_each_cpu ( cpu, dom_affinity )
-            node_set(cpu_to_node(cpu), d->node_affinity);
+            node_set(cpu_to_node(cpu), &d->node_affinity);
     }
 
     spin_unlock(&d->node_affinity_lock);
@@ -628,7 +628,7 @@ void domain_update_node_affinity(struct domain *d)
 int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity)
 {
     /* Being affine with no nodes is just wrong */
-    if ( nodes_empty(*affinity) )
+    if ( nodes_empty(affinity) )
         return -EINVAL;
 
     spin_lock(&d->node_affinity_lock);
@@ -637,7 +637,7 @@ int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity)
      * Being/becoming explicitly affine to all nodes is not particularly
      * useful. Let's take it as the `reset node affinity` command.
      */
-    if ( nodes_full(*affinity) )
+    if ( nodes_full(affinity) )
     {
         d->auto_node_affinity = 1;
         goto out;
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7bbb44f7d1..7bba5b0b2e 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -820,9 +820,9 @@ static struct page_info *get_free_buddy(unsigned int zone_lo,
     {
         if ( d != NULL )
         {
-            node = next_node(d->last_alloc_node, nodemask);
+            node = next_node(d->last_alloc_node, &nodemask);
             if ( node >= MAX_NUMNODES )
-                node = first_node(nodemask);
+                node = first_node(&nodemask);
         }
         if ( node >= MAX_NUMNODES )
             node = cpu_to_node(smp_processor_id());
@@ -874,23 +874,23 @@ static struct page_info *get_free_buddy(unsigned int zone_lo,
             return NULL;
 
         /* Pick next node. */
-        if ( !node_isset(node, nodemask) )
+        if ( !node_isset(node, &nodemask) )
         {
             /* Very first node may be caller-specified and outside nodemask. */
             ASSERT(!nodemask_retry);
-            first = node = first_node(nodemask);
+            first = node = first_node(&nodemask);
             if ( node < MAX_NUMNODES )
                 continue;
         }
-        else if ( (node = next_node(node, nodemask)) >= MAX_NUMNODES )
-            node = first_node(nodemask);
+        else if ( (node = next_node(node, &nodemask)) >= MAX_NUMNODES )
+            node = first_node(&nodemask);
         if ( node == first )
         {
             /* When we have tried all in nodemask, we fall back to others. */
             if ( (memflags & MEMF_exact_node) || nodemask_retry++ )
                 return NULL;
-            nodes_andnot(nodemask, node_online_map, nodemask);
-            first = node = first_node(nodemask);
+            nodes_andnot(&nodemask, &node_online_map, &nodemask);
+            first = node = first_node(&nodemask);
             if ( node >= MAX_NUMNODES )
                 return NULL;
         }
@@ -1167,7 +1167,7 @@ static unsigned int node_to_scrub(bool get_node)
         node = 0;
 
     if ( node_need_scrub[node] &&
-         (!get_node || !node_test_and_set(node, node_scrubbing)) )
+         (!get_node || !node_test_and_set(node, &node_scrubbing)) )
         return node;
 
     /*
@@ -1178,7 +1178,7 @@ static unsigned int node_to_scrub(bool get_node)
     for ( ; ; )
     {
         do {
-            node = cycle_node(node, node_online_map);
+            node = cycle_node(node, &node_online_map);
         } while ( !cpumask_empty(&node_to_cpumask(node)) &&
                   (node != local_node) );
 
@@ -1201,10 +1201,10 @@ static unsigned int node_to_scrub(bool get_node)
              * then we'd need to take this lock every time we come in here.
              */
             if ( (dist < shortest || closest == NUMA_NO_NODE) &&
-                 !node_test_and_set(node, node_scrubbing) )
+                 !node_test_and_set(node, &node_scrubbing) )
             {
                 if ( closest != NUMA_NO_NODE )
-                    node_clear(closest, node_scrubbing);
+                    node_clear(closest, &node_scrubbing);
                 shortest = dist;
                 closest = node;
             }
@@ -1356,7 +1356,7 @@ bool scrub_free_pages(void)
     spin_unlock(&heap_lock);
 
  out_nolock:
-    node_clear(node, node_scrubbing);
+    node_clear(node, &node_scrubbing);
     return node_to_scrub(false) != NUMA_NO_NODE;
 }
 
@@ -2006,7 +2006,7 @@ static void __init scrub_heap_pages(void)
             continue;
 
         last_distance = INT_MAX;
-        best_node = first_node(node_online_map);
+        best_node = first_node(&node_online_map);
         /* Figure out which NODE CPUs are close. */
         for_each_online_node ( j )
         {
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 3c0d7c7267..611ff26153 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1810,7 +1810,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
             } while( peer_cpu != first_cpu );
 
  next_node:
-            peer_node = cycle_node(peer_node, node_online_map);
+            peer_node = cycle_node(peer_node, &node_online_map);
         } while( peer_node != node );
     }
 
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index 765effde8d..f1884b0ab9 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -280,7 +280,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
         bool_t do_meminfo = !guest_handle_is_null(ni->meminfo);
         bool_t do_distance = !guest_handle_is_null(ni->distance);
 
-        num_nodes = last_node(node_online_map) + 1;
+        num_nodes = last_node(&node_online_map) + 1;
 
         if ( do_meminfo || do_distance )
         {
diff --git a/xen/include/xen/nodemask.h b/xen/include/xen/nodemask.h
index e287399352..cbf5d0ee33 100644
--- a/xen/include/xen/nodemask.h
+++ b/xen/include/xen/nodemask.h
@@ -58,12 +58,6 @@
  * node_set_offline(node)              clear bit 'node' in node_online_map
  *
  * for_each_online_node(node)          for-loop node over node_online_map
- *
- * Subtlety:
- * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
- *    to generate slightly worse code.  So use a simple one-line #define
- *    for node_isset(), instead of wrapping an inline inside a macro, the
- *    way we do the other calls.
  */
 
 #include <xen/kernel.h>
@@ -73,161 +67,131 @@
 typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
 extern nodemask_t _unused_nodemask_arg_;
 
-#define node_set(node, dst) __node_set((node), &(dst))
-static inline void __node_set(int node, volatile nodemask_t *dstp)
+static inline void node_set(int node, volatile nodemask_t *dstp)
 {
        set_bit(node, dstp->bits);
 }
 
-#define node_clear(node, dst) __node_clear((node), &(dst))
-static inline void __node_clear(int node, volatile nodemask_t *dstp)
+static inline void node_clear(int node, volatile nodemask_t *dstp)
 {
        clear_bit(node, dstp->bits);
 }
 
-#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
-static inline void __nodes_setall(nodemask_t *dstp, int nbits)
+static inline void nodes_setall(nodemask_t *dstp)
 {
-       bitmap_fill(dstp->bits, nbits);
+       bitmap_fill(dstp->bits, MAX_NUMNODES);
 }
 
-#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
-static inline void __nodes_clear(nodemask_t *dstp, int nbits)
+static inline void nodes_clear(nodemask_t *dstp)
 {
-       bitmap_zero(dstp->bits, nbits);
+       bitmap_zero(dstp->bits, MAX_NUMNODES);
 }
 
-/* No static inline type checking - see Subtlety (1) above. */
-#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)
+static inline int node_isset(int node, const nodemask_t *src)
+{
+       return test_bit(node, src->bits);
+}
 
-#define node_test_and_set(node, nodemask) \
-                       __node_test_and_set((node), &(nodemask))
-static inline int __node_test_and_set(int node, nodemask_t *addr)
+static inline int node_test_and_set(int node, nodemask_t *addr)
 {
        return test_and_set_bit(node, addr->bits);
 }
 
-#define nodes_and(dst, src1, src2) \
-                       __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline void nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+                             const nodemask_t *src2p)
 {
-       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_and(dstp->bits, src1p->bits, src2p->bits, MAX_NUMNODES);
 }
 
-#define nodes_or(dst, src1, src2) \
-                       __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline void nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
+                            const nodemask_t *src2p)
 {
-       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_or(dstp->bits, src1p->bits, src2p->bits, MAX_NUMNODES);
 }
 
-#define nodes_xor(dst, src1, src2) \
-                       __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline void nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
+                             const nodemask_t *src2p)
 {
-       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, MAX_NUMNODES);
 }
 
-#define nodes_andnot(dst, src1, src2) \
-                       __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline void nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+                                const nodemask_t *src2p)
 {
-       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, MAX_NUMNODES);
 }
 
-#define nodes_complement(dst, src) \
-                       __nodes_complement(&(dst), &(src), MAX_NUMNODES)
-static inline void __nodes_complement(nodemask_t *dstp,
-                                       const nodemask_t *srcp, int nbits)
+static inline void nodes_complement(nodemask_t *dstp, const nodemask_t *srcp)
 {
-       bitmap_complement(dstp->bits, srcp->bits, nbits);
+       bitmap_complement(dstp->bits, srcp->bits, MAX_NUMNODES);
 }
 
-#define nodes_equal(src1, src2) \
-                       __nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_equal(const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline int nodes_equal(const nodemask_t *src1p, const nodemask_t *src2p)
 {
-       return bitmap_equal(src1p->bits, src2p->bits, nbits);
+       return bitmap_equal(src1p->bits, src2p->bits, MAX_NUMNODES);
 }
 
-#define nodes_intersects(src1, src2) \
-                       __nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_intersects(const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline int nodes_intersects(const nodemask_t *src1p,
+                                  const nodemask_t *src2p)
 {
-       return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+       return bitmap_intersects(src1p->bits, src2p->bits, MAX_NUMNODES);
 }
 
-#define nodes_subset(src1, src2) \
-                       __nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_subset(const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline int nodes_subset(const nodemask_t *src1p, const nodemask_t *src2p)
 {
-       return bitmap_subset(src1p->bits, src2p->bits, nbits);
+       return bitmap_subset(src1p->bits, src2p->bits, MAX_NUMNODES);
 }
 
-#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, int nbits)
+static inline int nodes_empty(const nodemask_t *srcp)
 {
-       return bitmap_empty(srcp->bits, nbits);
+       return bitmap_empty(srcp->bits, MAX_NUMNODES);
 }
 
-#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, int nbits)
+static inline int nodes_full(const nodemask_t *srcp)
 {
-       return bitmap_full(srcp->bits, nbits);
+       return bitmap_full(srcp->bits, MAX_NUMNODES);
 }
 
-#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_weight(const nodemask_t *srcp, int nbits)
+static inline int nodes_weight(const nodemask_t *srcp)
 {
-       return bitmap_weight(srcp->bits, nbits);
+       return bitmap_weight(srcp->bits, MAX_NUMNODES);
 }
 
-#define nodes_shift_right(dst, src, n) \
-                       __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_right(nodemask_t *dstp,
-                                       const nodemask_t *srcp, int n, int nbits)
+static inline void nodes_shift_right(nodemask_t *dstp, const nodemask_t *srcp,
+                                    int n)
 {
-       bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
+       bitmap_shift_right(dstp->bits, srcp->bits, n, MAX_NUMNODES);
 }
 
-#define nodes_shift_left(dst, src, n) \
-                       __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_left(nodemask_t *dstp,
-                                       const nodemask_t *srcp, int n, int nbits)
+static inline void nodes_shift_left(nodemask_t *dstp, const nodemask_t *srcp,
+                                   int n)
 {
-       bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+       bitmap_shift_left(dstp->bits, srcp->bits, n, MAX_NUMNODES);
 }
 
 /* FIXME: better would be to fix all architectures to never return
           > MAX_NUMNODES, then the silly min_ts could be dropped. */
 
-#define first_node(src) __first_node(&(src), MAX_NUMNODES)
-static inline int __first_node(const nodemask_t *srcp, int nbits)
+static inline int first_node(const nodemask_t *srcp)
 {
-       return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+       return min_t(int, MAX_NUMNODES,
+                    find_first_bit(srcp->bits, MAX_NUMNODES));
 }
 
-#define next_node(n, src) __next_node((n), &(src), MAX_NUMNODES)
-static inline int __next_node(int n, const nodemask_t *srcp, int nbits)
+static inline int next_node(int n, const nodemask_t *srcp)
 {
-       return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+       return min_t(int, MAX_NUMNODES,
+                    find_next_bit(srcp->bits, MAX_NUMNODES, n + 1));
 }
 
-#define last_node(src) __last_node(&(src), MAX_NUMNODES)
-static inline int __last_node(const nodemask_t *srcp, int nbits)
+static inline int last_node(const nodemask_t *srcp)
 {
-       int node, pnode = nbits;
-       for (node = __first_node(srcp, nbits);
-            node < nbits;
-            node = __next_node(node, srcp, nbits))
+       int node, pnode = MAX_NUMNODES;
+
+       for (node = first_node(srcp);
+            node < MAX_NUMNODES; node = next_node(node, srcp))
                pnode = node;
+
        return pnode;
 }
 
@@ -237,27 +201,26 @@ static inline int __last_node(const nodemask_t *srcp, int nbits)
        if (sizeof(m) == sizeof(unsigned long)) {                       \
                m.bits[0] = 1UL<<(node);                                \
        } else {                                                        \
-               nodes_clear(m);                                         \
-               node_set((node), m);                                    \
+               nodes_clear(&m);                                        \
+               node_set(node, &m);                                     \
        }                                                               \
        m;                                                              \
 })
 
-#define first_unset_node(mask) __first_unset_node(&(mask))
-static inline int __first_unset_node(const nodemask_t *maskp)
+static inline int first_unset_node(const nodemask_t *maskp)
 {
-       return min_t(int,MAX_NUMNODES,
-                       find_first_zero_bit(maskp->bits, MAX_NUMNODES));
+       return min_t(int, MAX_NUMNODES,
+                    find_first_zero_bit(maskp->bits, MAX_NUMNODES));
 }
 
-#define cycle_node(n, src) __cycle_node((n), &(src), MAX_NUMNODES)
-static inline int __cycle_node(int n, const nodemask_t *maskp, int nbits)
+static inline int cycle_node(int n, const nodemask_t *maskp)
 {
-    int nxt = __next_node(n, maskp, nbits);
+       int nxt = next_node(n, maskp);
+
+       if (nxt == MAX_NUMNODES)
+               nxt = first_node(maskp);
 
-    if (nxt == nbits)
-        nxt = __first_node(maskp, nbits);
-    return nxt;
+       return nxt;
 }
 
 #define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)
@@ -305,8 +268,8 @@ static inline int __cycle_node(int n, const nodemask_t *maskp, int nbits)
 extern nodemask_t node_online_map;
 
 #if MAX_NUMNODES > 1
-#define num_online_nodes()     nodes_weight(node_online_map)
-#define node_online(node)      node_isset((node), node_online_map)
+#define num_online_nodes()     nodes_weight(&node_online_map)
+#define node_online(node)      node_isset(node, &node_online_map)
 #else
 #define num_online_nodes()     1
 #define node_online(node)      ((node) == 0)
@@ -321,9 +284,9 @@ extern nodemask_t node_online_map;
        node;                                   \
 })
 
-#define node_set_online(node)     set_bit((node), node_online_map.bits)
-#define node_set_offline(node)    clear_bit((node), node_online_map.bits)
+#define node_set_online(node)     set_bit(node, node_online_map.bits)
+#define node_set_offline(node)    clear_bit(node, node_online_map.bits)
 
-#define for_each_online_node(node) for_each_node_mask((node), node_online_map)
+#define for_each_online_node(node) for_each_node_mask(node, &node_online_map)
 
 #endif /* __LINUX_NODEMASK_H */
-- 
2.11.0

