[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen master] x86/numa: adjust datatypes for node and pxm



commit 54ce2db8b8953b17c6b22042df83193299a991eb
Author:     Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
AuthorDate: Thu Feb 26 14:06:26 2015 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Feb 26 14:06:26 2015 +0100

    x86/numa: adjust datatypes for node and pxm
    
    Use u8-sized node IDs and unsigned PXMs consistently throughout
    code (and introduce nodeid_t type).
    
    Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/irq.c                  |    4 +-
 xen/arch/x86/numa.c                 |   15 ++++++------
 xen/arch/x86/setup.c                |    2 +-
 xen/arch/x86/smpboot.c              |    6 +++-
 xen/arch/x86/srat.c                 |   41 ++++++++++++++++++----------------
 xen/arch/x86/x86_64/mm.c            |    5 ++-
 xen/common/page_alloc.c             |    7 +++--
 xen/drivers/passthrough/vtd/iommu.c |    5 ++-
 xen/include/asm-arm/numa.h          |    4 ++-
 xen/include/asm-x86/irq.h           |    3 +-
 xen/include/asm-x86/numa.h          |   24 +++++++++++---------
 11 files changed, 65 insertions(+), 51 deletions(-)

diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 92e1854..786d1fc 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -153,7 +153,7 @@ int __init bind_irq_vector(int irq, int vector, const cpumask_t *cpu_mask)
 /*
  * Dynamic irq allocate and deallocation for MSI
  */
-int create_irq(int node)
+int create_irq(nodeid_t node)
 {
     int irq, ret;
     struct irq_desc *desc;
@@ -173,7 +173,7 @@ int create_irq(int node)
     {
         cpumask_t *mask = NULL;
 
-        if (node != NUMA_NO_NODE && node >= 0)
+        if ( node != NUMA_NO_NODE )
         {
             mask = &node_to_cpumask(node);
             if (cpumask_empty(mask))
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index e500f33..132d694 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -36,13 +36,13 @@ static typeof(*memnodemap) _memnodemap[64];
 unsigned long memnodemapsize;
 u8 *memnodemap;
 
-unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
+nodeid_t cpu_to_node[NR_CPUS] __read_mostly = {
     [0 ... NR_CPUS-1] = NUMA_NO_NODE
 };
 /*
  * Keep BIOS's CPU2node information, should not be used for memory allocaion
  */
-unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
+nodeid_t apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
     [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
 cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
@@ -66,7 +66,7 @@ int srat_disabled(void)
  * -1 if node overlap or lost ram (shift too big)
  */
 static int __init populate_memnodemap(const struct node *nodes,
-                                      int numnodes, int shift, int *nodeids)
+                                      int numnodes, int shift, nodeid_t *nodeids)
 {
     unsigned long spdx, epdx;
     int i, res = -1;
@@ -151,7 +151,7 @@ static int __init extract_lsb_from_nodes(const struct node *nodes,
 }
 
 int __init compute_hash_shift(struct node *nodes, int numnodes,
-                              int *nodeids)
+                              nodeid_t *nodeids)
 {
     int shift;
 
@@ -173,7 +173,7 @@ int __init compute_hash_shift(struct node *nodes, int numnodes,
     return shift;
 }
 /* initialize NODE_DATA given nodeid and start/end */
-void __init setup_node_bootmem(int nodeid, u64 start, u64 end)
+void __init setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end)
 { 
     unsigned long start_pfn, end_pfn;
 
@@ -295,7 +295,7 @@ __cpuinit void numa_add_cpu(int cpu)
     cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
 } 
 
-void __cpuinit numa_set_node(int cpu, int node)
+void __cpuinit numa_set_node(int cpu, nodeid_t node)
 {
     cpu_to_node[cpu] = node;
 }
@@ -341,7 +341,8 @@ static __init int numa_setup(char *opt)
  */
 void __init init_cpu_to_node(void)
 {
-    int i, node;
+    unsigned int i;
+    nodeid_t node;
 
     for ( i = 0; i < nr_cpu_ids; i++ )
     {
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index d316212..7593533 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -188,7 +188,7 @@ static void __init init_idle_domain(void)
 
 void __devinit srat_detect_node(int cpu)
 {
-    unsigned node;
+    nodeid_t node;
     u32 apicid = x86_cpu_to_apicid[cpu];
 
     node = apicid_to_node[apicid];
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 7ae561c..314e253 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -843,7 +843,7 @@ void __cpu_die(unsigned int cpu)
 
 int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm)
 {
-    int node, cpu = -1;
+    int cpu = -1;
 
     dprintk(XENLOG_DEBUG, "cpu_add apic_id %x acpi_id %x pxm %x\n",
             apic_id, acpi_id, pxm);
@@ -877,7 +877,9 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm)
 
     if ( !srat_disabled() )
     {
-        if ( (node = setup_node(pxm)) < 0 )
+        nodeid_t node = setup_node(pxm);
+
+        if ( node == NUMA_NO_NODE )
         {
             dprintk(XENLOG_WARNING,
                     "Setup node failed for pxm %x\n", pxm);
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 9702ff2..dfabba3 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -30,16 +30,16 @@ static struct node nodes[MAX_NUMNODES] __initdata;
 
 struct pxm2node {
        unsigned pxm;
-       u8 node;
+       nodeid_t node;
 };
 static struct pxm2node __read_mostly pxm2node[MAX_NUMNODES] =
        { [0 ... MAX_NUMNODES - 1] = {.node = NUMA_NO_NODE} };
 
-static int node_to_pxm(unsigned n);
+static unsigned node_to_pxm(nodeid_t n);
 
 static int num_node_memblks;
 static struct node node_memblk_range[NR_NODE_MEMBLKS];
-static int memblk_nodeid[NR_NODE_MEMBLKS];
+static nodeid_t memblk_nodeid[NR_NODE_MEMBLKS];
 
 static inline bool_t node_found(unsigned idx, unsigned pxm)
 {
@@ -47,7 +47,7 @@ static inline bool_t node_found(unsigned idx, unsigned pxm)
                (pxm2node[idx].node != NUMA_NO_NODE));
 }
 
-int pxm_to_node(unsigned pxm)
+nodeid_t pxm_to_node(unsigned pxm)
 {
        unsigned i;
 
@@ -58,13 +58,12 @@ int pxm_to_node(unsigned pxm)
                if (node_found(i, pxm))
                        return pxm2node[i].node;
 
-       /* Extend 0xff to (int)-1 */
-       return (signed char)NUMA_NO_NODE;
+       return NUMA_NO_NODE;
 }
 
-__devinit int setup_node(unsigned pxm)
+__devinit nodeid_t setup_node(unsigned pxm)
 {
-       int node;
+       nodeid_t node;
        unsigned idx;
        static bool_t warned;
 
@@ -90,7 +89,7 @@ __devinit int setup_node(unsigned pxm)
                warned = 1;
        }
 
-       return (signed char)NUMA_NO_NODE;
+       return NUMA_NO_NODE;
 
  finish:
        node = first_unset_node(nodes_found);
@@ -101,7 +100,7 @@ __devinit int setup_node(unsigned pxm)
        return node;
 }
 
-int valid_numa_range(u64 start, u64 end, int node)
+int valid_numa_range(u64 start, u64 end, nodeid_t node)
 {
        int i;
 
@@ -205,8 +204,9 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 void __init
 acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
 {
-       int pxm, node;
-       int apic_id;
+       unsigned pxm;
+       nodeid_t node;
+       u32 apic_id;
 
        if (srat_disabled())
                return;
@@ -218,7 +218,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
                return;
        pxm = pa->proximity_domain;
        node = setup_node(pxm);
-       if (node < 0) {
+       if (node == NUMA_NO_NODE) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
@@ -235,7 +235,9 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
 void __init
 acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 {
-       int pxm, node;
+       unsigned pxm;
+       nodeid_t node;
+
        if (srat_disabled())
                return;
        if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
@@ -251,7 +253,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
                pxm |= pa->proximity_domain_hi[2] << 24;
        }
        node = setup_node(pxm);
-       if (node < 0) {
+       if (node == NUMA_NO_NODE) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
@@ -269,7 +271,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
        struct node *nd;
        u64 start, end;
-       int node, pxm;
+       unsigned pxm;
+       nodeid_t node;
        int i;
 
        if (srat_disabled())
@@ -295,7 +298,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
        if (srat_rev < 2)
                pxm &= 0xff;
        node = setup_node(pxm);
-       if (node < 0) {
+       if (node == NUMA_NO_NODE) {
                printk(KERN_ERR "SRAT: Too many proximity domains.\n");
                bad_srat();
                return;
@@ -481,7 +484,7 @@ int __init acpi_scan_nodes(u64 start, u64 end)
        return 0;
 }
 
-static int node_to_pxm(unsigned n)
+static unsigned node_to_pxm(nodeid_t n)
 {
        unsigned i;
 
@@ -493,7 +496,7 @@ static int node_to_pxm(unsigned n)
        return 0;
 }
 
-int __node_distance(int a, int b)
+int __node_distance(nodeid_t a, nodeid_t b)
 {
        int index;
 
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index d631aee..6875c92 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1343,7 +1343,8 @@ int mem_hotadd_check(unsigned long spfn, unsigned long epfn)
 int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
 {
     struct mem_hotadd_info info;
-    int ret, node;
+    int ret;
+    nodeid_t node;
     unsigned long old_max = max_page, old_total = total_pages;
     unsigned long old_node_start, old_node_span, orig_online;
     unsigned long i;
@@ -1353,7 +1354,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
     if ( !mem_hotadd_check(spfn, epfn) )
         return -EINVAL;
 
-    if ( (node = setup_node(pxm)) == -1 )
+    if ( (node = setup_node(pxm)) == NUMA_NO_NODE )
         return -EINVAL;
 
     if ( !valid_numa_range(spfn << PAGE_SHIFT, epfn << PAGE_SHIFT, node) )
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index a7bdbfd..6bd3b75 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -580,8 +580,8 @@ static struct page_info *alloc_heap_pages(
     unsigned int order, unsigned int memflags,
     struct domain *d)
 {
-    unsigned int first_node, i, j, zone = 0, nodemask_retry = 0;
-    unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
+    unsigned int i, j, zone = 0, nodemask_retry = 0;
+    nodeid_t first_node, node = (nodeid_t)((memflags >> _MEMF_node) - 1);
     unsigned long request = 1UL << order;
     struct page_info *pg;
     nodemask_t nodemask = (d != NULL ) ? d->node_affinity : node_online_map;
@@ -1279,7 +1279,8 @@ static void __init smp_scrub_heap_pages(void *data)
     unsigned long mfn, start, end;
     struct page_info *pg;
     struct scrub_region *r;
-    unsigned int temp_cpu, node, cpu_idx = 0;
+    unsigned int temp_cpu, cpu_idx = 0;
+    nodeid_t node;
     unsigned int cpu = smp_processor_id();
 
     if ( data )
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 2e113d7..1063677 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -190,14 +190,15 @@ u64 alloc_pgtable_maddr(struct acpi_drhd_unit *drhd, unsigned long npages)
     struct acpi_rhsa_unit *rhsa;
     struct page_info *pg, *cur_pg;
     u64 *vaddr;
-    int node = -1, i;
+    nodeid_t node = NUMA_NO_NODE;
+    unsigned int i;
 
     rhsa = drhd_to_rhsa(drhd);
     if ( rhsa )
         node =  pxm_to_node(rhsa->proximity_domain);
 
     pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
-                             (node == -1 ) ? 0 : MEMF_node(node));
+                             (node == NUMA_NO_NODE) ? 0 : MEMF_node(node));
     if ( !pg )
         return 0;
 
diff --git a/xen/include/asm-arm/numa.h b/xen/include/asm-arm/numa.h
index 06a9d5a..a00cb7c 100644
--- a/xen/include/asm-arm/numa.h
+++ b/xen/include/asm-arm/numa.h
@@ -1,11 +1,13 @@
 #ifndef __ARCH_ARM_NUMA_H
 #define __ARCH_ARM_NUMA_H
 
+typedef u8 nodeid_t;
+
 /* Fake one node for now. See also node_online_map. */
 #define cpu_to_node(cpu) 0
 #define node_to_cpumask(node)   (cpu_online_map)
 
-static inline __attribute__((pure)) int phys_to_nid(paddr_t addr)
+static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
 {
     return 0;
 }
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index d3c55f3..a44305e 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -5,6 +5,7 @@
 
 #include <xen/config.h>
 #include <asm/atomic.h>
+#include <asm/numa.h>
 #include <xen/cpumask.h>
 #include <xen/smp.h>
 #include <xen/hvm/irq.h>
@@ -155,7 +156,7 @@ int  init_irq_data(void);
 void clear_irq_vector(int irq);
 
 int irq_to_vector(int irq);
-int create_irq(int node);
+int create_irq(nodeid_t node);
 void destroy_irq(unsigned int irq);
 int assign_irq_vector(int irq, const cpumask_t *);
 
diff --git a/xen/include/asm-x86/numa.h b/xen/include/asm-x86/numa.h
index 5fa51ba..cc5b5d1 100644
--- a/xen/include/asm-x86/numa.h
+++ b/xen/include/asm-x86/numa.h
@@ -5,6 +5,8 @@
 
 #define NODES_SHIFT 6
 
+typedef u8 nodeid_t;
+
 extern int srat_rev;
 
 extern unsigned char cpu_to_node[];
@@ -20,8 +22,8 @@ struct node {
 };
 
 extern int compute_hash_shift(struct node *nodes, int numnodes,
-                             int *nodeids);
-extern int pxm_to_node(unsigned int pxm);
+                             nodeid_t *nodeids);
+extern nodeid_t pxm_to_node(unsigned int pxm);
 
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
 #define VIRTUAL_BUG_ON(x) 
@@ -32,12 +34,12 @@ extern int numa_off;
 
 
 extern int srat_disabled(void);
-extern void numa_set_node(int cpu, int node);
-extern int setup_node(unsigned int pxm);
+extern void numa_set_node(int cpu, nodeid_t node);
+extern nodeid_t setup_node(unsigned int pxm);
 extern void srat_detect_node(int cpu);
 
-extern void setup_node_bootmem(int nodeid, u64 start, u64 end);
-extern unsigned char apicid_to_node[];
+extern void setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end);
+extern nodeid_t apicid_to_node[];
 #ifdef CONFIG_NUMA
 extern void init_cpu_to_node(void);
 
@@ -54,14 +56,14 @@ extern u8 *memnodemap;
 struct node_data {
     unsigned long node_start_pfn;
     unsigned long node_spanned_pages;
-    unsigned int  node_id;
+    nodeid_t      node_id;
 };
 
 extern struct node_data node_data[];
 
-static inline __attribute__((pure)) int phys_to_nid(paddr_t addr) 
+static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
 { 
-       unsigned nid;
+       nodeid_t nid;
        VIRTUAL_BUG_ON((paddr_to_pdx(addr) >> memnode_shift) >= memnodemapsize);
        nid = memnodemap[paddr_to_pdx(addr) >> memnode_shift]; 
        VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); 
@@ -75,7 +77,7 @@ static inline __attribute__((pure)) int phys_to_nid(paddr_t addr)
 #define node_end_pfn(nid)       (NODE_DATA(nid)->node_start_pfn + \
                                 NODE_DATA(nid)->node_spanned_pages)
 
-extern int valid_numa_range(u64 start, u64 end, int node);
+extern int valid_numa_range(u64 start, u64 end, nodeid_t node);
 #else
 #define init_cpu_to_node() do {} while (0)
 #define clear_node_cpumask(cpu) do {} while (0)
@@ -83,6 +85,6 @@ extern int valid_numa_range(u64 start, u64 end, int node);
 #endif
 
 void srat_parse_regions(u64 addr);
-extern int __node_distance(int a, int b);
+extern int __node_distance(nodeid_t a, nodeid_t b);
 
 #endif
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.