[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH v3 06/24] x86: NUMA: Rename some generic functions



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>

Rename some functions in the ACPI code as follows:
 - Rename setup_node to acpi_setup_node
 - Rename bad_srat to numa_failed
 - Rename nodes_cover_memory to arch_sanitize_nodes_memory
   and change the return type to bool
 - Rename acpi_scan_nodes to numa_scan_nodes

Also introduce reset_pxm2node() to reset the pxm2node array.
This avoids having to export pxm2node.

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>
---
 v3: Changed return type of arch_sanitize_nodes_memory
---
 xen/arch/x86/numa.c        |  2 +-
 xen/arch/x86/smpboot.c     |  2 +-
 xen/arch/x86/srat.c        | 55 ++++++++++++++++++++++++++--------------------
 xen/arch/x86/x86_64/mm.c   |  2 +-
 xen/include/asm-x86/acpi.h |  2 +-
 xen/include/asm-x86/numa.h |  2 +-
 6 files changed, 36 insertions(+), 29 deletions(-)

diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 2ea2ec0..44c2e08 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -271,7 +271,7 @@ void __init numa_initmem_init(unsigned long start_pfn, 
unsigned long end_pfn)
 
 #ifdef CONFIG_ACPI_NUMA
     if ( !numa_off &&
-         !acpi_scan_nodes(pfn_to_paddr(start_pfn), pfn_to_paddr(end_pfn)) )
+         !numa_scan_nodes(pfn_to_paddr(start_pfn), pfn_to_paddr(end_pfn)) )
         return;
 #endif
 
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 8d91f6c..78af0d2 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -957,7 +957,7 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t 
pxm)
 
     if ( !srat_disabled() )
     {
-        nodeid_t node = setup_node(pxm);
+        nodeid_t node = acpi_setup_node(pxm);
 
         if ( node == NUMA_NO_NODE )
         {
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 42cca5a..03bc37d 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -85,6 +85,14 @@ static inline bool node_found(unsigned int idx, unsigned int 
pxm)
                (pxm2node[idx].node != NUMA_NO_NODE));
 }
 
+static void reset_pxm2node(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(pxm2node); i++)
+               pxm2node[i].node = NUMA_NO_NODE;
+}
+
 nodeid_t pxm_to_node(unsigned int pxm)
 {
        unsigned int i;
@@ -99,7 +107,7 @@ nodeid_t pxm_to_node(unsigned int pxm)
        return NUMA_NO_NODE;
 }
 
-nodeid_t setup_node(unsigned int pxm)
+nodeid_t acpi_setup_node(unsigned int pxm)
 {
        nodeid_t node;
        unsigned int idx;
@@ -188,15 +196,14 @@ static void __init cutoff_node(nodeid_t i, paddr_t start, 
paddr_t end)
        }
 }
 
-static void __init bad_srat(void)
+static void __init numa_failed(void)
 {
        int i;
        printk(KERN_ERR "SRAT: SRAT not used.\n");
        acpi_numa = -1;
        for (i = 0; i < MAX_LOCAL_APIC; i++)
                apicid_to_node[i] = NUMA_NO_NODE;
-       for (i = 0; i < ARRAY_SIZE(pxm2node); i++)
-               pxm2node[i].node = NUMA_NO_NODE;
+       reset_pxm2node();
        mem_hotplug = 0;
 }
 
@@ -252,7 +259,7 @@ acpi_numa_x2apic_affinity_init(const struct 
acpi_srat_x2apic_cpu_affinity *pa)
        if (srat_disabled())
                return;
        if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
-               bad_srat();
+               numa_failed();
                return;
        }
        if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
@@ -263,9 +270,9 @@ acpi_numa_x2apic_affinity_init(const struct 
acpi_srat_x2apic_cpu_affinity *pa)
        }
 
        pxm = pa->proximity_domain;
-       node = setup_node(pxm);
+       node = acpi_setup_node(pxm);
        if (node == NUMA_NO_NODE) {
-               bad_srat();
+               numa_failed();
                return;
        }
 
@@ -286,7 +293,7 @@ acpi_numa_processor_affinity_init(const struct 
acpi_srat_cpu_affinity *pa)
        if (srat_disabled())
                return;
        if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
-               bad_srat();
+               numa_failed();
                return;
        }
        if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
@@ -297,9 +304,9 @@ acpi_numa_processor_affinity_init(const struct 
acpi_srat_cpu_affinity *pa)
                pxm |= pa->proximity_domain_hi[1] << 16;
                pxm |= pa->proximity_domain_hi[2] << 24;
        }
-       node = setup_node(pxm);
+       node = acpi_setup_node(pxm);
        if (node == NUMA_NO_NODE) {
-               bad_srat();
+               numa_failed();
                return;
        }
        apicid_to_node[pa->apic_id] = node;
@@ -322,7 +329,7 @@ acpi_numa_memory_affinity_init(const struct 
acpi_srat_mem_affinity *ma)
        if (srat_disabled())
                return;
        if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
-               bad_srat();
+               numa_failed();
                return;
        }
        if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
@@ -332,7 +339,7 @@ acpi_numa_memory_affinity_init(const struct 
acpi_srat_mem_affinity *ma)
        {
                dprintk(XENLOG_WARNING,
                 "Too many numa entry, try bigger NR_NODE_MEMBLKS \n");
-               bad_srat();
+               numa_failed();
                return;
        }
 
@@ -341,9 +348,9 @@ acpi_numa_memory_affinity_init(const struct 
acpi_srat_mem_affinity *ma)
        pxm = ma->proximity_domain;
        if (srat_rev < 2)
                pxm &= 0xff;
-       node = setup_node(pxm);
+       node = acpi_setup_node(pxm);
        if (node == NUMA_NO_NODE) {
-               bad_srat();
+               numa_failed();
                return;
        }
        /* It is fine to add this area to the nodes data it will be used later*/
@@ -360,7 +367,7 @@ acpi_numa_memory_affinity_init(const struct 
acpi_srat_mem_affinity *ma)
                       mismatch ? KERN_ERR : KERN_WARNING, pxm, start, end,
                       memblk->start, memblk->end);
                if (mismatch) {
-                       bad_srat();
+                       numa_failed();
                        return;
                }
        } else {
@@ -370,7 +377,7 @@ acpi_numa_memory_affinity_init(const struct 
acpi_srat_mem_affinity *ma)
                       "SRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with PXM %u 
(%"PRIx64"-%"PRIx64")\n",
                       pxm, start, end, node_to_pxm(get_memblk_nodeid(i)),
                       memblk->start, memblk->end);
-               bad_srat();
+               numa_failed();
                return;
        }
        if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
@@ -392,7 +399,7 @@ acpi_numa_memory_affinity_init(const struct 
acpi_srat_mem_affinity *ma)
 
        if (numa_add_memblk(node, start, ma->length)) {
                printk(KERN_ERR "SRAT: node-id %u out of range\n", node);
-               bad_srat();
+               numa_failed();
                return;
        }
 
@@ -405,7 +412,7 @@ acpi_numa_memory_affinity_init(const struct 
acpi_srat_mem_affinity *ma)
 
 /* Sanity check to catch more bad SRATs (they are amazingly common).
    Make sure the PXMs cover all memory. */
-static int __init nodes_cover_memory(void)
+static bool __init arch_sanitize_nodes_memory(void)
 {
        int i;
 
@@ -443,10 +450,10 @@ static int __init nodes_cover_memory(void)
                if (start < end) {
                        printk(KERN_ERR "SRAT: No PXM for e820 range: "
                                "%016Lx - %016Lx\n", start, end);
-                       return 0;
+                       return false;
                }
        }
-       return 1;
+       return true;
 }
 
 void __init acpi_numa_arch_fixup(void) {}
@@ -503,7 +510,7 @@ void __init srat_parse_regions(paddr_t addr)
 }
 
 /* Use the information discovered above to actually set up the nodes. */
-int __init acpi_scan_nodes(paddr_t start, paddr_t end)
+int __init numa_scan_nodes(paddr_t start, paddr_t end)
 {
        unsigned int i;
        nodemask_t all_nodes_parsed;
@@ -517,8 +524,8 @@ int __init acpi_scan_nodes(paddr_t start, paddr_t end)
        if (acpi_numa <= 0)
                return -1;
 
-       if (!nodes_cover_memory()) {
-               bad_srat();
+       if (!arch_sanitize_nodes_memory()) {
+               numa_failed();
                return -1;
        }
 
@@ -529,7 +536,7 @@ int __init acpi_scan_nodes(paddr_t start, paddr_t end)
                memnode_shift = 0;
                printk(KERN_ERR
                     "SRAT: No NUMA node hash function found. Contact 
maintainer\n");
-               bad_srat();
+               numa_failed();
                return -1;
        }
 
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index aa1b94f..a4ffa1f 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1368,7 +1368,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, 
unsigned int pxm)
     if ( !mem_hotadd_check(spfn, epfn) )
         return -EINVAL;
 
-    if ( (node = setup_node(pxm)) == NUMA_NO_NODE )
+    if ( (node = acpi_setup_node(pxm)) == NUMA_NO_NODE )
         return -EINVAL;
 
     if ( !valid_numa_range(spfn << PAGE_SHIFT, epfn << PAGE_SHIFT, node) )
diff --git a/xen/include/asm-x86/acpi.h b/xen/include/asm-x86/acpi.h
index a6fad1e..220c2d7 100644
--- a/xen/include/asm-x86/acpi.h
+++ b/xen/include/asm-x86/acpi.h
@@ -104,7 +104,7 @@ extern void acpi_reserve_bootmem(void);
 #define ARCH_HAS_POWER_INIT    1
 
 extern s8 acpi_numa;
-extern int acpi_scan_nodes(paddr_t start, paddr_t end);
+extern int numa_scan_nodes(paddr_t start, paddr_t end);
 
 #ifdef CONFIG_ACPI_SLEEP
 
diff --git a/xen/include/asm-x86/numa.h b/xen/include/asm-x86/numa.h
index 1bac25c..acf509c 100644
--- a/xen/include/asm-x86/numa.h
+++ b/xen/include/asm-x86/numa.h
@@ -36,7 +36,7 @@ extern bool numa_off;
 
 extern int srat_disabled(void);
 extern void numa_set_node(int cpu, nodeid_t node);
-extern nodeid_t setup_node(unsigned int pxm);
+extern nodeid_t acpi_setup_node(unsigned int pxm);
 extern void srat_detect_node(int cpu);
 
 extern void setup_node_bootmem(nodeid_t nodeid, paddr_t start, paddr_t end);
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.