[Xen-devel] [RFC PATCH v2 07/25] x86: NUMA: Rename some generic functions
From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>

Rename some functions in the ACPI code as follows:
 - Rename setup_node to acpi_setup_node
 - Rename bad_srat to numa_failed
 - Rename nodes_cover_memory to arch_sanitize_nodes_memory
 - Rename acpi_scan_nodes to numa_scan_nodes

Also introduce reset_pxm2node() to reset the pxm2node table.
This avoids exporting pxm2node.

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>
---
 xen/arch/x86/numa.c        |  2 +-
 xen/arch/x86/smpboot.c     |  2 +-
 xen/arch/x86/srat.c        | 51 ++++++++++++++++++++++++++--------------------
 xen/arch/x86/x86_64/mm.c   |  2 +-
 xen/include/asm-x86/acpi.h |  2 +-
 xen/include/asm-x86/numa.h |  2 +-
 6 files changed, 34 insertions(+), 27 deletions(-)

diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 0888d53..3bdab9a 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -298,7 +298,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_ACPI_NUMA
-    if ( !is_numa_off() && !acpi_scan_nodes((uint64_t)start_pfn << PAGE_SHIFT,
+    if ( !is_numa_off() && !numa_scan_nodes((uint64_t)start_pfn << PAGE_SHIFT,
                          (uint64_t)end_pfn << PAGE_SHIFT) )
         return;
 #endif
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 82559ed..203733e 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -959,7 +959,7 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm)
 
     if ( !srat_disabled() )
     {
-        nodeid_t node = setup_node(pxm);
+        nodeid_t node = acpi_setup_node(pxm);
 
         if ( node == NUMA_NO_NODE )
         {
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 983e1d8..3ade36d 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -85,6 +85,14 @@ static inline bool node_found(unsigned int idx, unsigned int pxm)
                (pxm2node[idx].node != NUMA_NO_NODE));
 }
 
+static void reset_pxm2node(void)
+{
+        unsigned int i;
+
+        for (i = 0; i < ARRAY_SIZE(pxm2node); i++)
+                pxm2node[i].node = NUMA_NO_NODE;
+}
+
 nodeid_t pxm_to_node(unsigned int pxm)
 {
         unsigned int i;
@@ -99,7 +107,7 @@ nodeid_t pxm_to_node(unsigned int pxm)
         return NUMA_NO_NODE;
 }
 
-nodeid_t setup_node(unsigned pxm)
+nodeid_t acpi_setup_node(unsigned int pxm)
 {
         nodeid_t node;
         unsigned int idx;
@@ -188,15 +196,14 @@ static void __init cutoff_node(int i, paddr_t start, paddr_t end)
         }
 }
 
-static void __init bad_srat(void)
+static void __init numa_failed(void)
 {
         int i;
 
         printk(KERN_ERR "SRAT: SRAT not used.\n");
         set_acpi_numa(0);
         for (i = 0; i < MAX_LOCAL_APIC; i++)
                 apicid_to_node[i] = NUMA_NO_NODE;
-        for (i = 0; i < ARRAY_SIZE(pxm2node); i++)
-                pxm2node[i].node = NUMA_NO_NODE;
+        reset_pxm2node();
         mem_hotplug = 0;
 }
 
@@ -252,7 +259,7 @@ acpi_numa_x2apic_affinity_init(const struct acpi_srat_x2apic_cpu_affinity *pa)
         if (srat_disabled())
                 return;
         if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
-                bad_srat();
+                numa_failed();
                 return;
         }
         if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
@@ -263,9 +270,9 @@ acpi_numa_x2apic_affinity_init(const struct acpi_srat_x2apic_cpu_affinity *pa)
         }
 
         pxm = pa->proximity_domain;
-        node = setup_node(pxm);
+        node = acpi_setup_node(pxm);
         if (node == NUMA_NO_NODE) {
-                bad_srat();
+                numa_failed();
                 return;
         }
 
@@ -286,7 +293,7 @@ acpi_numa_processor_affinity_init(const struct acpi_srat_cpu_affinity *pa)
         if (srat_disabled())
                 return;
         if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
-                bad_srat();
+                numa_failed();
                 return;
         }
         if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
@@ -297,9 +304,9 @@ acpi_numa_processor_affinity_init(const struct acpi_srat_cpu_affinity *pa)
                 pxm |= pa->proximity_domain_hi[1] << 16;
                 pxm |= pa->proximity_domain_hi[2] << 24;
         }
-        node = setup_node(pxm);
+        node = acpi_setup_node(pxm);
         if (node == NUMA_NO_NODE) {
-                bad_srat();
+                numa_failed();
                 return;
         }
         apicid_to_node[pa->apic_id] = node;
@@ -322,7 +329,7 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
         if (srat_disabled())
                 return;
         if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
-                bad_srat();
+                numa_failed();
                 return;
         }
         if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
@@ -332,7 +339,7 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
         {
                 dprintk(XENLOG_WARNING,
                         "Too many numa entry, try bigger NR_NODE_MEMBLKS \n");
-                bad_srat();
+                numa_failed();
                 return;
         }
 
@@ -341,9 +348,9 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
         pxm = ma->proximity_domain;
         if (srat_rev < 2)
                 pxm &= 0xff;
-        node = setup_node(pxm);
+        node = acpi_setup_node(pxm);
         if (node == NUMA_NO_NODE) {
-                bad_srat();
+                numa_failed();
                 return;
         }
         /* It is fine to add this area to the nodes data it will be used later*/
@@ -360,7 +367,7 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
                        mismatch ? KERN_ERR : KERN_WARNING, pxm, start, end,
                        memblk->start, memblk->end);
                 if (mismatch) {
-                        bad_srat();
+                        numa_failed();
                         return;
                 }
         } else {
@@ -370,7 +377,7 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
                        "SRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with PXM %u (%"PRIx64"-%"PRIx64")\n",
                        pxm, start, end, node_to_pxm(get_memblk_nodeid(i)),
                        memblk->start, memblk->end);
-                bad_srat();
+                numa_failed();
                 return;
         }
         if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
@@ -392,7 +399,7 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
 
         if (numa_add_memblk(node, start, ma->length)) {
                 printk(KERN_ERR "SRAT: node-id %u out of range\n", node);
-                bad_srat();
+                numa_failed();
                 return;
         }
 
@@ -405,7 +412,7 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
 
 /* Sanity check to catch more bad SRATs (they are amazingly common).
    Make sure the PXMs cover all memory. */
-static int __init nodes_cover_memory(void)
+static int __init arch_sanitize_nodes_memory(void)
 {
         int i;
 
@@ -503,7 +510,7 @@ void __init srat_parse_regions(uint64_t addr)
 }
 
 /* Use the information discovered above to actually set up the nodes. */
-int __init acpi_scan_nodes(uint64_t start, uint64_t end)
+int __init numa_scan_nodes(uint64_t start, uint64_t end)
 {
         int i;
         nodemask_t all_nodes_parsed;
@@ -517,8 +524,8 @@ int __init acpi_scan_nodes(uint64_t start, uint64_t end)
         if (get_acpi_numa() == 0)
                 return -1;
 
-        if (!nodes_cover_memory()) {
-                bad_srat();
+        if (!arch_sanitize_nodes_memory()) {
+                numa_failed();
                 return -1;
         }
 
@@ -529,7 +536,7 @@ int __init acpi_scan_nodes(uint64_t start, uint64_t end)
                 memnode_shift = 0;
                 printk(KERN_ERR
                        "SRAT: No NUMA node hash function found. Contact maintainer\n");
-                bad_srat();
+                numa_failed();
                 return -1;
         }
 
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 34f3250..f0082e1 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1369,7 +1369,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
     if ( !mem_hotadd_check(spfn, epfn) )
         return -EINVAL;
 
-    if ( (node = setup_node(pxm)) == NUMA_NO_NODE )
+    if ( (node = acpi_setup_node(pxm)) == NUMA_NO_NODE )
         return -EINVAL;
 
     if ( !valid_numa_range(spfn << PAGE_SHIFT, epfn << PAGE_SHIFT, node) )
diff --git a/xen/include/asm-x86/acpi.h b/xen/include/asm-x86/acpi.h
index 9298d42..445b8e5 100644
--- a/xen/include/asm-x86/acpi.h
+++ b/xen/include/asm-x86/acpi.h
@@ -103,7 +103,7 @@ extern void acpi_reserve_bootmem(void);
 
 #define ARCH_HAS_POWER_INIT 1
 
-extern int acpi_scan_nodes(u64 start, u64 end);
+extern int numa_scan_nodes(u64 start, u64 end);
 #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
 
 #ifdef CONFIG_ACPI_SLEEP
diff --git a/xen/include/asm-x86/numa.h b/xen/include/asm-x86/numa.h
index ae5768b..7237ad1 100644
--- a/xen/include/asm-x86/numa.h
+++ b/xen/include/asm-x86/numa.h
@@ -32,7 +32,7 @@ extern void numa_add_cpu(int cpu);
 extern void numa_init_array(void);
 extern bool srat_disabled(void);
 extern void numa_set_node(int cpu, nodeid_t node);
-extern nodeid_t setup_node(unsigned int pxm);
+extern nodeid_t acpi_setup_node(unsigned int pxm);
 extern void srat_detect_node(int cpu);
 extern void setup_node_bootmem(nodeid_t nodeid, paddr_t start, paddr_t end);
-- 
2.7.4
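
For readers new to this code: pxm2node maps ACPI proximity domains (PXMs) to
Xen node IDs, and the point of the new reset_pxm2node() helper is that the
table can stay private to srat.c while error paths such as numa_failed() can
still wipe it. Below is a minimal standalone sketch of that pattern; the
struct layout, the MAX_PXM_ENTRIES bound, and the allocation logic inside
acpi_setup_node() are simplified stand-ins for illustration, not the real Xen
definitions.

#include <stdint.h>
#include <stdio.h>

#define NUMA_NO_NODE     0xffu  /* sentinel: no node assigned */
#define MAX_PXM_ENTRIES  256u   /* hypothetical table size, for illustration */

typedef uint8_t nodeid_t;

/* The PXM-to-node table stays file-local; nothing outside this
 * translation unit can touch the array directly. */
static struct pxm2node_entry {
    unsigned int pxm;
    nodeid_t node;
} pxm2node[MAX_PXM_ENTRIES];

/* Helper mirroring the patch: wipe every mapping back to "unset".
 * Exporting this helper (rather than the array) keeps the layout private. */
static void reset_pxm2node(void)
{
    unsigned int i;

    for (i = 0; i < MAX_PXM_ENTRIES; i++)
        pxm2node[i].node = (nodeid_t)NUMA_NO_NODE;
}

/* Simplified flavour of acpi_setup_node(): return the node already bound
 * to this PXM, or bind the first free slot to a fresh node ID. */
static nodeid_t acpi_setup_node(unsigned int pxm)
{
    static nodeid_t next_node;  /* next unused node ID */
    unsigned int i;

    for (i = 0; i < MAX_PXM_ENTRIES; i++)
        if (pxm2node[i].node != (nodeid_t)NUMA_NO_NODE &&
            pxm2node[i].pxm == pxm)
            return pxm2node[i].node;

    for (i = 0; i < MAX_PXM_ENTRIES; i++)
        if (pxm2node[i].node == (nodeid_t)NUMA_NO_NODE) {
            pxm2node[i].pxm = pxm;
            pxm2node[i].node = next_node++;
            return pxm2node[i].node;
        }

    return (nodeid_t)NUMA_NO_NODE;  /* table full */
}

int main(void)
{
    reset_pxm2node();  /* start from a clean table, as numa_failed() would */
    printf("PXM 0 -> node %u\n", (unsigned int)acpi_setup_node(0));
    printf("PXM 5 -> node %u\n", (unsigned int)acpi_setup_node(5));
    printf("PXM 0 -> node %u\n", (unsigned int)acpi_setup_node(0));
    return 0;
}

The real acpi_setup_node() does more bookkeeping than this; the sketch only
shows why exporting a reset helper is preferable to exporting the table
itself.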