Re: [XEN RFC PATCH 34/40] xen: move numa_scan_nodes from x86 to common
On Wed, 11 Aug 2021, Wei Chen wrote:
> After the previous patches preparations, numa_scan_nodes can be
> used by Arm and x86. So we move this function from x86 to common.
> As node_cover_memory will not be used cross files, we restore its
> static attribute in this patch.
>
> Signed-off-by: Wei Chen <wei.chen@xxxxxxx>
> ---
>  xen/arch/x86/srat.c        | 52 ------------------------------------
>  xen/common/numa.c          | 54 +++++++++++++++++++++++++++++++++++++-
>  xen/include/asm-x86/acpi.h |  3 ---
>  xen/include/xen/numa.h     |  3 ++-
>  4 files changed, 55 insertions(+), 57 deletions(-)
>
> diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
> index c979939fdd..c9f019c307 100644
> --- a/xen/arch/x86/srat.c
> +++ b/xen/arch/x86/srat.c
> @@ -361,58 +361,6 @@ void __init srat_parse_regions(u64 addr)
>      pfn_pdx_hole_setup(mask >> PAGE_SHIFT);
>  }
>
> -/* Use the information discovered above to actually set up the nodes. */
> -int __init numa_scan_nodes(u64 start, u64 end)
> -{
> -    int i;
> -    nodemask_t all_nodes_parsed;
> -
> -    /* First clean up the node list */
> -    for (i = 0; i < MAX_NUMNODES; i++)
> -        cutoff_node(i, start, end);
> -
> -#ifdef CONFIG_ACPI_NUMA
> -    if (acpi_numa <= 0)
> -        return -1;
> -#endif
> -
> -    if (!nodes_cover_memory()) {
> -        bad_srat();
> -        return -1;
> -    }
> -
> -    memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
> -                                       memblk_nodeid);
> -
> -    if (memnode_shift < 0) {
> -        printk(KERN_ERR
> -               "SRAT: No NUMA node hash function found. Contact maintainer\n");
> -        bad_srat();
> -        return -1;
> -    }
> -
> -    nodes_or(all_nodes_parsed, memory_nodes_parsed, processor_nodes_parsed);
> -
> -    /* Finally register nodes */
> -    for_each_node_mask(i, all_nodes_parsed)
> -    {
> -        u64 size = nodes[i].end - nodes[i].start;
> -        if ( size == 0 )
> -            printk(KERN_WARNING "SRAT: Node %u has no memory. "
> -                   "BIOS Bug or mis-configured hardware?\n", i);
> -
> -        setup_node_bootmem(i, nodes[i].start, nodes[i].end);
> -    }
> -    for (i = 0; i < nr_cpu_ids; i++) {
> -        if (cpu_to_node[i] == NUMA_NO_NODE)
> -            continue;
> -        if (!nodemask_test(cpu_to_node[i], &processor_nodes_parsed))
> -            numa_set_node(i, NUMA_NO_NODE);
> -    }
> -    numa_init_array();
> -    return 0;
> -}
> -
>  static unsigned node_to_pxm(nodeid_t n)
>  {
>      unsigned i;
> diff --git a/xen/common/numa.c b/xen/common/numa.c
> index 4152bbe83b..8ca13e27d1 100644
> --- a/xen/common/numa.c
> +++ b/xen/common/numa.c
> @@ -195,7 +195,7 @@ void __init cutoff_node(int i, u64 start, u64 end)
>
>  /* Sanity check to catch more bad SRATs (they are amazingly common).
>     Make sure the PXMs cover all memory. */
> -int __init nodes_cover_memory(void)
> +static int __init nodes_cover_memory(void)
>  {
>      int i;
>      uint32_t nr_banks = arch_meminfo_get_nr_bank();
> @@ -271,6 +271,58 @@ void __init numa_init_array(void)
>      }
>  }
>
> +/* Use the information discovered above to actually set up the nodes. */
> +int __init numa_scan_nodes(u64 start, u64 end)
> +{
> +    int i;
> +    nodemask_t all_nodes_parsed;
> +
> +    /* First clean up the node list */
> +    for (i = 0; i < MAX_NUMNODES; i++)
> +        cutoff_node(i, start, end);
> +
> +#ifdef CONFIG_ACPI_NUMA
> +    if (acpi_numa <= 0)
> +        return -1;
> +#endif
> +
> +    if (!nodes_cover_memory()) {
> +        bad_srat();
> +        return -1;
> +    }
> +
> +    memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
> +                                       memblk_nodeid);
> +
> +    if (memnode_shift < 0) {
> +        printk(KERN_ERR
> +               "SRAT: No NUMA node hash function found. Contact maintainer\n");
> +        bad_srat();
> +        return -1;
> +    }
> +
> +    nodes_or(all_nodes_parsed, memory_nodes_parsed, processor_nodes_parsed);
> +
> +    /* Finally register nodes */
> +    for_each_node_mask(i, all_nodes_parsed)
> +    {
> +        u64 size = nodes[i].end - nodes[i].start;
> +        if ( size == 0 )
> +            printk(KERN_WARNING "SRAT: Node %u has no memory. "
> +                   "BIOS Bug or mis-configured hardware?\n", i);

Not all archs have a BIOS so I'd say "firmware bug".

Like last time, we usually don't do code changes together with code
movement, but in this case it might be OK. I am also happy with a
separate patch to adjust the two comments (this one and the one in the
previous patch).
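For illustration only, a minimal sketch of the wording I have in mind
(same code as in the patch, only the message text changes; not a
required form, just one way to phrase it):

    if ( size == 0 )
        printk(KERN_WARNING "SRAT: Node %u has no memory. "
               "Firmware bug or mis-configured hardware?\n", i);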