[Xen-changelog] [xen stable-4.4] x86/NUMA: don't account hotplug regions
commit cfb5d2001784dfdec638ba335fd9252f5833ee2d
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Sep 10 15:54:13 2015 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Sep 10 15:54:13 2015 +0200

    x86/NUMA: don't account hotplug regions

    ... except in cases where they really matter: node_memblk_range[] now
    is the only place all regions get stored. nodes[] and NODE_DATA()
    track present memory only. This improves the reporting when nodes
    have disjoint "normal" and hotplug regions, with the hotplug region
    sitting above the highest populated page. In such cases a node's
    spanned-pages value (visible in both XEN_SYSCTL_numainfo and 'u'
    debug key output) covered all the way up to the top of populated
    memory, giving quite a different picture from what an otherwise
    identically configured system without any hotplug regions would
    report.

    Note, however, that the actual hotplug case (as well as cases of
    nodes with multiple disjoint present regions) is still not being
    handled such that the reported values would represent how much
    memory a node really has (but that can be considered intentional).

    Reported-by: Jim Fehlig <jfehlig@xxxxxxxx>

    This at once makes nodes_cover_memory() no longer consider E820_RAM
    regions covered by SRAT hotplug regions.

    Also reject self-overlaps with mismatching hotplug flags.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Tested-by: Jim Fehlig <jfehlig@xxxxxxxx>
    master commit: c011f470e6e79208f5baa071b4d072b78c88e2ba
    master date: 2015-08-31 13:52:24 +0200
---
 xen/arch/x86/srat.c | 56 +++++++++++++++++++++++++++++++-------------------
 1 files changed, 35 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 78ea7c0..d4e479c 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -32,7 +32,7 @@ static u8 __read_mostly pxm2node[256] = { [0 ... 255] = NUMA_NO_NODE };
 static int num_node_memblks;
 static struct node node_memblk_range[NR_NODE_MEMBLKS];
 static int memblk_nodeid[NR_NODE_MEMBLKS];
-
+static __initdata DECLARE_BITMAP(memblk_hotplug, NR_NODE_MEMBLKS);
 
 static int node_to_pxm(int n);
 
@@ -89,9 +89,9 @@ static __init int conflicting_memblks(u64 start, u64 end)
 		if (nd->start == nd->end)
 			continue;
 		if (nd->end > start && nd->start < end)
-			return memblk_nodeid[i];
+			return i;
 		if (nd->end == end && nd->start == start)
-			return memblk_nodeid[i];
+			return i;
 	}
 	return -1;
 }
@@ -229,7 +229,6 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 void __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
-	struct node *nd;
 	u64 start, end;
 	int node, pxm;
 	int i;
@@ -263,30 +262,40 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 	}
 	/* It is fine to add this area to the nodes data it will be used later*/
 	i = conflicting_memblks(start, end);
-	if (i == node) {
-		printk(KERN_WARNING
-		"SRAT: Warning: PXM %d (%"PRIx64"-%"PRIx64") overlaps with itself (%"
-		PRIx64"-%"PRIx64")\n", pxm, start, end, nodes[i].start, nodes[i].end);
-	} else if (i >= 0) {
+	if (i < 0)
+		/* everything fine */;
+	else if (memblk_nodeid[i] == node) {
+		bool_t mismatch = !(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) !=
+		                  !test_bit(i, memblk_hotplug);
+
+		printk("%sSRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with itself (%"PRIx64"-%"PRIx64")\n",
+		       mismatch ? KERN_ERR : KERN_WARNING, pxm, start, end,
+		       node_memblk_range[i].start, node_memblk_range[i].end);
+		if (mismatch) {
+			bad_srat();
+			return;
+		}
+	} else {
 		printk(KERN_ERR
-		"SRAT: PXM %d (%"PRIx64"-%"PRIx64") overlaps with PXM %d (%"
-		PRIx64"-%"PRIx64")\n", pxm, start, end, node_to_pxm(i),
-		nodes[i].start, nodes[i].end);
+		       "SRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with PXM %u (%"PRIx64"-%"PRIx64")\n",
+		       pxm, start, end, node_to_pxm(memblk_nodeid[i]),
+		       node_memblk_range[i].start, node_memblk_range[i].end);
 		bad_srat();
 		return;
 	}
-	nd = &nodes[node];
-	if (!node_test_and_set(node, memory_nodes_parsed)) {
-		nd->start = start;
-		nd->end = end;
-	} else {
-		if (start < nd->start)
+	if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
+		struct node *nd = &nodes[node];
+
+		if (!node_test_and_set(node, memory_nodes_parsed)) {
 			nd->start = start;
-		if (nd->end < end)
 			nd->end = end;
+		} else {
+			if (start < nd->start)
+				nd->start = start;
+			if (nd->end < end)
+				nd->end = end;
+		}
 	}
-	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && end > mem_hotplug)
-		mem_hotplug = end;
 	printk(KERN_INFO "SRAT: Node %u PXM %u %"PRIx64"-%"PRIx64"%s\n",
 	       node, pxm, start, end,
 	       ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE ? " (hotplug)" : "");
@@ -294,6 +303,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 	node_memblk_range[num_node_memblks].start = start;
 	node_memblk_range[num_node_memblks].end = end;
 	memblk_nodeid[num_node_memblks] = node;
+	if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
+		__set_bit(num_node_memblks, memblk_hotplug);
+		if (end > mem_hotplug)
+			mem_hotplug = end;
+	}
 	num_node_memblks++;
 }
 
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.4

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
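To see why excluding hotplug ranges from nodes[] changes the reported span,
consider a node with one populated region and one hotplug region above it.
The following self-contained C sketch (not Xen code; the addresses, struct
region, and variable names are invented for illustration) computes the node's
[start, end) span the old way, over all SRAT entries, and the new way, over
present entries only:

#include <stdint.h>
#include <stdio.h>

struct region {
	uint64_t start, end;	/* [start, end) physical range */
	int hotplug;		/* SRAT hot-pluggable flag */
};

int main(void)
{
	/* One node: 4GiB populated low, a hotplug window far above it. */
	struct region blks[] = {
		{ 0x000000000ULL, 0x100000000ULL, 0 },
		{ 0x400000000ULL, 0x800000000ULL, 1 },
	};
	uint64_t lo_all = UINT64_MAX, hi_all = 0;	/* old accounting */
	uint64_t lo = UINT64_MAX, hi = 0;		/* new accounting */

	for (size_t i = 0; i < sizeof(blks) / sizeof(blks[0]); i++) {
		if (blks[i].start < lo_all) lo_all = blks[i].start;
		if (blks[i].end > hi_all)   hi_all = blks[i].end;
		if (blks[i].hotplug)
			continue;	/* new behaviour: present memory only */
		if (blks[i].start < lo) lo = blks[i].start;
		if (blks[i].end > hi)   hi = blks[i].end;
	}

	printf("old span: %#llx-%#llx\n",
	       (unsigned long long)lo_all, (unsigned long long)hi_all);
	printf("new span: %#llx-%#llx\n",
	       (unsigned long long)lo, (unsigned long long)hi);
	return 0;
}

With these made-up numbers the old span reaches up to 32GiB even though only
4GiB is populated; the new span matches what an otherwise identical,
hotplug-free configuration would report.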
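The stricter conflict handling can be paraphrased the same way. Below is a
minimal sketch, assuming simplified stand-ins for node_memblk_range[],
memblk_nodeid[] and the new memblk_hotplug bitmap (conflicting() and record()
are hypothetical names, not Xen's API): a cross-node overlap, or a same-node
overlap whose hotplug flags disagree, invalidates the table, as bad_srat()
does in Xen, while a same-node overlap with matching flags is only warned
about.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_NODE_MEMBLKS 8

/* Simplified stand-in for Xen's parallel arrays plus hotplug bitmap. */
struct memblk {
	uint64_t start, end;	/* [start, end) physical range */
	int node;		/* owning NUMA node */
	bool hotplug;		/* SRAT hot-pluggable flag */
};

static struct memblk blk[NR_NODE_MEMBLKS];
static int nblks;

/* Counterpart of conflicting_memblks(): index of an overlapping entry,
 * or -1 when the new range is disjoint from everything recorded. */
static int conflicting(uint64_t start, uint64_t end)
{
	for (int i = 0; i < nblks; i++) {
		if (blk[i].start == blk[i].end)
			continue;
		if (blk[i].end > start && blk[i].start < end)
			return i;
	}
	return -1;
}

/* The patch's decision tree, with a bool return instead of bad_srat(). */
static bool record(uint64_t start, uint64_t end, int node, bool hotplug)
{
	int i = conflicting(start, end);

	if (i >= 0) {
		if (blk[i].node != node) {
			fprintf(stderr, "overlap with node %d - reject\n",
				blk[i].node);
			return false;
		}
		if (blk[i].hotplug != hotplug) {
			fprintf(stderr,
				"self-overlap, mismatching hotplug flags - reject\n");
			return false;
		}
		fprintf(stderr, "node %d overlaps itself - warn only\n", node);
	}
	if (nblks == NR_NODE_MEMBLKS)
		return false;	/* table full */
	blk[nblks++] = (struct memblk){ start, end, node, hotplug };
	return true;
}

int main(void)
{
	record(0x0ULL, 0x80000000ULL, 0, false);	/* accepted */
	record(0x40000000ULL, 0xc0000000ULL, 0, true);	/* rejected: flags differ */
	return 0;
}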