[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH v2 19/25] ACPI: Refactor acpi SRAT and SLIT table handling code



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>

SRAT handling code that is common across architectures is moved
from xen/arch/x86/srat.c to the new file xen/drivers/acpi/srat.c.
A new header file srat.h is introduced.

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>
---
 xen/arch/x86/dom0_build.c           |   1 +
 xen/arch/x86/mm.c                   |   2 -
 xen/arch/x86/physdev.c              |   1 +
 xen/arch/x86/setup.c                |   1 +
 xen/arch/x86/smpboot.c              |   1 +
 xen/arch/x86/srat.c                 | 250 +-----------------------------
 xen/arch/x86/x86_64/mm.c            |   1 +
 xen/drivers/acpi/Makefile           |   1 +
 xen/drivers/acpi/srat.c             | 299 ++++++++++++++++++++++++++++++++++++
 xen/drivers/passthrough/vtd/iommu.c |   1 +
 xen/include/acpi/srat.h             |  24 +++
 xen/include/asm-x86/mm.h            |   1 -
 xen/include/asm-x86/numa.h          |   4 -
 xen/include/xen/mm.h                |   2 +
 xen/include/xen/numa.h              |   1 -
 15 files changed, 333 insertions(+), 257 deletions(-)

diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index 20221b5..c131a81 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -11,6 +11,7 @@
 #include <xen/sched.h>
 #include <xen/sched-if.h>
 #include <xen/softirq.h>
+#include <acpi/srat.h>
 
 #include <asm/dom0_build.h>
 #include <asm/hpet.h>
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index a6b2649..ebabb0c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -137,8 +137,6 @@ l1_pgentry_t __section(".bss.page_aligned") 
__aligned(PAGE_SIZE)
 #define PTE_UPDATE_WITH_CMPXCHG
 #endif
 
-paddr_t __read_mostly mem_hotplug;
-
 /* Private domain structs for DOMID_XEN and DOMID_IO. */
 struct domain *dom_xen, *dom_io, *dom_cow;
 
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 81cd6c9..ecc0daf 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -8,6 +8,7 @@
 #include <xen/guest_access.h>
 #include <xen/iocap.h>
 #include <xen/serial.h>
+#include <acpi/srat.h>
 #include <asm/current.h>
 #include <asm/io_apic.h>
 #include <asm/msi.h>
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 4410e53..d29fd1a 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -27,6 +27,7 @@
 #include <xen/tmem_xen.h>
 #include <xen/virtual_region.h>
 #include <xen/watchdog.h>
+#include <acpi/srat.h>
 #include <public/version.h>
 #include <compat/platform.h>
 #include <compat/xen.h>
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 203733e..7dc06e4 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -33,6 +33,7 @@
 #include <xen/serial.h>
 #include <xen/numa.h>
 #include <xen/cpu.h>
+#include <acpi/srat.h>
 #include <asm/current.h>
 #include <asm/mc146818rtc.h>
 #include <asm/desc.h>
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 55947bb..760df7f 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -18,14 +18,12 @@
 #include <xen/acpi.h>
 #include <xen/numa.h>
 #include <xen/pfn.h>
+#include <acpi/srat.h>
 #include <asm/e820.h>
 #include <asm/page.h>
 
-static struct acpi_table_slit *__read_mostly acpi_slit;
-
 extern nodemask_t processor_nodes_parsed;
 extern nodemask_t memory_nodes_parsed;
-
 /*
  * Keep BIOS's CPU2node information, should not be used for memory allocaion
  */
@@ -33,87 +31,6 @@ nodeid_t apicid_to_node[MAX_LOCAL_APIC] = {
     [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
 
-struct pxm2node {
-       unsigned int pxm;
-       nodeid_t node;
-};
-static struct pxm2node __read_mostly pxm2node[MAX_NUMNODES] =
-       { [0 ... MAX_NUMNODES - 1] = {.node = NUMA_NO_NODE} };
-
-static unsigned node_to_pxm(nodeid_t n);
-
-static __initdata DECLARE_BITMAP(memblk_hotplug, NR_NODE_MEMBLKS);
-
-static inline bool node_found(unsigned int idx, unsigned int pxm)
-{
-       return ((pxm2node[idx].pxm == pxm) &&
-               (pxm2node[idx].node != NUMA_NO_NODE));
-}
-
-static void reset_pxm2node(void)
-{
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(pxm2node); i++)
-               pxm2node[i].node = NUMA_NO_NODE;
-}
-
-nodeid_t pxm_to_node(unsigned int pxm)
-{
-       unsigned int i;
-
-       if ((pxm < ARRAY_SIZE(pxm2node)) && node_found(pxm, pxm))
-               return pxm2node[pxm].node;
-
-       for (i = 0; i < ARRAY_SIZE(pxm2node); i++)
-               if (node_found(i, pxm))
-                       return pxm2node[i].node;
-
-       return NUMA_NO_NODE;
-}
-
-nodeid_t acpi_setup_node(unsigned int pxm)
-{
-       nodeid_t node;
-       unsigned int idx;
-       static bool warned;
-       static unsigned int nodes_found;
-
-       BUILD_BUG_ON(MAX_NUMNODES >= NUMA_NO_NODE);
-
-       if (pxm < ARRAY_SIZE(pxm2node)) {
-               if (node_found(pxm, pxm))
-                       return pxm2node[pxm].node;
-
-               /* Try to maintain indexing of pxm2node by pxm */
-               if (pxm2node[pxm].node == NUMA_NO_NODE) {
-                       idx = pxm;
-                       goto finish;
-               }
-       }
-
-       for (idx = 0; idx < ARRAY_SIZE(pxm2node); idx++)
-               if (pxm2node[idx].node == NUMA_NO_NODE)
-                       goto finish;
-
-       if (!warned) {
-               printk(KERN_WARNING "SRAT: Too many proximity domains (%#x)\n",
-                      pxm);
-               warned = 1;
-       }
-
-       return NUMA_NO_NODE;
-
- finish:
-       node = nodes_found++;
-       if (node >= MAX_NUMNODES)
-               return NUMA_NO_NODE;
-       pxm2node[idx].pxm = pxm;
-       pxm2node[idx].node = node;
-
-       return node;
-}
-
 void __init numa_failed(void)
 {
        int i;
@@ -125,48 +42,6 @@ void __init numa_failed(void)
        mem_hotplug = 0;
 }
 
-/*
- * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
- * up the NUMA heuristics which wants the local node to have a smaller
- * distance than the others.
- * Do some quick checks here and only use the SLIT if it passes.
- */
-static int __init slit_valid(struct acpi_table_slit *slit)
-{
-       int i, j;
-       int d = slit->locality_count;
-       for (i = 0; i < d; i++) {
-               for (j = 0; j < d; j++)  {
-                       uint8_t val = slit->entry[d*i + j];
-                       if (i == j) {
-                               if (val != LOCAL_DISTANCE)
-                                       return 0;
-                       } else if (val <= LOCAL_DISTANCE)
-                               return 0;
-               }
-       }
-       return 1;
-}
-
-/* Callback for SLIT parsing */
-void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
-{
-       unsigned long mfn;
-       if (!slit_valid(slit)) {
-               printk(KERN_INFO "ACPI: SLIT table looks invalid. "
-                      "Not used.\n");
-               return;
-       }
-       mfn = alloc_boot_pages(PFN_UP(slit->header.length), 1);
-       if (!mfn) {
-               printk(KERN_ERR "ACPI: Unable to allocate memory for "
-                      "saving ACPI SLIT numa information.\n");
-               return;
-       }
-       acpi_slit = mfn_to_virt(mfn);
-       memcpy(acpi_slit, slit, slit->header.length);
-}
-
 /* Callback for Proximity Domain -> x2APIC mapping */
 void __init
 acpi_numa_x2apic_affinity_init(const struct acpi_srat_x2apic_cpu_affinity *pa)
@@ -234,100 +109,6 @@ acpi_numa_processor_affinity_init(const struct 
acpi_srat_cpu_affinity *pa)
               pxm, pa->apic_id, node);
 }
 
-/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
-void __init
-acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
-{
-       uint64_t start, end;
-       unsigned pxm;
-       nodeid_t node;
-       int i;
-       struct node *memblk;
-
-       if (srat_disabled())
-               return;
-       if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
-               numa_failed();
-               return;
-       }
-       if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
-               return;
-
-       if (get_num_node_memblks() >= NR_NODE_MEMBLKS)
-       {
-               dprintk(XENLOG_WARNING,
-                "Too many numa entry, try bigger NR_NODE_MEMBLKS \n");
-               numa_failed();
-               return;
-       }
-
-       start = ma->base_address;
-       end = start + ma->length;
-       pxm = ma->proximity_domain;
-       if (srat_rev < 2)
-               pxm &= 0xff;
-       node = acpi_setup_node(pxm);
-       if (node == NUMA_NO_NODE) {
-               numa_failed();
-               return;
-       }
-       /* It is fine to add this area to the nodes data it will be used later*/
-       i = conflicting_memblks(start, end);
-       if (i < 0)
-               /* everything fine */;
-       else if (get_memblk_nodeid(i) == node) {
-               bool mismatch = !(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) !=
-                               !test_bit(i, memblk_hotplug);
-
-               memblk = get_node_memblk_range(i);
-
-               printk("%sSRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with 
itself (%"PRIx64"-%"PRIx64")\n",
-                      mismatch ? KERN_ERR : KERN_WARNING, pxm, start, end,
-                      memblk->start, memblk->end);
-               if (mismatch) {
-                       numa_failed();
-                       return;
-               }
-       } else {
-               memblk = get_node_memblk_range(i);
-
-               printk(KERN_ERR
-                      "SRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with PXM %u 
(%"PRIx64"-%"PRIx64")\n",
-                      pxm, start, end, node_to_pxm(get_memblk_nodeid(i)),
-                      memblk->start, memblk->end);
-               numa_failed();
-               return;
-       }
-       if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
-               struct node *nd = get_numa_node(node);
-
-               if (!node_test_and_set(node, memory_nodes_parsed)) {
-                       nd->start = start;
-                       nd->end = end;
-               } else {
-                       if (start < nd->start)
-                               nd->start = start;
-                       if (nd->end < end)
-                               nd->end = end;
-               }
-       }
-       printk(KERN_INFO "SRAT: Node %u PXM %u %"PRIx64"-%"PRIx64"%s\n",
-              node, pxm, start, end,
-              ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE ? " (hotplug)" : "");
-
-       if (numa_add_memblk(node, start, ma->length)) {
-               printk(KERN_ERR "SRAT: node-id %u out of range\n", node);
-               numa_failed();
-               return;
-       }
-
-       if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
-               __set_bit(get_num_node_memblks(), memblk_hotplug);
-               if (end > mem_hotplug)
-                       mem_hotplug = end;
-       }
-}
-
 /* Sanity check to catch more bad SRATs (they are amazingly common).
    Make sure the PXMs cover all memory. */
 int __init arch_sanitize_nodes_memory(void)
@@ -427,35 +208,6 @@ void __init srat_parse_regions(uint64_t addr)
        pfn_pdx_hole_setup(mask >> PAGE_SHIFT);
 }
 
-static unsigned node_to_pxm(nodeid_t n)
-{
-       unsigned i;
-
-       if ((n < ARRAY_SIZE(pxm2node)) && (pxm2node[n].node == n))
-               return pxm2node[n].pxm;
-       for (i = 0; i < ARRAY_SIZE(pxm2node); i++)
-               if (pxm2node[i].node == n)
-                       return pxm2node[i].pxm;
-       return 0;
-}
-
-static uint8_t acpi_node_distance(nodeid_t a, nodeid_t b)
-{
-       unsigned index;
-       uint8_t slit_val;
-
-       if (!acpi_slit)
-               return a == b ? LOCAL_DISTANCE : REMOTE_DISTANCE;
-       index = acpi_slit->locality_count * node_to_pxm(a);
-       slit_val = acpi_slit->entry[index + node_to_pxm(b)];
-
-       /* ACPI defines 0xff as an unreachable node and 0-9 are undefined */
-       if ((slit_val == 0xff) || (slit_val <= 9))
-               return NUMA_NO_DISTANCE;
-       else
-               return slit_val;
-}
-
 uint8_t __node_distance(nodeid_t a, nodeid_t b)
 {
        return acpi_node_distance(a, b);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index f0082e1..8d5fd4e 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -27,6 +27,7 @@ asm(".file \"" __FILE__ "\"");
 #include <xen/guest_access.h>
 #include <xen/hypercall.h>
 #include <xen/mem_access.h>
+#include <acpi/srat.h>
 #include <asm/current.h>
 #include <asm/asm_defns.h>
 #include <asm/page.h>
diff --git a/xen/drivers/acpi/Makefile b/xen/drivers/acpi/Makefile
index 444b11d..69edc26 100644
--- a/xen/drivers/acpi/Makefile
+++ b/xen/drivers/acpi/Makefile
@@ -4,6 +4,7 @@ subdir-$(CONFIG_X86) += apei
 
 obj-bin-y += tables.init.o
 obj-$(CONFIG_NUMA) += numa.o
+obj-$(CONFIG_NUMA) += srat.o
 obj-y += osl.o
 obj-$(CONFIG_HAS_CPUFREQ) += pmstat.o
 
diff --git a/xen/drivers/acpi/srat.c b/xen/drivers/acpi/srat.c
new file mode 100644
index 0000000..9a68a4b
--- /dev/null
+++ b/xen/drivers/acpi/srat.c
@@ -0,0 +1,299 @@
+/*
+ * ACPI 3.0 based NUMA setup
+ * Copyright 2004 Andi Kleen, SuSE Labs.
+ *
+ * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
+ *
+ * Called from acpi_numa_init while reading the SRAT and SLIT tables.
+ * Assumes all memory regions belonging to a single proximity domain
+ * are in one chunk. Holes between them will be included in the node.
+ *
+ * Adapted for Xen: Ryan Harper <ryanh@xxxxxxxxxx>
+ */
+
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/inttypes.h>
+#include <xen/nodemask.h>
+#include <xen/acpi.h>
+#include <xen/numa.h>
+#include <xen/pfn.h>
+#include <acpi/srat.h>
+#include <asm/page.h>
+#include <asm/acpi.h>
+
+paddr_t __read_mostly mem_hotplug;
+extern nodemask_t memory_nodes_parsed;
+static struct acpi_table_slit __read_mostly *acpi_slit;
+static struct pxm2node __read_mostly pxm2node[MAX_NUMNODES] =
+    { [0 ... MAX_NUMNODES - 1] = {.node = NUMA_NO_NODE} };
+
+static __initdata DECLARE_BITMAP(memblk_hotplug, NR_NODE_MEMBLKS);
+
+static inline bool_t node_found(unsigned int idx, unsigned int pxm)
+{
+    return ( (pxm2node[idx].pxm == pxm) &&
+        (pxm2node[idx].node != NUMA_NO_NODE) );
+}
+
+void reset_pxm2node(void)
+{
+    unsigned int i;
+
+    for ( i = 0; i < ARRAY_SIZE(pxm2node); i++ )
+        pxm2node[i].node = NUMA_NO_NODE;
+}
+
+unsigned node_to_pxm(nodeid_t n)
+{
+    unsigned int i;
+
+    if ( (n < ARRAY_SIZE(pxm2node)) && (pxm2node[n].node == n) )
+        return pxm2node[n].pxm;
+
+    for ( i = 0; i < ARRAY_SIZE(pxm2node); i++ )
+        if ( pxm2node[i].node == n )
+            return pxm2node[i].pxm;
+
+    return 0;
+}
+
+nodeid_t pxm_to_node(unsigned int pxm)
+{
+    unsigned int i;
+
+    if ( (pxm < ARRAY_SIZE(pxm2node)) && node_found(pxm, pxm) )
+        return pxm2node[pxm].node;
+
+    for ( i = 0; i < ARRAY_SIZE(pxm2node); i++ )
+        if ( node_found(i, pxm) )
+            return pxm2node[i].node;
+
+    return NUMA_NO_NODE;
+}
+
+nodeid_t acpi_setup_node(unsigned int pxm)
+{
+    nodeid_t node;
+    unsigned int idx;
+    static bool_t warned;
+    static unsigned int nodes_found;
+
+    BUILD_BUG_ON(MAX_NUMNODES >= NUMA_NO_NODE);
+
+    if ( pxm < ARRAY_SIZE(pxm2node) )
+    {
+        if ( node_found(pxm, pxm) )
+            return pxm2node[pxm].node;
+
+        /* Try to maintain indexing of pxm2node by pxm */
+        if ( pxm2node[pxm].node == NUMA_NO_NODE )
+        {
+            idx = pxm;
+            goto finish;
+        }
+    }
+
+    for ( idx = 0; idx < ARRAY_SIZE(pxm2node); idx++ )
+        if ( pxm2node[idx].node == NUMA_NO_NODE )
+            goto finish;
+
+    if ( !warned )
+    {
+        printk(KERN_WARNING "SRAT: Too many proximity domains (%#x)\n", pxm);
+        warned = 1;
+    }
+
+    return NUMA_NO_NODE;
+
+ finish:
+    node = nodes_found++;
+    if ( node >= MAX_NUMNODES )
+        return NUMA_NO_NODE;
+    pxm2node[idx].pxm = pxm;
+    pxm2node[idx].node = node;
+
+    return node;
+}
+
+/*
+ * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
+ * up the NUMA heuristics which wants the local node to have a smaller
+ * distance than the others.
+ * Do some quick checks here and only use the SLIT if it passes.
+ */
+static __init int slit_valid(struct acpi_table_slit *slit)
+{
+    int i, j;
+    int d = slit->locality_count;
+
+    for ( i = 0; i < d; i++ )
+    {
+        for ( j = 0; j < d; j++ )
+        {
+            uint8_t val = slit->entry[d * i + j];
+
+            if ( i == j )
+            {
+                if ( val != LOCAL_DISTANCE )
+                    return 0;
+            } else if ( val <= LOCAL_DISTANCE )
+                return 0;
+        }
+    }
+
+    return 1;
+}
+
+/* Callback for SLIT parsing */
+void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
+{
+    unsigned long mfn;
+
+    if ( !slit_valid(slit) )
+    {
+        printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
+        return;
+    }
+
+    mfn = alloc_boot_pages(PFN_UP(slit->header.length), 1);
+    if ( !mfn )
+    {
+        printk(KERN_ERR "ACPI: Unable to allocate memory for "
+               "saving ACPI SLIT numa information.\n");
+        return;
+    }
+    acpi_slit = mfn_to_virt(mfn);
+    memcpy(acpi_slit, slit, slit->header.length);
+}
+
+/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
+void __init
+acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
+{
+    uint64_t start, end;
+    unsigned pxm;
+    nodeid_t node;
+    int i;
+    struct node *memblk;
+
+    if ( srat_disabled() )
+        return;
+
+    if ( ma->header.length != sizeof(struct acpi_srat_mem_affinity) )
+    {
+        numa_failed();
+        return;
+    }
+
+    if ( !(ma->flags & ACPI_SRAT_MEM_ENABLED) )
+        return;
+
+    if ( get_num_node_memblks() >= NR_NODE_MEMBLKS )
+    {
+        dprintk(XENLOG_WARNING,
+                "Too many numa entry, try bigger NR_NODE_MEMBLKS \n");
+        numa_failed();
+        return;
+    }
+
+    start = ma->base_address;
+    end = start + ma->length;
+    pxm = ma->proximity_domain;
+    if ( srat_rev < 2 )
+        pxm &= 0xff;
+    node = acpi_setup_node(pxm);
+    if ( node == NUMA_NO_NODE )
+    {
+        numa_failed();
+        return;
+    }
+
+    /* It is fine to add this area to the nodes data it will be used later*/
+    i = conflicting_memblks(start, end);
+    if ( i < 0 )
+        /* everything fine */;
+    else if ( get_memblk_nodeid(i) == node )
+    {
+        bool_t mismatch = !(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) !=
+                          !test_bit(i, memblk_hotplug);
+
+        memblk = get_node_memblk_range(i);
+
+        printk("%sSRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with itself 
(%"PRIx64"-%"PRIx64")\n",
+               mismatch ? KERN_ERR : KERN_WARNING, pxm, start, end,
+               memblk->start, memblk->end);
+        if ( mismatch )
+        {
+            numa_failed();
+            return;
+        }
+    }
+    else
+    {
+        memblk = get_node_memblk_range(i);
+
+        printk(KERN_ERR
+               "SRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with PXM %u 
(%"PRIx64"-%"PRIx64")\n",
+               pxm, start, end, node_to_pxm(get_memblk_nodeid(i)),
+               memblk->start, memblk->end);
+        numa_failed();
+        return;
+    }
+
+    if ( !(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) )
+    {
+        struct node *nd = get_numa_node(node);
+
+        if ( !node_test_and_set(node, memory_nodes_parsed) )
+        {
+            nd->start = start;
+            nd->end = end;
+        }
+        else
+        {
+            if ( start < nd->start )
+                nd->start = start;
+            if ( nd->end < end )
+                nd->end = end;
+        }
+    }
+    printk(KERN_INFO "SRAT: Node %u PXM %u %"PRIx64"-%"PRIx64"%s\n",
+           node, pxm, start, end,
+           ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE ? " (hotplug)" : "");
+
+    numa_add_memblk(node, start, ma->length);
+    if ( ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE )
+    {
+        __set_bit(get_num_node_memblks(), memblk_hotplug);
+        if ( end > mem_hotplug )
+            mem_hotplug = end;
+    }
+}
+
+uint8_t acpi_node_distance(nodeid_t a, nodeid_t b)
+{
+    unsigned index;
+    uint8_t slit_val;
+
+    if ( !acpi_slit )
+        return a == b ? LOCAL_DISTANCE : REMOTE_DISTANCE;
+
+    index = acpi_slit->locality_count * node_to_pxm(a);
+    slit_val = acpi_slit->entry[index + node_to_pxm(b)];
+
+    /* ACPI defines 0xff as an unreachable node and 0-9 are undefined */
+    if ( (slit_val == 0xff) || (slit_val <= 9) )
+        return NUMA_NO_DISTANCE;
+    else
+        return slit_val;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/drivers/passthrough/vtd/iommu.c 
b/xen/drivers/passthrough/vtd/iommu.c
index a5c61c6..d882951 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -30,6 +30,7 @@
 #include <xen/pci.h>
 #include <xen/pci_regs.h>
 #include <xen/keyhandler.h>
+#include <acpi/srat.h>
 #include <asm/msi.h>
 #include <asm/irq.h>
 #include <asm/hvm/vmx/vmx.h>
diff --git a/xen/include/acpi/srat.h b/xen/include/acpi/srat.h
new file mode 100644
index 0000000..c630ae9
--- /dev/null
+++ b/xen/include/acpi/srat.h
@@ -0,0 +1,24 @@
+#ifndef __XEN_SRAT_H__
+#define __XEN_SRAT_H__
+
+extern int srat_rev;
+struct pxm2node {
+    unsigned int pxm;
+    nodeid_t node;
+};
+
+extern nodeid_t pxm_to_node(unsigned pxm);
+extern nodeid_t acpi_setup_node(unsigned pxm);
+extern unsigned int node_to_pxm(nodeid_t n);
+extern uint8_t acpi_node_distance(nodeid_t a, nodeid_t b);
+extern void reset_pxm2node(void);
+#endif /* __XEN_SRAT_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index e22603c..be0e0d4 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -405,7 +405,6 @@ static inline int get_page_and_type(struct page_info *page,
 int check_descriptor(const struct domain *, struct desc_struct *d);
 
 extern bool_t opt_allow_superpage;
-extern paddr_t mem_hotplug;
 
 /******************************************************************************
  * With shadow pagetables, the different kinds of address start 
diff --git a/xen/include/asm-x86/numa.h b/xen/include/asm-x86/numa.h
index 7cff220..d8fa67c 100644
--- a/xen/include/asm-x86/numa.h
+++ b/xen/include/asm-x86/numa.h
@@ -7,8 +7,6 @@
 
 typedef uint8_t nodeid_t;
 
-extern int srat_rev;
-
 extern nodeid_t      cpu_to_node[NR_CPUS];
 extern cpumask_t     node_to_cpumask[];
 
@@ -17,8 +15,6 @@ extern cpumask_t     node_to_cpumask[];
 #define node_to_first_cpu(node)  (__ffs(node_to_cpumask[node]))
 #define node_to_cpumask(node)    (node_to_cpumask[node])
 
-extern nodeid_t pxm_to_node(unsigned int pxm);
-
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
 
 extern void numa_add_cpu(int cpu);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 88de3c1..61d059d 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -63,6 +63,8 @@ TYPE_SAFE(unsigned long, mfn);
 #undef mfn_t
 #endif
 
+extern paddr_t mem_hotplug;
+
 static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
 {
     return _mfn(mfn_x(mfn) + i);
diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
index b40a841..851f4a7 100644
--- a/xen/include/xen/numa.h
+++ b/xen/include/xen/numa.h
@@ -25,7 +25,6 @@ extern int compute_memnode_shift(struct node *nodes, int 
numnodes,
 extern void numa_init_array(void);
 extern bool_t srat_disabled(void);
 extern void numa_set_node(int cpu, nodeid_t node);
-extern nodeid_t acpi_setup_node(unsigned int pxm);
 extern void srat_detect_node(int cpu);
 extern void setup_node_bootmem(nodeid_t nodeid, paddr_t start, paddr_t end);
 extern void init_cpu_to_node(void);
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.