
Re: [Xen-devel] [RFC][PATCH] domheap optimization for NUMA



Keir Fraser wrote:
On 2/4/08 14:06, "Zhai, Edwin" <edwin.zhai@xxxxxxxxx> wrote:

The issue is that alloc_domheap_pages takes a domain* parameter to indicate
whether the pages need to be accounted to the domain, and sometimes it's NULL.
In that case we can't deduce the node from the domain. I believe that's why cpu
is used here, as getting the cpuid is easier.

Yes, but it's a bad interface, particularly when the function is called
alloc_domheap_pages_on_node(). Pass in a nodeid. Write a helper function to
work out the nodeid from the domain*.
I was just looking at this code too, so I fixed this. Eventually alloc_heap_pages() is called, which deals only with nodes, so I replaced cpu with node everywhere else as well. Now __alloc_domheap_pages() and alloc_domheap_pages_on_node() are almost identical (apart from parameter ordering), so I removed the former, since the latter has the better name. Passing node numbers instead of cpu numbers requires cpu_to_node() and asm/numa.h; if you think there is a better way, I am all ears.
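
For reference, the unified prototype (from the mm.h hunk below) is:

    struct page_info *alloc_domheap_pages_on_node(
        struct domain *d, unsigned int order, unsigned int memflags,
        unsigned int node_id);

A minimal sketch of the domain*-to-node helper Keir asked for could look like
the following -- hypothetical, not part of the attached patches, which
open-code cpu_to_node() at each call site (compare select_local_node() in
xen/common/memory.c):

    /* Hypothetical helper: map a domain* to a NUMA node, falling back
     * to the current CPU's node when d is NULL or has no vcpu yet. */
    static inline unsigned int domain_to_node(struct domain *d)
    {
        struct vcpu *v = (d != NULL) ? d->vcpu[0] : NULL;
        return cpu_to_node(v ? v->processor : smp_processor_id());
    }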

Another option: always use the domain* to locate the node (NULL not allowed)
and add a new flag, _MEMF_assign, to indicate the assignment; but that changes
the interface and is invasive.

Yes, that's a bad idea.

 -- Keir

The first diff is against Edwin's patch; the second is self-contained and includes it.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>

Regards,
Andre.

--
Andre Przywara
AMD-Operating System Research Center (OSRC), Dresden, Germany
Tel: +49 351 277-84917
----to satisfy European Law for business letters:
AMD Saxony Limited Liability Company & Co. KG,
Wilschdorfer Landstr. 101, 01109 Dresden, Germany
Register Court Dresden: HRA 4896, General Partner authorized
to represent: AMD Saxony LLC (Wilmington, Delaware, US)
General Manager of AMD Saxony LLC: Dr. Hans-R. Deppe, Thomas McCoy
diff -r b1cd7c0749f4 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Apr 02 16:07:24 2008 +0200
+++ b/xen/arch/x86/domain.c     Thu Apr 03 00:38:53 2008 +0200
@@ -46,6 +46,7 @@
 #include <asm/debugreg.h>
 #include <asm/msr.h>
 #include <asm/nmi.h>
+#include <asm/numa.h>
 #include <xen/iommu.h>
 #ifdef CONFIG_COMPAT
 #include <compat/vcpu.h>
@@ -477,8 +478,9 @@ int arch_domain_create(struct domain *d,
 
 #else /* __x86_64__ */
 
-    if ( (pg = alloc_domheap_page_on_node(NULL, d->vcpu[0])) == NULL )
-        goto fail;
+    if ( (pg = alloc_domheap_page_on_node(NULL,
+        cpu_to_node(d->vcpu[0]->processor))) == NULL )
+            goto fail;
     d->arch.mm_perdomain_l2 = page_to_virt(pg);
     clear_page(d->arch.mm_perdomain_l2);
     for ( i = 0; i < (1 << pdpt_order); i++ )
@@ -486,8 +488,9 @@ int arch_domain_create(struct domain *d,
             l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
                           __PAGE_HYPERVISOR);
 
-    if ( (pg = alloc_domheap_page_on_node(NULL, d->vcpu[0])) == NULL )
-        goto fail;
+    if ( (pg = alloc_domheap_page_on_node(NULL,
+        cpu_to_node(d->vcpu[0]->processor))) == NULL )
+            goto fail;
     d->arch.mm_perdomain_l3 = page_to_virt(pg);
     clear_page(d->arch.mm_perdomain_l3);
     d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
diff -r b1cd7c0749f4 xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Wed Apr 02 16:07:24 2008 +0200
+++ b/xen/arch/x86/hvm/stdvga.c Thu Apr 03 00:38:53 2008 +0200
@@ -32,6 +32,7 @@
 #include <xen/sched.h>
 #include <xen/domain_page.h>
 #include <asm/hvm/support.h>
+#include <asm/numa.h>
 
 #define PAT(x) (x)
 static const uint32_t mask16[16] = {
@@ -513,8 +514,9 @@ void stdvga_init(struct domain *d)
     
     for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
     {
-        if ( (pg = alloc_domheap_page_on_node(NULL, d->vcpu[0])) == NULL )
-            break;
+        if ( (pg = alloc_domheap_page_on_node(NULL,
+            cpu_to_node(d->vcpu[0]->processor))) == NULL )
+                break;
         s->vram_page[i] = pg;
         p = map_domain_page(page_to_mfn(pg));
         clear_page(p);
diff -r b1cd7c0749f4 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Wed Apr 02 16:07:24 2008 +0200
+++ b/xen/arch/x86/hvm/vlapic.c Thu Apr 03 00:38:53 2008 +0200
@@ -33,6 +33,7 @@
 #include <xen/sched.h>
 #include <asm/current.h>
 #include <asm/hvm/vmx/vmx.h>
+#include <asm/numa.h>
 #include <public/hvm/ioreq.h>
 #include <public/hvm/params.h>
 
@@ -928,7 +929,8 @@ int vlapic_init(struct vcpu *v)
         memflags = MEMF_bits(32);
 #endif
 
-    vlapic->regs_page = alloc_domheap_pages_on_node(NULL, 0, memflags, v);
+    vlapic->regs_page = alloc_domheap_pages_on_node(NULL, 0, memflags,
+        cpu_to_node(v->processor));
     if ( vlapic->regs_page == NULL )
     {
         dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
diff -r b1cd7c0749f4 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Wed Apr 02 16:07:24 2008 +0200
+++ b/xen/arch/x86/mm/hap/hap.c Thu Apr 03 00:38:53 2008 +0200
@@ -38,6 +38,7 @@
 #include <asm/hap.h>
 #include <asm/paging.h>
 #include <asm/domain.h>
+#include <asm/numa.h>
 
 #include "private.h"
 
@@ -135,7 +136,8 @@ static struct page_info *hap_alloc_p2m_p
          && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
     {
         free_domheap_page(pg);
-        pg = alloc_domheap_pages_on_node(NULL, 0, MEMF_bits(32), d->vcpu[0]);
+        pg = alloc_domheap_pages_on_node(NULL, 0, MEMF_bits(32),
+            cpu_to_node(d->vcpu[0]->processor));
         if ( likely(pg != NULL) )
         {
             void *p = hap_map_domain_page(page_to_mfn(pg));
@@ -199,7 +201,8 @@ hap_set_allocation(struct domain *d, uns
         if ( d->arch.paging.hap.total_pages < pages )
         {
             /* Need to allocate more memory from domheap */
-            pg = alloc_domheap_page_on_node(NULL, d->vcpu[0]);
+            pg = alloc_domheap_page_on_node(NULL,
+                cpu_to_node(d->vcpu[0]->processor));
             if ( pg == NULL )
             {
                 HAP_PRINTK("failed to allocate hap pages.\n");
diff -r b1cd7c0749f4 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Wed Apr 02 16:07:24 2008 +0200
+++ b/xen/arch/x86/mm/paging.c  Thu Apr 03 00:38:53 2008 +0200
@@ -26,6 +26,7 @@
 #include <asm/p2m.h>
 #include <asm/hap.h>
 #include <asm/guest_access.h>
+#include <asm/numa.h>
 #include <xsm/xsm.h>
 
 #define hap_enabled(d) (is_hvm_domain(d) && (d)->arch.hvm_domain.hap_enabled)
@@ -99,7 +100,8 @@ static mfn_t paging_new_log_dirty_page(s
 static mfn_t paging_new_log_dirty_page(struct domain *d, void **mapping_p)
 {
     mfn_t mfn;
-    struct page_info *page = alloc_domheap_page_on_node(NULL, d->vcpu[0]);
+    struct page_info *page = alloc_domheap_page_on_node(NULL,
+        cpu_to_node(d->vcpu[0]->processor));
 
     if ( unlikely(page == NULL) )
     {
diff -r b1cd7c0749f4 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed Apr 02 16:07:24 2008 +0200
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Apr 03 00:38:53 2008 +0200
@@ -36,6 +36,7 @@
 #include <asm/current.h>
 #include <asm/flushtlb.h>
 #include <asm/shadow.h>
+#include <asm/numa.h>
 #include "private.h"
 
 
@@ -1249,7 +1250,8 @@ static unsigned int sh_set_allocation(st
         {
             /* Need to allocate more memory from domheap */
             sp = (struct shadow_page_info *)
-                alloc_domheap_pages_on_node(NULL, order, 0, d->vcpu[0]);
+                alloc_domheap_pages_on_node(NULL, order, 0,
+                    cpu_to_node(d->vcpu[0]->processor));
             if ( sp == NULL ) 
             { 
                 SHADOW_PRINTK("failed to allocate shadow pages.\n");
diff -r b1cd7c0749f4 xen/common/memory.c
--- a/xen/common/memory.c       Wed Apr 02 16:07:24 2008 +0200
+++ b/xen/common/memory.c       Thu Apr 03 00:38:53 2008 +0200
@@ -21,6 +21,7 @@
 #include <xen/errno.h>
 #include <asm/current.h>
 #include <asm/hardirq.h>
+#include <asm/numa.h>
 #include <public/memory.h>
 #include <xsm/xsm.h>
 
@@ -37,10 +38,10 @@ struct memop_args {
     int          preempted;  /* Was the hypercall preempted? */
 };
 
-static unsigned int select_local_cpu(struct domain *d)
+static unsigned int select_local_node(struct domain *d)
 {
     struct vcpu *v = d->vcpu[0];
-    return (v ? v->processor : 0);
+    return (v ? cpu_to_node(v->processor) : 0);
 }
 
 static void increase_reservation(struct memop_args *a)
@@ -49,7 +50,7 @@ static void increase_reservation(struct 
     unsigned long i;
     xen_pfn_t mfn;
     struct domain *d = a->domain;
-    unsigned int cpu = select_local_cpu(d);
+    unsigned int node = select_local_node(d);
 
     if ( !guest_handle_is_null(a->extent_list) &&
          !guest_handle_okay(a->extent_list, a->nr_extents) )
@@ -67,7 +68,8 @@ static void increase_reservation(struct 
             goto out;
         }
 
-        page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
+        page = alloc_domheap_pages_on_node(
+            d, a->extent_order, a->memflags, node);
         if ( unlikely(page == NULL) ) 
         {
             gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
@@ -96,7 +98,7 @@ static void populate_physmap(struct memo
     unsigned long i, j;
     xen_pfn_t gpfn, mfn;
     struct domain *d = a->domain;
-    unsigned int cpu = select_local_cpu(d);
+    unsigned int node = select_local_node(d);
 
     if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
         return;
@@ -116,7 +118,8 @@ static void populate_physmap(struct memo
         if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
             goto out;
 
-        page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
+        page = alloc_domheap_pages_on_node(
+            d, a->extent_order, a->memflags, node);
         if ( unlikely(page == NULL) ) 
         {
             gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
@@ -296,7 +299,7 @@ static long memory_exchange(XEN_GUEST_HA
     unsigned long in_chunk_order, out_chunk_order;
     xen_pfn_t     gpfn, gmfn, mfn;
     unsigned long i, j, k;
-    unsigned int  memflags = 0, cpu;
+    unsigned int  memflags = 0, node;
     long          rc = 0;
     struct domain *d;
     struct page_info *page;
@@ -352,7 +355,7 @@ static long memory_exchange(XEN_GUEST_HA
     memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
         d, exch.out.address_bits ? : (BITS_PER_LONG+PAGE_SHIFT)));
 
-    cpu = select_local_cpu(d);
+    node = select_local_node(d);
 
     for ( i = (exch.nr_exchanged >> in_chunk_order);
           i < (exch.in.nr_extents >> in_chunk_order);
@@ -401,8 +404,8 @@ static long memory_exchange(XEN_GUEST_HA
         /* Allocate a chunk's worth of anonymous output pages. */
         for ( j = 0; j < (1UL << out_chunk_order); j++ )
         {
-            page = __alloc_domheap_pages(
-                NULL, cpu, exch.out.extent_order, memflags);
+            page = alloc_domheap_pages_on_node(
+                NULL, exch.out.extent_order, memflags, node);
             if ( unlikely(page == NULL) )
             {
                 rc = -ENOMEM;
diff -r b1cd7c0749f4 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Wed Apr 02 16:07:24 2008 +0200
+++ b/xen/common/page_alloc.c   Thu Apr 03 00:38:53 2008 +0200
@@ -36,6 +36,7 @@
 #include <xen/numa.h>
 #include <xen/nodemask.h>
 #include <asm/page.h>
+#include <asm/numa.h>
 #include <asm/flushtlb.h>
 
 /*
@@ -328,10 +329,10 @@ static void init_node_heap(int node)
 /* Allocate 2^@order contiguous pages. */
 static struct page_info *alloc_heap_pages(
     unsigned int zone_lo, unsigned int zone_hi,
-    unsigned int cpu, unsigned int order)
+    unsigned int node, unsigned int order)
 {
     unsigned int i, j, zone;
-    unsigned int node = cpu_to_node(cpu), num_nodes = num_online_nodes();
+    unsigned int num_nodes = num_online_nodes();
     unsigned long request = 1UL << order;
     cpumask_t extra_cpus_mask, mask;
     struct page_info *pg;
@@ -670,7 +671,8 @@ void *alloc_xenheap_pages(unsigned int o
 
     ASSERT(!in_irq());
 
-    pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, smp_processor_id(), order);
+    pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, 
+        cpu_to_node(smp_processor_id()), order);
     if ( unlikely(pg == NULL) )
         goto no_memory;
 
@@ -778,9 +780,9 @@ int assign_pages(
 }
 
 
-struct page_info *__alloc_domheap_pages(
-    struct domain *d, unsigned int cpu, unsigned int order, 
-    unsigned int memflags)
+struct page_info *alloc_domheap_pages_on_node(
+    struct domain *d, unsigned int order, unsigned int memflags,
+    unsigned int node)
 {
     struct page_info *pg = NULL;
     unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
@@ -797,7 +799,7 @@ struct page_info *__alloc_domheap_pages(
 
     if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
     {
-        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, cpu, order);
+        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, node, order);
 
         /* Failure? Then check if we can fall back to the DMA pool. */
         if ( unlikely(pg == NULL) &&
@@ -811,7 +813,7 @@ struct page_info *__alloc_domheap_pages(
 
     if ( (pg == NULL) &&
          ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
-                                 cpu, order)) == NULL) )
+                                 node, order)) == NULL) )
          return NULL;
 
     if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
@@ -826,14 +828,8 @@ struct page_info *alloc_domheap_pages(
 struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int flags)
 {
-    return __alloc_domheap_pages(d, smp_processor_id(), order, flags);
-}
-
-struct page_info *alloc_domheap_pages_on_node(
-    struct domain *d, unsigned int order, unsigned int flags, struct vcpu *v)
-{
-    unsigned int cpu = v ? v->processor : smp_processor_id();
-    return __alloc_domheap_pages(d, cpu, order, flags);
+    return alloc_domheap_pages_on_node(d, order, flags,
+        cpu_to_node(smp_processor_id()));
 }
 
 void free_domheap_pages(struct page_info *pg, unsigned int order)
diff -r b1cd7c0749f4 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Wed Apr 02 16:07:24 2008 +0200
+++ b/xen/drivers/passthrough/vtd/iommu.c       Thu Apr 03 00:38:53 2008 +0200
@@ -24,6 +24,7 @@
 #include <xen/xmalloc.h>
 #include <xen/domain_page.h>
 #include <xen/iommu.h>
+#include <asm/numa.h>
 #include "iommu.h"
 #include "dmar.h"
 #include "../pci-direct.h"
@@ -269,7 +270,8 @@ static struct page_info *addr_to_dma_pag
 
         if ( dma_pte_addr(*pte) == 0 )
         {
-            pg = alloc_domheap_page_on_node(NULL, domain->vcpu[0]);
+            pg = alloc_domheap_page_on_node(NULL,
+                cpu_to_node(domain->vcpu[0]->processor));
             vaddr = map_domain_page(page_to_mfn(pg));
             if ( !vaddr )
             {
diff -r b1cd7c0749f4 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h      Wed Apr 02 16:07:24 2008 +0200
+++ b/xen/include/xen/mm.h      Thu Apr 03 00:38:53 2008 +0200
@@ -55,16 +55,14 @@ struct page_info *alloc_domheap_pages(
 struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int memflags);
 struct page_info *alloc_domheap_pages_on_node(
-    struct domain *d, unsigned int order, unsigned int memflags, struct vcpu *v);
-struct page_info *__alloc_domheap_pages(
-    struct domain *d, unsigned int cpu, unsigned int order, 
-    unsigned int memflags);
+    struct domain *d, unsigned int order, unsigned int memflags,
+    unsigned int node_id);
 void free_domheap_pages(struct page_info *pg, unsigned int order);
 unsigned long avail_domheap_pages_region(
     unsigned int node, unsigned int min_width, unsigned int max_width);
 unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
-#define alloc_domheap_page_on_node(d, v) (alloc_domheap_pages_on_node(d,0,0,v))
+#define alloc_domheap_page_on_node(d, n) (alloc_domheap_pages_on_node(d,0,0,n))
 #define free_domheap_page(p)  (free_domheap_pages(p,0))
 
 void scrub_heap_pages(void);
diff -r db943e8d1051 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/arch/x86/domain.c     Thu Apr 03 00:38:44 2008 +0200
@@ -46,6 +46,7 @@
 #include <asm/debugreg.h>
 #include <asm/msr.h>
 #include <asm/nmi.h>
+#include <asm/numa.h>
 #include <xen/iommu.h>
 #ifdef CONFIG_COMPAT
 #include <compat/vcpu.h>
@@ -477,8 +478,9 @@ int arch_domain_create(struct domain *d,
 
 #else /* __x86_64__ */
 
-    if ( (pg = alloc_domheap_page(NULL)) == NULL )
-        goto fail;
+    if ( (pg = alloc_domheap_page_on_node(NULL,
+        cpu_to_node(d->vcpu[0]->processor))) == NULL )
+            goto fail;
     d->arch.mm_perdomain_l2 = page_to_virt(pg);
     clear_page(d->arch.mm_perdomain_l2);
     for ( i = 0; i < (1 << pdpt_order); i++ )
@@ -486,8 +488,9 @@ int arch_domain_create(struct domain *d,
             l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
                           __PAGE_HYPERVISOR);
 
-    if ( (pg = alloc_domheap_page(NULL)) == NULL )
-        goto fail;
+    if ( (pg = alloc_domheap_page_on_node(NULL,
+        cpu_to_node(d->vcpu[0]->processor))) == NULL )
+            goto fail;
     d->arch.mm_perdomain_l3 = page_to_virt(pg);
     clear_page(d->arch.mm_perdomain_l3);
     d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
diff -r db943e8d1051 xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/arch/x86/hvm/stdvga.c Thu Apr 03 00:38:44 2008 +0200
@@ -32,6 +32,7 @@
 #include <xen/sched.h>
 #include <xen/domain_page.h>
 #include <asm/hvm/support.h>
+#include <asm/numa.h>
 
 #define PAT(x) (x)
 static const uint32_t mask16[16] = {
@@ -513,8 +514,9 @@ void stdvga_init(struct domain *d)
     
     for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
     {
-        if ( (pg = alloc_domheap_page(NULL)) == NULL )
-            break;
+        if ( (pg = alloc_domheap_page_on_node(NULL,
+            cpu_to_node(d->vcpu[0]->processor))) == NULL )
+                break;
         s->vram_page[i] = pg;
         p = map_domain_page(page_to_mfn(pg));
         clear_page(p);
diff -r db943e8d1051 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Thu Apr 03 00:38:45 2008 +0200
@@ -33,6 +33,7 @@
 #include <xen/sched.h>
 #include <asm/current.h>
 #include <asm/hvm/vmx/vmx.h>
+#include <asm/numa.h>
 #include <public/hvm/ioreq.h>
 #include <public/hvm/params.h>
 
@@ -928,7 +929,8 @@ int vlapic_init(struct vcpu *v)
         memflags = MEMF_bits(32);
 #endif
 
-    vlapic->regs_page = alloc_domheap_pages(NULL, 0, memflags);
+    vlapic->regs_page = alloc_domheap_pages_on_node(NULL, 0, memflags,
+        cpu_to_node(v->processor));
     if ( vlapic->regs_page == NULL )
     {
         dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
diff -r db943e8d1051 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Thu Apr 03 00:38:45 2008 +0200
@@ -38,6 +38,7 @@
 #include <asm/hap.h>
 #include <asm/paging.h>
 #include <asm/domain.h>
+#include <asm/numa.h>
 
 #include "private.h"
 
@@ -135,7 +136,8 @@ static struct page_info *hap_alloc_p2m_p
          && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
     {
         free_domheap_page(pg);
-        pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
+        pg = alloc_domheap_pages_on_node(NULL, 0, MEMF_bits(32),
+            cpu_to_node(d->vcpu[0]->processor));
         if ( likely(pg != NULL) )
         {
             void *p = hap_map_domain_page(page_to_mfn(pg));
@@ -199,7 +201,8 @@ hap_set_allocation(struct domain *d, uns
         if ( d->arch.paging.hap.total_pages < pages )
         {
             /* Need to allocate more memory from domheap */
-            pg = alloc_domheap_page(NULL);
+            pg = alloc_domheap_page_on_node(NULL,
+                cpu_to_node(d->vcpu[0]->processor));
             if ( pg == NULL )
             {
                 HAP_PRINTK("failed to allocate hap pages.\n");
diff -r db943e8d1051 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/arch/x86/mm/paging.c  Thu Apr 03 00:38:45 2008 +0200
@@ -26,6 +26,7 @@
 #include <asm/p2m.h>
 #include <asm/hap.h>
 #include <asm/guest_access.h>
+#include <asm/numa.h>
 #include <xsm/xsm.h>
 
 #define hap_enabled(d) (is_hvm_domain(d) && (d)->arch.hvm_domain.hap_enabled)
@@ -99,7 +100,8 @@ static mfn_t paging_new_log_dirty_page(s
 static mfn_t paging_new_log_dirty_page(struct domain *d, void **mapping_p)
 {
     mfn_t mfn;
-    struct page_info *page = alloc_domheap_page(NULL);
+    struct page_info *page = alloc_domheap_page_on_node(NULL,
+        cpu_to_node(d->vcpu[0]->processor));
 
     if ( unlikely(page == NULL) )
     {
diff -r db943e8d1051 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Apr 03 00:38:45 2008 +0200
@@ -36,6 +36,7 @@
 #include <asm/current.h>
 #include <asm/flushtlb.h>
 #include <asm/shadow.h>
+#include <asm/numa.h>
 #include "private.h"
 
 
@@ -1249,7 +1250,8 @@ static unsigned int sh_set_allocation(st
         {
             /* Need to allocate more memory from domheap */
             sp = (struct shadow_page_info *)
-                alloc_domheap_pages(NULL, order, 0);
+                alloc_domheap_pages_on_node(NULL, order, 0,
+                    cpu_to_node(d->vcpu[0]->processor));
             if ( sp == NULL ) 
             { 
                 SHADOW_PRINTK("failed to allocate shadow pages.\n");
diff -r db943e8d1051 xen/common/memory.c
--- a/xen/common/memory.c       Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/common/memory.c       Thu Apr 03 00:38:45 2008 +0200
@@ -21,6 +21,7 @@
 #include <xen/errno.h>
 #include <asm/current.h>
 #include <asm/hardirq.h>
+#include <asm/numa.h>
 #include <public/memory.h>
 #include <xsm/xsm.h>
 
@@ -37,10 +38,10 @@ struct memop_args {
     int          preempted;  /* Was the hypercall preempted? */
 };
 
-static unsigned int select_local_cpu(struct domain *d)
+static unsigned int select_local_node(struct domain *d)
 {
     struct vcpu *v = d->vcpu[0];
-    return (v ? v->processor : 0);
+    return (v ? cpu_to_node(v->processor) : 0);
 }
 
 static void increase_reservation(struct memop_args *a)
@@ -49,7 +50,7 @@ static void increase_reservation(struct 
     unsigned long i;
     xen_pfn_t mfn;
     struct domain *d = a->domain;
-    unsigned int cpu = select_local_cpu(d);
+    unsigned int node = select_local_node(d);
 
     if ( !guest_handle_is_null(a->extent_list) &&
          !guest_handle_okay(a->extent_list, a->nr_extents) )
@@ -67,7 +68,8 @@ static void increase_reservation(struct 
             goto out;
         }
 
-        page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
+        page = alloc_domheap_pages_on_node(
+            d, a->extent_order, a->memflags, node);
         if ( unlikely(page == NULL) ) 
         {
             gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
@@ -96,7 +98,7 @@ static void populate_physmap(struct memo
     unsigned long i, j;
     xen_pfn_t gpfn, mfn;
     struct domain *d = a->domain;
-    unsigned int cpu = select_local_cpu(d);
+    unsigned int node = select_local_node(d);
 
     if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
         return;
@@ -116,7 +118,8 @@ static void populate_physmap(struct memo
         if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
             goto out;
 
-        page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
+        page = alloc_domheap_pages_on_node(
+            d, a->extent_order, a->memflags, node);
         if ( unlikely(page == NULL) ) 
         {
             gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
@@ -296,7 +299,7 @@ static long memory_exchange(XEN_GUEST_HA
     unsigned long in_chunk_order, out_chunk_order;
     xen_pfn_t     gpfn, gmfn, mfn;
     unsigned long i, j, k;
-    unsigned int  memflags = 0, cpu;
+    unsigned int  memflags = 0, node;
     long          rc = 0;
     struct domain *d;
     struct page_info *page;
@@ -352,7 +355,7 @@ static long memory_exchange(XEN_GUEST_HA
     memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
         d, exch.out.address_bits ? : (BITS_PER_LONG+PAGE_SHIFT)));
 
-    cpu = select_local_cpu(d);
+    node = select_local_node(d);
 
     for ( i = (exch.nr_exchanged >> in_chunk_order);
           i < (exch.in.nr_extents >> in_chunk_order);
@@ -401,8 +404,8 @@ static long memory_exchange(XEN_GUEST_HA
         /* Allocate a chunk's worth of anonymous output pages. */
         for ( j = 0; j < (1UL << out_chunk_order); j++ )
         {
-            page = __alloc_domheap_pages(
-                NULL, cpu, exch.out.extent_order, memflags);
+            page = alloc_domheap_pages_on_node(
+                NULL, exch.out.extent_order, memflags, node);
             if ( unlikely(page == NULL) )
             {
                 rc = -ENOMEM;
diff -r db943e8d1051 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/common/page_alloc.c   Thu Apr 03 00:38:45 2008 +0200
@@ -36,6 +36,7 @@
 #include <xen/numa.h>
 #include <xen/nodemask.h>
 #include <asm/page.h>
+#include <asm/numa.h>
 #include <asm/flushtlb.h>
 
 /*
@@ -328,10 +329,10 @@ static void init_node_heap(int node)
 /* Allocate 2^@order contiguous pages. */
 static struct page_info *alloc_heap_pages(
     unsigned int zone_lo, unsigned int zone_hi,
-    unsigned int cpu, unsigned int order)
+    unsigned int node, unsigned int order)
 {
     unsigned int i, j, zone;
-    unsigned int node = cpu_to_node(cpu), num_nodes = num_online_nodes();
+    unsigned int num_nodes = num_online_nodes();
     unsigned long request = 1UL << order;
     cpumask_t extra_cpus_mask, mask;
     struct page_info *pg;
@@ -670,7 +671,8 @@ void *alloc_xenheap_pages(unsigned int o
 
     ASSERT(!in_irq());
 
-    pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, smp_processor_id(), order);
+    pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN, 
+        cpu_to_node(smp_processor_id()), order);
     if ( unlikely(pg == NULL) )
         goto no_memory;
 
@@ -778,9 +780,9 @@ int assign_pages(
 }
 
 
-struct page_info *__alloc_domheap_pages(
-    struct domain *d, unsigned int cpu, unsigned int order, 
-    unsigned int memflags)
+struct page_info *alloc_domheap_pages_on_node(
+    struct domain *d, unsigned int order, unsigned int memflags,
+    unsigned int node)
 {
     struct page_info *pg = NULL;
     unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
@@ -797,7 +799,7 @@ struct page_info *__alloc_domheap_pages(
 
     if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
     {
-        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, cpu, order);
+        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, node, order);
 
         /* Failure? Then check if we can fall back to the DMA pool. */
         if ( unlikely(pg == NULL) &&
@@ -811,7 +813,7 @@ struct page_info *__alloc_domheap_pages(
 
     if ( (pg == NULL) &&
          ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
-                                 cpu, order)) == NULL) )
+                                 node, order)) == NULL) )
          return NULL;
 
     if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
@@ -826,7 +828,8 @@ struct page_info *alloc_domheap_pages(
 struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int flags)
 {
-    return __alloc_domheap_pages(d, smp_processor_id(), order, flags);
+    return alloc_domheap_pages_on_node(d, order, flags,
+        cpu_to_node(smp_processor_id()));
 }
 
 void free_domheap_pages(struct page_info *pg, unsigned int order)
diff -r db943e8d1051 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Thu Apr 03 00:38:45 2008 +0200
@@ -24,6 +24,7 @@
 #include <xen/xmalloc.h>
 #include <xen/domain_page.h>
 #include <xen/iommu.h>
+#include <asm/numa.h>
 #include "iommu.h"
 #include "dmar.h"
 #include "../pci-direct.h"
@@ -269,7 +270,8 @@ static struct page_info *addr_to_dma_pag
 
         if ( dma_pte_addr(*pte) == 0 )
         {
-            pg = alloc_domheap_page(NULL);
+            pg = alloc_domheap_page_on_node(NULL,
+                cpu_to_node(domain->vcpu[0]->processor));
             vaddr = map_domain_page(page_to_mfn(pg));
             if ( !vaddr )
             {
diff -r db943e8d1051 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h      Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/include/xen/mm.h      Thu Apr 03 00:38:45 2008 +0200
@@ -54,14 +54,15 @@ void init_domheap_pages(paddr_t ps, padd
 void init_domheap_pages(paddr_t ps, paddr_t pe);
 struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int memflags);
-struct page_info *__alloc_domheap_pages(
-    struct domain *d, unsigned int cpu, unsigned int order, 
-    unsigned int memflags);
+struct page_info *alloc_domheap_pages_on_node(
+    struct domain *d, unsigned int order, unsigned int memflags,
+    unsigned int node_id);
 void free_domheap_pages(struct page_info *pg, unsigned int order);
 unsigned long avail_domheap_pages_region(
     unsigned int node, unsigned int min_width, unsigned int max_width);
 unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
+#define alloc_domheap_page_on_node(d, n) (alloc_domheap_pages_on_node(d,0,0,n))
 #define free_domheap_page(p)  (free_domheap_pages(p,0))
 
 void scrub_heap_pages(void);

 

