
[Xen-devel] [PATCH 2/4] [HVM] introduce CPU affinity for allocate_physmap call



Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
# HG changeset patch
# User andre.przywara@xxxxxxx
# Date 1186492260 -7200
# Node ID e730c1207604414f6f2779cc6adb213e3c1362eb
# Parent  0534ec5aa830c665ac95bc0750a22cd6c5413733
made HVM memory allocation CPU aware
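
(For illustration only, not part of the patch: a minimal sketch of how a
toolstack caller might use the extended interface. xc_handle, domid and
base_pfn are placeholders; passing a real CPU number instead of
XENMEM_DEFAULT_CPU asks Xen to take the memory from that CPU's NUMA node.)

    /* Allocate 1024 pages for `domid`, preferring the NUMA node of CPU 2. */
    xen_pfn_t pfns[1024];
    unsigned long i;
    int rc;

    for ( i = 0; i < 1024; i++ )
        pfns[i] = base_pfn + i;           /* guest pfns to populate */

    rc = xc_domain_memory_populate_physmap(xc_handle, domid,
                                           1024,  /* nr_extents   */
                                           0,     /* extent_order */
                                           0,     /* address_bits */
                                           2,     /* cpu          */
                                           pfns);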

diff -r 0534ec5aa830 -r e730c1207604 tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Tue Aug 07 14:28:18 2007 +0200
+++ b/tools/ioemu/vl.c  Tue Aug 07 15:11:00 2007 +0200
@@ -6847,7 +6847,7 @@ int set_mm_mapping(int xc_handle, uint32
     }
 
     err = xc_domain_memory_populate_physmap(xc_handle, domid, nr_pages, 0,
-                                            address_bits, extent_start);
+                            address_bits, XENMEM_DEFAULT_CPU, extent_start);
     if (err) {
         fprintf(stderr, "Failed to populate physmap\n");
         return -1;
diff -r 0534ec5aa830 -r e730c1207604 tools/libxc/xc_dom_x86.c
--- a/tools/libxc/xc_dom_x86.c  Tue Aug 07 14:28:18 2007 +0200
+++ b/tools/libxc/xc_dom_x86.c  Tue Aug 07 15:11:00 2007 +0200
@@ -711,7 +711,7 @@ int arch_setup_meminit(struct xc_dom_ima
     /* allocate guest memory */
     rc = xc_domain_memory_populate_physmap(dom->guest_xc, dom->guest_domid,
                                            dom->total_pages, 0, 0,
-                                           dom->p2m_host);
+                                           XENMEM_DEFAULT_CPU, dom->p2m_host);
     return rc;
 }
 
diff -r 0534ec5aa830 -r e730c1207604 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Tue Aug 07 14:28:18 2007 +0200
+++ b/tools/libxc/xc_domain.c   Tue Aug 07 15:11:00 2007 +0200
@@ -506,6 +506,7 @@ int xc_domain_memory_populate_physmap(in
                                           unsigned long nr_extents,
                                           unsigned int extent_order,
                                           unsigned int address_bits,
+                                          unsigned int cpu,
                                           xen_pfn_t *extent_start)
 {
     int err;
@@ -513,7 +514,8 @@ int xc_domain_memory_populate_physmap(in
         .nr_extents   = nr_extents,
         .extent_order = extent_order,
         .address_bits = address_bits,
-        .domid        = domid
+        .domid        = domid,
+        .cpu          = cpu
     };
     set_xen_guest_handle(reservation.extent_start, extent_start);
 
diff -r 0534ec5aa830 -r e730c1207604 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c   Tue Aug 07 14:28:18 2007 +0200
+++ b/tools/libxc/xc_domain_restore.c   Tue Aug 07 15:11:00 2007 +0200
@@ -126,7 +126,7 @@ static int uncanonicalize_pagetable(int 
     /* Allocate the requisite number of mfns. */
     if ( nr_mfns &&
          (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0, 0,
-                                            p2m_batch) != 0) )
+                                     XENMEM_DEFAULT_CPU, p2m_batch) != 0) )
     { 
         ERROR("Failed to allocate memory for batch.!\n"); 
         errno = ENOMEM;
@@ -495,7 +495,7 @@ int xc_domain_restore(int xc_handle, int
         /* Now allocate a bunch of mfns for this batch */
         if ( nr_mfns &&
              (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0,
-                                                0, p2m_batch) != 0) )
+                             0, XENMEM_DEFAULT_CPU, p2m_batch) != 0) )
         { 
             ERROR("Failed to allocate memory for batch.!\n"); 
             errno = ENOMEM;
diff -r 0534ec5aa830 -r e730c1207604 tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c        Tue Aug 07 14:28:18 2007 +0200
+++ b/tools/libxc/xc_hvm_build.c        Tue Aug 07 15:11:00 2007 +0200
@@ -211,10 +211,11 @@ static int setup_guest(int xc_handle,
 
     /* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000. */
     rc = xc_domain_memory_populate_physmap(
-        xc_handle, dom, 0xa0, 0, 0, &page_array[0x00]);
+        xc_handle, dom, 0xa0, 0, 0, XENMEM_DEFAULT_CPU, &page_array[0x00]);
     if ( rc == 0 )
         rc = xc_domain_memory_populate_physmap(
-            xc_handle, dom, nr_pages - 0xc0, 0, 0, &page_array[0xc0]);
+            xc_handle, dom, nr_pages - 0xc0, 0, 0, XENMEM_DEFAULT_CPU,
+            &page_array[0xc0]);
     if ( rc != 0 )
     {
         PERROR("Could not allocate memory for HVM guest.\n");
diff -r 0534ec5aa830 -r e730c1207604 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Tue Aug 07 14:28:18 2007 +0200
+++ b/tools/libxc/xenctrl.h     Tue Aug 07 15:11:00 2007 +0200
@@ -526,6 +526,7 @@ int xc_domain_memory_populate_physmap(in
                                       unsigned long nr_extents,
                                       unsigned int extent_order,
                                       unsigned int address_bits,
+                                      unsigned int cpu,
                                       xen_pfn_t *extent_start);
 
 int xc_domain_ioport_permission(int xc_handle,
diff -r 0534ec5aa830 -r e730c1207604 xen/common/memory.c
--- a/xen/common/memory.c       Tue Aug 07 14:28:18 2007 +0200
+++ b/xen/common/memory.c       Tue Aug 07 15:11:00 2007 +0200
@@ -30,6 +30,7 @@ struct memop_args {
     unsigned int nr_extents;   /* Number of extents to allocate or free. */
     unsigned int extent_order; /* Size of each extent. */
     unsigned int memflags;     /* Allocation flags. */
+    unsigned int cpu;          /* CPU (NUMA node) to take the memory from */
 
     /* INPUT/OUTPUT */
     unsigned int nr_done;    /* Number of extents processed so far. */
@@ -48,7 +49,7 @@ static void increase_reservation(struct 
     unsigned long i;
     xen_pfn_t mfn;
     struct domain *d = a->domain;
-    unsigned int cpu = select_local_cpu(d);
+    unsigned int cpu;
 
     if ( !guest_handle_is_null(a->extent_list) &&
          !guest_handle_okay(a->extent_list, a->nr_extents) )
@@ -57,6 +58,9 @@ static void increase_reservation(struct 
     if ( (a->extent_order != 0) &&
          !multipage_allocation_permitted(current->domain) )
         return;
+
+    cpu = (a->cpu == XENMEM_DEFAULT_CPU) ? select_local_cpu(d)
+                                         : a->cpu;
 
     for ( i = a->nr_done; i < a->nr_extents; i++ )
     {
@@ -95,7 +99,7 @@ static void populate_physmap(struct memo
     unsigned long i, j;
     xen_pfn_t gpfn, mfn;
     struct domain *d = a->domain;
-    unsigned int cpu = select_local_cpu(d);
+    unsigned int cpu;
 
     if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
         return;
@@ -103,6 +107,9 @@ static void populate_physmap(struct memo
     if ( (a->extent_order != 0) &&
          !multipage_allocation_permitted(current->domain) )
         return;
+
+    cpu = (a->cpu == XENMEM_DEFAULT_CPU) ? select_local_cpu(d)
+                                         : a->cpu;
 
     for ( i = a->nr_done; i < a->nr_extents; i++ )
     {
@@ -518,6 +525,7 @@ long do_memory_op(unsigned long cmd, XEN
         args.extent_list  = reservation.extent_start;
         args.nr_extents   = reservation.nr_extents;
         args.extent_order = reservation.extent_order;
+        args.cpu          = reservation.cpu;
         args.nr_done      = start_extent;
         args.preempted    = 0;
         args.memflags     = 0;
diff -r 0534ec5aa830 -r e730c1207604 xen/include/public/memory.h
--- a/xen/include/public/memory.h       Tue Aug 07 14:28:18 2007 +0200
+++ b/xen/include/public/memory.h       Tue Aug 07 15:11:00 2007 +0200
@@ -35,6 +35,7 @@
 #define XENMEM_increase_reservation 0
 #define XENMEM_decrease_reservation 1
 #define XENMEM_populate_physmap     6
+#define XENMEM_DEFAULT_CPU ((unsigned int)-1)
 struct xen_memory_reservation {
 
     /*
@@ -66,6 +67,7 @@ struct xen_memory_reservation {
      * Unprivileged domains can specify only DOMID_SELF.
      */
     domid_t        domid;
+    unsigned int   cpu;    /* CPU (NUMA node) to take the memory from. */
 };
 typedef struct xen_memory_reservation xen_memory_reservation_t;
 DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
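
(Again for illustration only: at the hypercall level the new field rides in
xen_memory_reservation, mirroring what xc_domain_memory_populate_physmap now
does internally; nr and extent_start are placeholders.)

    struct xen_memory_reservation reservation = {
        .nr_extents   = nr,
        .extent_order = 0,
        .address_bits = 0,
        .domid        = DOMID_SELF,
        .cpu          = XENMEM_DEFAULT_CPU,  /* let Xen pick a local CPU */
    };
    int rc;

    set_xen_guest_handle(reservation.extent_start, extent_start);
    rc = xc_memory_op(xc_handle, XENMEM_populate_physmap, &reservation);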