[Xen-changelog] [linux-2.6.18-xen] xen/balloon: fix balloon driver accounting for HVM-with-PoD case



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1265033556 0
# Node ID a7781c0a3b9aa9c6daeabb8f58e17dea1c34850b
# Parent  c88a02a22a057a632e6c21442e42e56e07904988
xen/balloon: fix balloon driver accounting for HVM-with-PoD case

With PoD (populate-on-demand), ballooning a guest down to the target
set through xenstore isn't sufficient when the accounting is based on
its totalram_pages value, since that value doesn't include all of the
pages assigned to the guest. Since the delta between the two counts is
constant, determine it once at driver load time.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 drivers/xen/balloon/balloon.c |   47 +++++++++++++++++++++++++++++++++++++-----
 1 files changed, 42 insertions(+), 5 deletions(-)
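
The idea, roughly: in the HVM/PoD case the hypervisor-visible page count and
the kernel's totalram_pages differ by a constant offset, so the driver records
that offset once as totalram_bias and subtracts it whenever it mirrors
bs.current_pages back into totalram_pages. A minimal sketch, not part of the
patch (the helper name is made up; the other identifiers follow the diff):

        /* Offset between Xen's view of the domain's allocation and the
         * kernel's totalram_pages, fixed once at driver init. */
        static unsigned long totalram_bias;

        /* Hypothetical helper: keep totalram_pages in sync with the
         * Xen-visible current allocation. */
        static void sync_totalram(unsigned long xen_current_pages)
        {
                totalram_pages = xen_current_pages - totalram_bias;
        }

        /* e.g. after a successful increase_reservation():
         *     bs.current_pages += rc;
         *     sync_totalram(bs.current_pages);
         */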

diff -r c88a02a22a05 -r a7781c0a3b9a drivers/xen/balloon/balloon.c
--- a/drivers/xen/balloon/balloon.c     Fri Jan 29 07:57:48 2010 +0000
+++ b/drivers/xen/balloon/balloon.c     Mon Feb 01 14:12:36 2010 +0000
@@ -93,6 +93,18 @@ extern unsigned long totalhigh_pages;
 #define dec_totalhigh_pages() ((void)0)
 #endif
 
+#ifndef CONFIG_XEN
+/*
+ * In HVM guests accounting here uses the Xen visible values, but the kernel
+ * determined totalram_pages value shouldn't get altered. Since totalram_pages
+ * includes neither the kernel static image nor any memory allocated prior to
+ * or from the bootmem allocator, we have to synchronize the two values.
+ */
+static unsigned long __read_mostly totalram_bias;
+#else
+#define totalram_bias 0
+#endif
+
 /* List of ballooned pages, threaded through the mem_map array. */
 static LIST_HEAD(ballooned_pages);
 
@@ -289,7 +301,7 @@ static int increase_reservation(unsigned
        }
 
        bs.current_pages += rc;
-       totalram_pages = bs.current_pages;
+       totalram_pages = bs.current_pages - totalram_bias;
 
  out:
        balloon_unlock(flags);
@@ -362,7 +374,7 @@ static int decrease_reservation(unsigned
        BUG_ON(ret != nr_pages);
 
        bs.current_pages -= nr_pages;
-       totalram_pages = bs.current_pages;
+       totalram_pages = bs.current_pages - totalram_bias;
 
        balloon_unlock(flags);
 
@@ -498,7 +510,19 @@ static struct notifier_block xenstore_no
 
 static int __init balloon_init(void)
 {
-#if defined(CONFIG_X86) && defined(CONFIG_XEN) 
+#if !defined(CONFIG_XEN)
+# ifndef XENMEM_get_pod_target
+#  define XENMEM_get_pod_target 17
+       typedef struct xen_pod_target {
+               uint64_t target_pages;
+               uint64_t tot_pages;
+               uint64_t pod_cache_pages;
+               uint64_t pod_entries;
+               domid_t domid;
+       } xen_pod_target_t;
+# endif
+       xen_pod_target_t pod_target = { .domid = DOMID_SELF };
+#elif defined(CONFIG_X86)
        unsigned long pfn;
        struct page *page;
 #endif
@@ -512,7 +536,20 @@ static int __init balloon_init(void)
        bs.current_pages = min(xen_start_info->nr_pages, max_pfn);
        totalram_pages   = bs.current_pages;
 #else 
-       bs.current_pages = totalram_pages; 
+       totalram_bias = HYPERVISOR_memory_op(
+               HYPERVISOR_memory_op(XENMEM_get_pod_target,
+                       &pod_target) != -ENOSYS
+               ? XENMEM_maximum_reservation
+               : XENMEM_current_reservation,
+               &pod_target.domid);
+       if ((long)totalram_bias != -ENOSYS) {
+               BUG_ON(totalram_bias < totalram_pages);
+               bs.current_pages = totalram_bias;
+               totalram_bias -= totalram_pages;
+       } else {
+               totalram_bias = 0;
+               bs.current_pages = totalram_pages;
+       }
 #endif
        bs.target_pages  = bs.current_pages;
        bs.balloon_low   = 0;
@@ -651,7 +688,7 @@ struct page **alloc_empty_pages_and_page
                        goto err;
                }
 
-               totalram_pages = --bs.current_pages;
+               totalram_pages = --bs.current_pages - totalram_bias;
 
                balloon_unlock(flags);
        }
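
For reference, the init-time computation above probes whether the hypervisor
implements XENMEM_get_pod_target: if it does, the guest's true size is taken
from XENMEM_maximum_reservation, otherwise from XENMEM_current_reservation;
if even that query returns -ENOSYS, the bias stays zero and the pre-patch
behaviour is kept. The same flow with the nested calls unrolled (illustrative
sketch only, same identifiers as the hunk above):

        unsigned long pages;
        unsigned int cmd;

        /* Prefer the maximum reservation when the hypervisor supports
         * PoD target queries, else fall back to the current reservation. */
        if (HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS)
                cmd = XENMEM_maximum_reservation;
        else
                cmd = XENMEM_current_reservation;

        pages = HYPERVISOR_memory_op(cmd, &pod_target.domid);
        if ((long)pages != -ENOSYS) {
                BUG_ON(pages < totalram_pages);
                bs.current_pages = pages;
                totalram_bias    = pages - totalram_pages;
        } else {
                totalram_bias    = 0;
                bs.current_pages = totalram_pages;
        }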
