[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] xen: Fix maximum_gpfn() hypercall to always return max_gpfn not nr_gpfns.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1177536151 -3600
# Node ID 55d0a5c70986bad197e06bf2a2e3f78854dea8da
# Parent  59ea9dadfd078d0beed86aa11c807aa62f6ce399
xen: Fix maximum_gpfn() hypercall to always return max_gpfn not nr_gpfns.
Fix callers to convert this to nr_gpfns (aka p2m_size) if that's what
they actually need.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 tools/libxc/xc_core_x86.c       |    8 ++++----
 tools/libxc/xc_domain_save.c    |    2 +-
 xen/arch/x86/mm.c               |    5 ++++-
 xen/arch/x86/mm/shadow/common.c |    3 +--
 4 files changed, 10 insertions(+), 8 deletions(-)

diff -r 59ea9dadfd07 -r 55d0a5c70986 tools/libxc/xc_core_x86.c
--- a/tools/libxc/xc_core_x86.c Wed Apr 25 22:06:13 2007 +0100
+++ b/tools/libxc/xc_core_x86.c Wed Apr 25 22:22:31 2007 +0100
@@ -21,9 +21,9 @@
 #include "xg_private.h"
 #include "xc_core.h"
 
-static int max_gpfn(int xc_handle, domid_t domid)
+static int nr_gpfns(int xc_handle, domid_t domid)
 {
-    return xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
+    return xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid) + 1;
 }
 
 int
@@ -38,7 +38,7 @@ xc_core_arch_memory_map_get(int xc_handl
                             xc_core_memory_map_t **mapp,
                             unsigned int *nr_entries)
 {
-    unsigned long p2m_size = max_gpfn(xc_handle, info->domid);
+    unsigned long p2m_size = nr_gpfns(xc_handle, info->domid);
     xc_core_memory_map_t *map;
 
     map = malloc(sizeof(*map));
@@ -65,7 +65,7 @@ xc_core_arch_map_p2m(int xc_handle, xc_d
     xen_pfn_t *live_p2m_frame_list_list = NULL;
     xen_pfn_t *live_p2m_frame_list = NULL;
     uint32_t dom = info->domid;
-    unsigned long p2m_size = max_gpfn(xc_handle, info->domid);
+    unsigned long p2m_size = nr_gpfns(xc_handle, info->domid);
     int ret = -1;
     int err;
 
diff -r 59ea9dadfd07 -r 55d0a5c70986 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c      Wed Apr 25 22:06:13 2007 +0100
+++ b/tools/libxc/xc_domain_save.c      Wed Apr 25 22:22:31 2007 +0100
@@ -870,7 +870,7 @@ int xc_domain_save(int xc_handle, int io
     }
 
     /* Get the size of the P2M table */
-    p2m_size = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom);
+    p2m_size = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom) + 1;
 
     /* Domain is still running at this point */
     if ( live )
diff -r 59ea9dadfd07 -r 55d0a5c70986 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Apr 25 22:06:13 2007 +0100
+++ b/xen/arch/x86/mm.c Wed Apr 25 22:22:31 2007 +0100
@@ -249,7 +249,10 @@ int memory_is_conventional_ram(paddr_t p
 
 unsigned long domain_get_maximum_gpfn(struct domain *d)
 {
-    return is_hvm_domain(d) ? d->arch.p2m.max_mapped_pfn : arch_get_max_pfn(d);
+    if ( is_hvm_domain(d) )
+        return d->arch.p2m.max_mapped_pfn;
+    /* NB. PV guests specify nr_pfns rather than max_pfn so we adjust here. */
+    return arch_get_max_pfn(d) - 1;
 }
 
 void share_xen_page_with_guest(
diff -r 59ea9dadfd07 -r 55d0a5c70986 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed Apr 25 22:06:13 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Wed Apr 25 22:22:31 2007 +0100
@@ -2668,8 +2668,7 @@ sh_alloc_log_dirty_bitmap(struct domain 
 {
     ASSERT(d->arch.paging.shadow.dirty_bitmap == NULL);
     d->arch.paging.shadow.dirty_bitmap_size =
-        (domain_get_maximum_gpfn(d) + (BITS_PER_LONG - 1)) &
-        ~(BITS_PER_LONG - 1);
+        (domain_get_maximum_gpfn(d) + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
     d->arch.paging.shadow.dirty_bitmap =
         xmalloc_array(unsigned long,
                       d->arch.paging.shadow.dirty_bitmap_size / BITS_PER_LONG);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with Rackspace, which monitors our
servers 24x7x365, backed by Rackspace's Fanatical Support®.