
[Xen-changelog] [qemu-upstream-unstable] xen-hvm: increase maxmem before calling xc_domain_populate_physmap



commit 901230fd8ce053cc21312a2eca2f3ba9f1d103f2
Author:     Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
AuthorDate: Wed Dec 3 08:15:19 2014 -0500
Commit:     Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
CommitDate: Tue Jan 27 13:50:50 2015 +0000

    xen-hvm: increase maxmem before calling xc_domain_populate_physmap
    
    Increase maxmem before calling xc_domain_populate_physmap_exact to
    avoid the risk of running out of guest memory. This way we can also
    avoid complex memory calculations in libxl at domain construction
    time.
    
    This patch fixes an abort() when assigning more than 4 NICs to a VM.
    
    upstream-commit-id: c1d322e6048796296555dd36fdd102d7fa2f50bf
    
    Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
    Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
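
In outline, the change below asks Xen how much room the domain still has under its current maxmem (max_pages - tot_pages), keeps QEMU_SPARE_PAGES of that headroom in reserve for hvmloader, and raises maxmem only when the pages about to be populated would not fit. A minimal standalone sketch of that check, reusing the same libxc calls as the hunk below but with an illustrative helper name (ensure_headroom) and without QEMU's hw_error() handling, could look like this:

    /* Sketch only -- mirrors the logic added to xen_ram_alloc() below.
     * The helper name "ensure_headroom" and parameter "need_pages" are
     * illustrative, not part of the patch. Returns 0 on success, -1 on
     * failure. */
    #include <xenctrl.h>

    #define QEMU_SPARE_PAGES 16   /* same reserve as in the hunk below */

    static int ensure_headroom(xc_interface *xch, uint32_t domid,
                               unsigned long need_pages)
    {
        xc_domaininfo_t info;
        unsigned long free_pages;

        if (xc_domain_getinfolist(xch, domid, 1, &info) != 1 ||
            info.domain != domid) {
            return -1;
        }

        /* Pages the guest may still populate before hitting maxmem,
         * minus the small reserve kept for hvmloader. */
        free_pages = info.max_pages - info.tot_pages;
        free_pages = free_pages > QEMU_SPARE_PAGES ?
                     free_pages - QEMU_SPARE_PAGES : 0;

        if (free_pages >= need_pages) {
            return 0;   /* the allocation already fits under maxmem */
        }

        /* xc_domain_setmaxmem() takes its limit in KiB, hence the
         * page-to-KiB shift. */
        return xc_domain_setmaxmem(xch, domid,
                                   (info.max_pages + need_pages - free_pages)
                                   << (XC_PAGE_SHIFT - 10));
    }
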
 xen-hvm.c |   24 ++++++++++++++++++++++++
 1 files changed, 24 insertions(+), 0 deletions(-)

diff --git a/xen-hvm.c b/xen-hvm.c
index 7548794..e2e575b 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -90,6 +90,12 @@ static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
 #endif
 
 #define BUFFER_IO_MAX_DELAY  100
+/* Leave some slack so that hvmloader does not complain about lack of
+ * memory at boot time ("Could not allocate order=0 extent").
+ * Once hvmloader is modified to cope with that situation without
+ * printing warning messages, QEMU_SPARE_PAGES can be removed.
+ */
+#define QEMU_SPARE_PAGES 16
 
 typedef struct XenPhysmap {
     hwaddr start_addr;
@@ -244,6 +250,8 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
     unsigned long nr_pfn;
     xen_pfn_t *pfn_list;
     int i;
+    xc_domaininfo_t info;
+    unsigned long free_pages;
 
     if (runstate_check(RUN_STATE_INMIGRATE)) {
         /* RAM already populated in Xen */
@@ -266,6 +274,22 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
         pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
     }
 
+    if ((xc_domain_getinfolist(xen_xc, xen_domid, 1, &info) != 1) ||
+        (info.domain != xen_domid)) {
+        hw_error("xc_domain_getinfolist failed");
+    }
+    free_pages = info.max_pages - info.tot_pages;
+    if (free_pages > QEMU_SPARE_PAGES) {
+        free_pages -= QEMU_SPARE_PAGES;
+    } else {
+        free_pages = 0;
+    }
+    if ((free_pages < nr_pfn) &&
+        (xc_domain_setmaxmem(xen_xc, xen_domid,
+                             ((info.max_pages + nr_pfn - free_pages)
+                              << (XC_PAGE_SHIFT - 10))) < 0)) {
+        hw_error("xc_domain_setmaxmem failed");
+    }
     if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0,
                                          pfn_list)) {
         hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
     }
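
One detail worth spelling out: xc_domain_setmaxmem() takes its limit in KiB, while max_pages, tot_pages and nr_pfn count pages of (1 << XC_PAGE_SHIFT) bytes, so shifting a page count left by (XC_PAGE_SHIFT - 10) converts it to KiB. A small sanity check, assuming the usual XC_PAGE_SHIFT of 12 (4 KiB pages):

    #include <assert.h>
    #include <xenctrl.h>   /* defines XC_PAGE_SHIFT */

    int main(void)
    {
        /* With XC_PAGE_SHIFT == 12 the shift is by 2, i.e. a multiply
         * by 4: 1024 pages of 4 KiB each are 4096 KiB (4 MiB). */
        unsigned long pages = 1024;
        unsigned long kib   = pages << (XC_PAGE_SHIFT - 10);

        assert(kib == 4096);
        assert(kib == pages * ((1UL << XC_PAGE_SHIFT) / 1024));
        return 0;
    }
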
--
generated by git-patchbot for /home/xen/git/qemu-upstream-unstable.git
