
[Xen-devel] [PATCH v3 6/8] tools: arm: refactor code to setup guest p2m and fill it with RAM



This will help when we have more guest RAM banks.

Mostly code motion of the p2m_host initialisation and allocation loop into the
new function populate_guest_memory. In addition, the caller now initialises the
whole p2m to INVALID_MFN so that any holes are handled, although in this patch
we still fill in the entire allocated region.
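
For illustration only (not part of this change): once a second RAM bank is
introduced, the intent is that the caller simply invokes the new helper once
per bank. A minimal sketch, where GUEST_RAM1_BASE and ram1size are assumed
names standing in for whatever a later patch defines:

    if ((rc = populate_guest_memory(dom,
                                    GUEST_RAM0_BASE >> XC_PAGE_SHIFT,
                                    ram0size >> XC_PAGE_SHIFT)))
        return rc;
    /* Hypothetical second bank -- names are illustrative only. */
    if ((rc = populate_guest_memory(dom,
                                    GUEST_RAM1_BASE >> XC_PAGE_SHIFT,
                                    ram1size >> XC_PAGE_SHIFT)))
        return rc;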

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Julien Grall <julien.grall@xxxxxxxxxx>
---
v2: New patch
---
 tools/libxc/xc_dom_arm.c |   62 ++++++++++++++++++++++++++++++++--------------
 1 file changed, 43 insertions(+), 19 deletions(-)

diff --git a/tools/libxc/xc_dom_arm.c b/tools/libxc/xc_dom_arm.c
index b218e0f..cdb56e3 100644
--- a/tools/libxc/xc_dom_arm.c
+++ b/tools/libxc/xc_dom_arm.c
@@ -249,10 +249,42 @@ static int set_mode(xc_interface *xch, domid_t domid, char *guest_type)
     return rc;
 }
 
+static int populate_guest_memory(struct xc_dom_image *dom,
+                                 xen_pfn_t base_pfn, xen_pfn_t nr_pfns)
+{
+    int rc;
+    xen_pfn_t allocsz, pfn;
+
+    if (!nr_pfns)
+        return 0;
+
+    DOMPRINTF("%s: populating RAM @ %016"PRIx64"-%016"PRIx64" (%"PRId64"MB)",
+              __FUNCTION__,
+              (uint64_t)base_pfn << XC_PAGE_SHIFT,
+              (uint64_t)(base_pfn + nr_pfns) << XC_PAGE_SHIFT,
+              (uint64_t)nr_pfns >> (20-XC_PAGE_SHIFT));
+
+    for ( pfn = 0; pfn < nr_pfns; pfn++ )
+        dom->p2m_host[pfn] = base_pfn + pfn;
+
+    for ( pfn = rc = allocsz = 0; (pfn < nr_pfns) && !rc; pfn += allocsz )
+    {
+        allocsz = nr_pfns - pfn;
+        if ( allocsz > 1024*1024 )
+            allocsz = 1024*1024;
+
+        rc = xc_domain_populate_physmap_exact(
+            dom->xch, dom->guest_domid, allocsz,
+            0, 0, &dom->p2m_host[pfn]);
+    }
+
+    return rc;
+}
+
 int arch_setup_meminit(struct xc_dom_image *dom)
 {
     int rc;
-    xen_pfn_t pfn, allocsz, i;
+    xen_pfn_t pfn;
     uint64_t modbase;
 
     /* Convenient */
@@ -261,6 +293,8 @@ int arch_setup_meminit(struct xc_dom_image *dom)
     const uint64_t ram0size = ramsize;
     const uint64_t ram0end = GUEST_RAM0_BASE + ram0size;
 
+    const xen_pfn_t p2m_size = (ram0end - GUEST_RAM0_BASE) >> XC_PAGE_SHIFT;
+
     const uint64_t kernbase = dom->kernel_seg.vstart;
     const uint64_t kernend = ROUNDUP(dom->kernel_seg.vend, 21/*2MB*/);
     const uint64_t kernsize = kernend - kernbase;
@@ -294,27 +328,17 @@ int arch_setup_meminit(struct xc_dom_image *dom)
 
     dom->shadow_enabled = 1;
 
-    dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * dom->total_pages);
+    dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * p2m_size);
     if ( dom->p2m_host == NULL )
         return -EINVAL;
+    for ( pfn = 0; pfn < p2m_size; pfn++ )
+        dom->p2m_host[pfn] = INVALID_MFN;
 
-    /* setup initial p2m */
-    for ( pfn = 0; pfn < dom->total_pages; pfn++ )
-        dom->p2m_host[pfn] = pfn + dom->rambase_pfn;
-
-    /* allocate guest memory */
-    for ( i = rc = allocsz = 0;
-          (i < dom->total_pages) && !rc;
-          i += allocsz )
-    {
-        allocsz = dom->total_pages - i;
-        if ( allocsz > 1024*1024 )
-            allocsz = 1024*1024;
-
-        rc = xc_domain_populate_physmap_exact(
-            dom->xch, dom->guest_domid, allocsz,
-            0, 0, &dom->p2m_host[i]);
-    }
+    /* setup initial p2m and allocate guest memory */
+    if ((rc = populate_guest_memory(dom,
+                                    GUEST_RAM0_BASE >> XC_PAGE_SHIFT,
+                                    ram0size >> XC_PAGE_SHIFT)))
+        return rc;
 
     /*
      * We try to place dtb+initrd at 128MB or if we have less RAM
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

