
[Xen-devel] [PATCH] xen: fix setup of PVH Dom0 memory map

This patch adds the memory removed by MMIO holes back at the end of the
memory map for PVH Dom0, so the guest OS doesn't have to populate this
memory manually: the pages that would otherwise be lost to the holes are
instead populated in RAM regions past the end of the assigned memory.

Also provide a suitable e820 memory map for PVH Dom0 that matches the
underlying p2m map. This means that PVH guests should always use
XENMEM_memory_map to obtain the e820, even when running as Dom0.
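
For reference, retrieving this map from inside a PVH guest could look
roughly like the sketch below. This is not part of the patch: it assumes
Linux-style Xen guest headers and wrappers (HYPERVISOR_memory_op(),
set_xen_guest_handle()), and E820MAX_GUESS is an arbitrary buffer size
chosen for illustration.

    /*
     * Sketch only: fetch the e820 map that pvh_setup_e820() crafts.
     * On success, memmap.nr_entries is updated by the hypervisor to
     * the number of entries actually written (d->arch.nr_e820 above).
     */
    #include <xen/interface/memory.h>   /* XENMEM_memory_map */

    #define E820MAX_GUESS 128           /* assumed buffer capacity */

    static struct e820entry pvh_e820_map[E820MAX_GUESS];

    static int pvh_fetch_e820(void)
    {
        struct xen_memory_map memmap = {
            .nr_entries = E820MAX_GUESS,    /* in: buffer capacity */
        };

        set_xen_guest_handle(memmap.buffer, pvh_e820_map);
        return HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
    }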

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
 xen/arch/x86/domain_build.c |  114 +++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 111 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 38ed9f6..85f34dc 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -327,11 +327,14 @@ static __init void pvh_add_mem_mapping(struct domain *d, unsigned long gfn,
  * pvh fixme: The following doesn't map MMIO ranges when they sit above the
  *            highest E820 covered address.
  */
-static __init void pvh_map_all_iomem(struct domain *d)
+static __init void pvh_map_all_iomem(struct domain *d, unsigned long nr_pages)
 {
     unsigned long start_pfn, end_pfn, end = 0, start = 0;
     const struct e820entry *entry;
-    unsigned int i, nump;
+    unsigned long nump, free_pages, nmap, navail, nr_holes = 0;
+    unsigned int i, order;
+    struct page_info *page;
+    int rc;
 
     for ( i = 0, entry = e820.map; i < e820.nr_map; i++, entry++ )
     {
@@ -353,6 +356,9 @@ static __init void pvh_map_all_iomem(struct domain *d)
                 nump = end_pfn - start_pfn;
                 /* Add pages to the mapping */
                 pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
+                if ( start_pfn <= nr_pages )
+                    nr_holes += (end_pfn < nr_pages) ?
+                                    nump : (nr_pages - start_pfn);
             }
             start = end;
         }
@@ -369,6 +375,107 @@ static __init void pvh_map_all_iomem(struct domain *d)
         nump = end_pfn - start_pfn;
         pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
     }
+
+    /*
+     * Add the memory removed by the holes at the end of the
+     * memory map.
+     */
+    for ( i = 0, entry = e820.map; i < e820.nr_map && nr_holes > 0;
+          i++, entry++ )
+    {
+        if ( entry->type != E820_RAM )
+            continue;
+
+        end_pfn = PFN_UP(entry->addr + entry->size);
+        if ( end_pfn <= nr_pages )
+            continue;
+
+        navail = end_pfn - nr_pages;
+        nmap = navail > nr_holes ? nr_holes : navail;
+        nr_holes -= nmap;
+        start_pfn = PFN_DOWN(entry->addr) < nr_pages ?
+                        nr_pages : PFN_DOWN(entry->addr);
+        order = get_order_from_pages(nmap);
+        page = alloc_domheap_pages(d, order, 0);
+        if ( !page )
+            panic("Not enough RAM for domain 0");
+        free_pages = 1UL << order;
+        while ( nmap )
+        {
+            order = get_order_from_pages(nmap);
+            /*
+             * get_order_from_pages ceils the allocation,
+             * but we don't want to add more memory than the
+             * requested amount, so always use at least one
+             * order less than the returned.
+             */
+            order = order > 9 ? 9 : (order > 0 ? order - 1 : 0);
+            rc = guest_physmap_add_page(d, start_pfn, page_to_mfn(page), order);
+            if ( rc != 0 )
+                panic("Unable to add gpfn %#lx mfn %#lx order: %u to Dom0 
physmap",
+                      start_pfn, page_to_mfn(page), order);
+            nmap -= 1UL << order;
+            free_pages -= 1UL << order;
+            start_pfn += 1UL << order;
+            page += 1UL << order;
+        }
+        for ( ; free_pages; free_pages-- )
+            free_domheap_pages(page++, 0);
+    }
+
+    ASSERT(nr_holes == 0);
+}
+
+static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
+{
+    struct e820entry *entry, *entry_guest;
+    unsigned int i;
+    unsigned long pages, cur_pages = 0;
+
+    /*
+     * Craft the e820 memory map for Dom0 based on the hardware e820 map.
+     */
+    d->arch.e820 = xzalloc_array(struct e820entry, e820.nr_map);
+    if ( !d->arch.e820 )
+        panic("Unable to allocate memory for Dom0 e820 map");
+    entry_guest = d->arch.e820;
+
+    /* Clamp e820 memory map to match the memory assigned to Dom0 */
+    for ( i = 0, entry = e820.map; i < e820.nr_map; i++, entry++ )
+    {
+        if ( entry->type != E820_RAM )
+        {
+            memcpy(entry_guest++, entry, sizeof(*entry));
+            d->arch.nr_e820++;
+            continue;
+        }
+
+        if ( nr_pages == cur_pages )
+        {
+            /*
+             * We already have all the assigned memory,
+             * skip this entry
+             */
+            continue;
+        }
+
+        memcpy(entry_guest, entry, sizeof(*entry));
+        pages = entry_guest->size >> PAGE_SHIFT;
+        if ( (cur_pages + pages) > nr_pages )
+        {
+            /* Truncate region */
+            entry_guest->size = (nr_pages - cur_pages) << PAGE_SHIFT;
+            cur_pages = nr_pages;
+        }
+        else
+        {
+            cur_pages += pages;
+        }
+        d->arch.nr_e820++;
+        entry_guest++;
+    }
+    ASSERT(cur_pages == nr_pages);
+    ASSERT(d->arch.nr_e820 <= e820.nr_map);
 }
 
 static __init void dom0_update_physmap(struct domain *d, unsigned long pfn,
@@ -1391,7 +1498,8 @@ int __init construct_dom0(
         pfn = shared_info_paddr >> PAGE_SHIFT;
         dom0_update_physmap(d, pfn, mfn, 0);
 
-        pvh_map_all_iomem(d);
+        pvh_map_all_iomem(d, nr_pages);
+        pvh_setup_e820(d, nr_pages);
     }
 
     if ( d->domain_id == hardware_domid )
-- 
1.7.7.5 (Apple Git-26)

