
[Xen-devel] [PATCH RFC 06/12] xen/x86: populate PVHv2 Dom0 physical memory map



Craft the Dom0 e820 memory map and populate it.

The guest memory map is based on the host one: RAM regions are clipped to
page boundaries (the minimum granularity of the second-stage translation)
and truncated once the Dom0 memory allocation has been reached. The map is
then populated using the biggest possible order, falling back to smaller
orders and finally dropping the allocation flags on failure. On Intel
hardware lacking unrestricted guest mode an identity page table is also
set up, so that Dom0 can be started with CR0.PG=0.
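
For illustration (host addresses made up), a hardware RAM region that is
not page-aligned at either end:

    [0x0000000000058200, 0x000000000009fc00) RAM (host)

is reported to Dom0 clipped inwards to page boundaries:

    [0x0000000000059000, 0x000000000009f000) RAM (guest)

i.e. ROUNDUP(start, PAGE_SIZE) for the start and (end & PAGE_MASK) for the
end.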

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/domain_build.c | 199 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 193 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index c0ef40f..cb8ecbd 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -43,6 +43,11 @@ static long __initdata dom0_nrpages;
 static long __initdata dom0_min_nrpages;
 static long __initdata dom0_max_nrpages = LONG_MAX;
 
+/* GFN of the identity map for EPT (address 0xfeffe000, just below 4GB). */
+#define HVM_IDENT_PT_GFN  0xfeffeu
+
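+/* Per-order count of successful allocations, dumped after populating. */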
+static unsigned int __initdata hvm_mem_stats[MAX_ORDER + 1];
+
 /*
  * dom0_mem=[min:<min_amt>,][max:<max_amt>,][<amt>]
  * 
@@ -304,7 +309,8 @@ static unsigned long __init compute_dom0_nr_pages(
             avail -= max_pdx >> s;
     }
 
-    need_paging = opt_dom0_shadow || (is_pvh_domain(d) && !iommu_hap_pt_share);
+    need_paging = opt_dom0_shadow || (has_hvm_container_domain(d) &&
+                  (!iommu_hap_pt_share || !paging_mode_hap(d)));
     for ( ; ; need_paging = 0 )
     {
         nr_pages = dom0_nrpages;
@@ -336,7 +342,8 @@ static unsigned long __init compute_dom0_nr_pages(
         avail -= dom0_paging_pages(d, nr_pages);
     }
 
-    if ( (parms->p2m_base == UNSET_ADDR) && (dom0_nrpages <= 0) &&
+    if ( is_pv_domain(d) &&
+         (parms->p2m_base == UNSET_ADDR) && (dom0_nrpages <= 0) &&
          ((dom0_min_nrpages <= 0) || (nr_pages > min_pages)) )
     {
         /*
@@ -547,11 +554,12 @@ static __init void pvh_map_all_iomem(struct domain *d, unsigned long nr_pages)
     ASSERT(nr_holes == 0);
 }
 
-static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
+static __init void hvm_setup_e820(struct domain *d, unsigned long nr_pages)
 {
     struct e820entry *entry, *entry_guest;
     unsigned int i;
     unsigned long pages, cur_pages = 0;
+    uint64_t start, end;
 
     /*
      * Craft the e820 memory map for Dom0 based on the hardware e820 map.
@@ -579,8 +587,19 @@ static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
             continue;
         }
 
-        *entry_guest = *entry;
-        pages = PFN_UP(entry_guest->size);
+        /*
+         * Make sure the start and length are aligned to PAGE_SIZE, because
+         * that's the minimum granularity of the 2nd stage translation.
+         */
+        start = ROUNDUP(entry->addr, PAGE_SIZE);
+        end = (entry->addr + entry->size) & PAGE_MASK;
+        if ( start >= end )
+            continue;
+
+        entry_guest->type = E820_RAM;
+        entry_guest->addr = start;
+        entry_guest->size = end - start;
+        pages = PFN_DOWN(entry_guest->size);
         if ( (cur_pages + pages) > nr_pages )
         {
             /* Truncate region */
@@ -591,6 +610,8 @@ static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
         {
             cur_pages += pages;
         }
+        ASSERT((entry_guest->addr & ~PAGE_MASK) == 0 &&
+               (entry_guest->size & ~PAGE_MASK) == 0);
  next:
         d->arch.nr_e820++;
         entry_guest++;
@@ -1631,7 +1652,7 @@ static int __init construct_dom0_pv(
         dom0_update_physmap(d, pfn, mfn, 0);
 
         pvh_map_all_iomem(d, nr_pages);
-        pvh_setup_e820(d, nr_pages);
+        hvm_setup_e820(d, nr_pages);
     }
 
     if ( d->domain_id == hardware_domid )
@@ -1647,15 +1668,181 @@ out:
     return rc;
 }
 
+/* Helper to convert from bytes into human-readable form. */
+static void __init pretty_print_bytes(uint64_t size)
+{
+    const char *units[] = {"B", "KB", "MB", "GB", "TB"};
+    unsigned int i = 0;
+
+    /* Stop at the last unit, or once the value drops below 1024. */
+    while ( ++i < ARRAY_SIZE(units) && size >= 1024 )
+        size >>= 10; /* size /= 1024 */
+
+    printk("%4" PRIu64 "%2s", size, units[i - 1]);
+}
+
+/* Calculate the biggest usable order given a size in bytes. */
+static inline unsigned int get_order(uint64_t size)
+{
+    unsigned int order;
+    uint64_t pg;
+
+    ASSERT((size & ~PAGE_MASK) == 0);
+    pg = PFN_DOWN(size);
+    for ( order = 0; pg >= (1UL << (order + 1)); order++ );
+
+    return order;
+}
+
+/* Populate an HVM memory range using the biggest possible order. */
+static void __init hvm_populate_memory_range(struct domain *d, uint64_t start,
+                                             uint64_t size)
+{
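+    /*
+     * Prefer node-local memory outside of the DMA zone. If an order-0
+     * allocation with these flags fails, the flags are dropped below and
+     * the remaining range is populated without them.
+     */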
+    static unsigned int __initdata memflags = MEMF_no_dma|MEMF_exact_node;
+    unsigned int order;
+    struct page_info *page;
+    int rc;
+
+    ASSERT((size & ~PAGE_MASK) == 0 && (start & ~PAGE_MASK) == 0);
+
+    order = MAX_ORDER;
+    while ( size != 0 )
+    {
+        order = min(get_order(size), order);
+        page = alloc_domheap_pages(d, order, memflags);
+        if ( page == NULL )
+        {
+            if ( order == 0 && memflags )
+            {
+                /* Try again without any memflags. */
+                memflags = 0;
+                order = MAX_ORDER;
+                continue;
+            }
+            if ( order == 0 )
+                panic("Unable to allocate memory with order 0!\n");
+            order--;
+            continue;
+        }
+
+        hvm_mem_stats[order]++;
+        rc = guest_physmap_add_page(d, _gfn(PFN_DOWN(start)),
+                                    _mfn(page_to_mfn(page)), order);
+        if ( rc != 0 )
+            panic("Failed to populate memory: [%" PRIx64 " - %" PRIx64 "] %d\n",
+                  start, start + (((uint64_t)1) << (order + PAGE_SHIFT)), rc);
+        start += ((uint64_t)1) << (order + PAGE_SHIFT);
+        size -= ((uint64_t)1) << (order + PAGE_SHIFT);
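+        /*
+         * Periodically process softirqs: this fires whenever the
+         * remaining size is 4GB-aligned.
+         */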
+        if ( (size & 0xffffffff) == 0 )
+            process_pending_softirqs();
+    }
+}
+
+static int __init hvm_setup_p2m(struct domain *d)
+{
+    struct vcpu *v = d->vcpu[0];
+    unsigned long nr_pages;
+    int i;
+
+    printk("** Preparing memory map **\n");
+
+    /*
+     * Subtract one page for the EPT identity page table and two pages
+     * for the MADT replacement.
+     */
+    nr_pages = compute_dom0_nr_pages(d, NULL, 0) - 3;
+
+    hvm_setup_e820(d, nr_pages);
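+    /* Size the paging pool before populating: p2m pages come from it. */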
+    paging_set_allocation(d, dom0_paging_pages(d, nr_pages));
+
+    printk("Dom0 memory map:\n");
+    print_e820_memory_map(d->arch.e820, d->arch.nr_e820);
+
+    printk("** Populating memory map **\n");
+    /* Populate memory map. */
+    for ( i = 0; i < d->arch.nr_e820; i++ )
+    {
+        if ( d->arch.e820[i].type != E820_RAM )
+            continue;
+
+        hvm_populate_memory_range(d, d->arch.e820[i].addr,
+                                  d->arch.e820[i].size);
+    }
+
+    printk("Memory allocation stats:\n");
+    for ( i = 0; i <= MAX_ORDER; i++ )
+    {
+        if ( hvm_mem_stats[MAX_ORDER - i] != 0 )
+        {
+            printk("Order %2u: ", MAX_ORDER - i);
+            pretty_print_bytes(((uint64_t)1 << (MAX_ORDER - i + PAGE_SHIFT)) *
+                               hvm_mem_stats[MAX_ORDER - i]);
+            printk("\n");
+        }
+    }
+
+    if ( cpu_has_vmx && paging_mode_hap(d) && !vmx_unrestricted_guest(v) )
+    {
+        struct vcpu *saved_current;
+        struct page_info *page;
+        uint32_t *ident_pt;
+
+        /*
+         * Identity-map page table is required for running with CR0.PG=0
+         * when using Intel EPT. Create a 32-bit non-PAE page directory of
+         * superpages.
+         */
+        page = alloc_domheap_pages(d, 0, 0);
+        if ( unlikely(!page) )
+        {
+            printk("Unable to allocate page for identity map\n");
+            return -ENOMEM;
+        }
+
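+        /*
+         * The domain builder runs on the idle vCPU: temporarily switch
+         * 'current' to Dom0's vCPU while the page directory is mapped
+         * and filled, and restore it afterwards.
+         */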
+        saved_current = current;
+        set_current(v);
+
+        ident_pt = __map_domain_page(page);
+        for ( i = 0; i < PAGE_SIZE / sizeof(*ident_pt); i++ )
+            ident_pt[i] = (((uint32_t)i << 22) | _PAGE_PRESENT | _PAGE_RW |
+                           _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY |
+                           _PAGE_PSE);
+        unmap_domain_page(ident_pt);
+
+        guest_physmap_add_page(d, _gfn(HVM_IDENT_PT_GFN),
+                               _mfn(page_to_mfn(page)), 0);
+        d->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] =
+                                 HVM_IDENT_PT_GFN << PAGE_SHIFT;
+        set_current(saved_current);
+    }
+
+    return 0;
+}
+
 static int __init construct_dom0_hvm(struct domain *d, const module_t *image,
                                      unsigned long image_headroom,
                                      module_t *initrd,
                                      void *(*bootstrap_map)(const module_t *),
                                      char *cmdline)
 {
+    int rc;
 
     printk("** Building a PVH Dom0 **\n");
 
+    /* Sanity! */
+    BUG_ON(d->domain_id != 0);
+    BUG_ON(d->vcpu[0] == NULL);
+
+    process_pending_softirqs();
+
+    iommu_hwdom_init(d);
+
+    rc = hvm_setup_p2m(d);
+    if ( rc )
+    {
+        printk("Failed to setup Dom0 physical memory map\n");
+        return rc;
+    }
+
     return 0;
 }
 
-- 
2.7.4 (Apple Git-66)

