
[Xen-changelog] [xen master] x86/pvh: copy data from low 1MB to Dom0 physmap instead of mapping it



commit 94b8b2e09290cca0fcd8afd930f7f083458b1afe
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Fri Sep 21 12:23:44 2018 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Sep 21 12:23:44 2018 +0200

    x86/pvh: copy data from low 1MB to Dom0 physmap instead of mapping it
    
    Identity mapping RAM regions in the low 1MB for Dom0 is not ideal,
    since there is data there that Xen may still use at runtime (like
    the AP trampoline), so instead of identity mapping the low 1MB into
    the Dom0 physmap, populate those RAM regions and copy the data.

    Note that this allows removing unshare_xen_page_with_guest, since
    its only caller was the PVH Dom0 builder.
    
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
    Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/hvm/dom0_build.c | 55 ++++++++++++++-----------------------------
 xen/arch/x86/mm.c             | 16 -------------
 xen/include/xen/mm.h          |  1 -
 3 files changed, 18 insertions(+), 54 deletions(-)
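
In short: for every RAM e820 entry the builder now populates the range as
usual and, for the part that falls below 1MB, copies the original contents
into the newly populated pages instead of identity mapping them. A condensed,
illustrative sketch of that copy step (error handling elided; addr/size are
the entry's start PFN and page count, and v is Dom0's boot vcpu; the real
code is in the dom0_build.c hunks below):

    if ( addr < MB1_PAGES )
    {
        /* Clamp the copy to the low 1MB. */
        uint64_t end = min_t(uint64_t, MB(1),
                             d->arch.e820[i].addr + d->arch.e820[i].size);

        /*
         * Copy from Xen's direct map of the machine pages into the
         * freshly populated Dom0 physmap at the same address.
         */
        hvm_copy_to_guest_phys(mfn_to_maddr(_mfn(addr)), mfn_to_virt(addr),
                               end - d->arch.e820[i].addr, v);
    }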

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 5724883d8c..3e29cd30b8 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -278,33 +278,6 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
     return 0;
 }
 
-/* Assign the low 1MB to Dom0. */
-static void __init pvh_steal_low_ram(struct domain *d, unsigned long start,
-                                     unsigned long nr_pages)
-{
-    unsigned long mfn;
-
-    ASSERT(start + nr_pages <= PFN_DOWN(MB(1)));
-
-    for ( mfn = start; mfn < start + nr_pages; mfn++ )
-    {
-        struct page_info *pg = mfn_to_page(_mfn(mfn));
-        int rc;
-
-        rc = unshare_xen_page_with_guest(pg, dom_io);
-        if ( rc )
-        {
-            printk("Unable to unshare Xen mfn %#lx: %d\n", mfn, rc);
-            continue;
-        }
-
-        share_xen_page_with_guest(pg, d, SHARE_rw);
-        rc = guest_physmap_add_entry(d, _gfn(mfn), _mfn(mfn), 0, p2m_ram_rw);
-        if ( rc )
-            printk("Unable to add mfn %#lx to p2m: %d\n", mfn, rc);
-    }
-}
-
 static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
 {
     struct e820entry *entry, *entry_guest;
@@ -399,8 +372,8 @@ static int __init pvh_setup_p2m(struct domain *d)
     } while ( preempted );
 
     /*
-     * Memory below 1MB is identity mapped.
-     * NB: this only makes sense when booted from legacy BIOS.
+     * Memory below 1MB is identity mapped initially. RAM regions are
+     * populated and copied below, replacing the respective mappings.
      */
     rc = modify_identity_mmio(d, 0, MB1_PAGES, true);
     if ( rc )
@@ -420,16 +393,24 @@ static int __init pvh_setup_p2m(struct domain *d)
         addr = PFN_DOWN(d->arch.e820[i].addr);
         size = PFN_DOWN(d->arch.e820[i].size);
 
-        if ( addr >= MB1_PAGES )
-            rc = pvh_populate_memory_range(d, addr, size);
-        else
-        {
-            ASSERT(addr + size < MB1_PAGES);
-            pvh_steal_low_ram(d, addr, size);
-        }
-
+        rc = pvh_populate_memory_range(d, addr, size);
         if ( rc )
             return rc;
+
+        if ( addr < MB1_PAGES )
+        {
+            uint64_t end = min_t(uint64_t, MB(1),
+                                 d->arch.e820[i].addr + d->arch.e820[i].size);
+            enum hvm_translation_result res =
+                 hvm_copy_to_guest_phys(mfn_to_maddr(_mfn(addr)),
+                                        mfn_to_virt(addr),
+                                        end - d->arch.e820[i].addr,
+                                        v);
+
+            if ( res != HVMTRANS_okay )
+                printk("Failed to copy [%#lx, %#lx): %d\n",
+                       addr, addr + size, res);
+        }
     }
 
     if ( cpu_has_vmx && paging_mode_hap(d) && !vmx_unrestricted_guest(v) )
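
As a concrete, hypothetical example of the copy length computed above: for
an e820 RAM entry covering [0, 0x80000), addr is PFN 0 and size is 0x80
pages; the clamp gives end = min(0x100000, 0x80000) = 0x80000, so 0x80000
bytes are copied from the machine pages starting at MFN 0 into the Dom0
physmap at guest physical address 0. An entry starting at or above 1MB
fails the addr < MB1_PAGES check and is populated but not copied.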
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index db7faa9632..af1440d578 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -511,22 +511,6 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
     spin_unlock(&d->page_alloc_lock);
 }
 
-int __init unshare_xen_page_with_guest(struct page_info *page,
-                                       struct domain *d)
-{
-    if ( page_get_owner(page) != d || !is_xen_heap_page(page) )
-        return -EINVAL;
-
-    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
-        put_page(page);
-
-    /* Remove the owner and clear the flags. */
-    page->u.inuse.type_info = 0;
-    page_set_owner(page, NULL);
-
-    return 0;
-}
-
 void free_shared_domheap_page(struct page_info *page)
 {
     if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index b3d46ab56b..9595539aee 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -645,7 +645,6 @@ enum XENSHARE_flags {
 };
 void share_xen_page_with_guest(struct page_info *page, struct domain *d,
                                enum XENSHARE_flags flags);
-int unshare_xen_page_with_guest(struct page_info *page, struct domain *d);
 
 static inline void share_xen_page_with_privileged_guests(
     struct page_info *page, enum XENSHARE_flags flags)
--
generated by git-patchbot for /home/xen/git/xen.git#master
