
[Xen-changelog] [xen-unstable] After preparing a page for page-in, allow immediate fill-in of the page contents



# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1322763264 0
# Node ID 8529bca7a3f00cf58e2e92ab152407a7f90279d4
# Parent  6bac468165044ccabe234b8710ae5b3b86e106a4
After preparing a page for page-in, allow immediate fill-in of the page contents

p2m_mem_paging_prep ensures that an mfn backs the paged-out gfn, and
transitions that page to the next state in the paging state machine.
Foreign mappings of the gfn will now succeed. This is the key idea: it
allows the pager to map the gfn and fill in its contents.
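
For context, the sketch below traces that state machine in terms of the
p2m_mem_paging_* entry points. It is an illustration only: the state
names are made up here, and the authoritative p2m_ram_paging_* types
live in xen/include/asm-x86/p2m.h.

    /* Simplified life cycle of a paged gfn (illustration; the real
     * states are the p2m_ram_paging_* types in asm-x86/p2m.h). */
    enum paging_state {
        NOMINATED,   /* p2m_mem_paging_nominate(): selected, still backed */
        EVICTED,     /* p2m_mem_paging_evict(): mfn freed, gfn unbacked */
        POPULATING,  /* p2m_mem_paging_populate(): gfn touched, pager
                      * asked to bring it back in */
        PREPPED,     /* p2m_mem_paging_prep(): fresh mfn allocated; foreign
                      * mappings succeed from here on, and with this patch
                      * the contents can already be in place */
        RESUMED,     /* p2m_mem_paging_resume(): back to normal RAM */
    };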

Unfortunately, it also allows any other foreign mapper to map the gfn and
read its contents. This is particularly dangerous when the populate is
launched by a foreign mapper in the first place, since that mapper will be
actively retrying the map operation and may race with the pager; qemu-dm
is a prime example.

Fix the race by allowing an optional buffer to be passed to the prep
operation, and having the hypervisor memcpy from that buffer into the
newly prepped page before promoting the gfn type.
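
For illustration, a pager consuming the new interface might look roughly
like the sketch below. The struct layout and constant names mirror this
patch; the constant values and the issue_mem_event_domctl() helper are
hypothetical stand-ins for the real definitions in
xen/include/public/domctl.h and the toolstack's domctl plumbing.

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Illustrative values only; the real ones are defined in
     * xen/include/public/domctl.h. */
    enum {
        XEN_DOMCTL_MEM_EVENT_OP_PAGING      = 1,
        XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP = 2,
    };

    struct mem_event_op {          /* mirrors xen_domctl_mem_event_op */
        uint32_t op;               /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
        uint32_t mode;             /* XEN_DOMCTL_MEM_EVENT_OP_* */
        union {
            uint64_t shared_addr;  /* OP_ENABLE */
            uint64_t buffer;       /* PAGING_PREP: contents to fill in */
        } u;
        uint64_t ring_addr;
        uint64_t gfn;
    };

    /* Hypothetical stand-in for the XEN_DOMCTL_mem_event_op hypercall. */
    int issue_mem_event_domctl(uint32_t domid, struct mem_event_op *mec);

    int pager_page_in(uint32_t domid, uint64_t gfn, const void *saved)
    {
        struct mem_event_op mec;
        void *buf;
        int rc;

        /* p2m_mem_paging_prep() rejects buffers that are not
         * page-aligned or not accessible, so allocate accordingly. */
        if ( posix_memalign(&buf, PAGE_SIZE, PAGE_SIZE) )
            return -ENOMEM;
        memcpy(buf, saved, PAGE_SIZE);

        memset(&mec, 0, sizeof(mec));
        mec.mode     = XEN_DOMCTL_MEM_EVENT_OP_PAGING;
        mec.op       = XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP;
        mec.gfn      = gfn;
        mec.u.buffer = (uint64_t)(uintptr_t)buf;

        /* The hypervisor copies the page contents in before promoting
         * the gfn type, so a racing foreign mapper (e.g. qemu-dm) can
         * never observe the gfn without its contents. */
        rc = issue_mem_event_domctl(domid, &mec);

        free(buf);
        return rc;
    }

The page alignment matters: the checks added to p2m_mem_paging_prep in
the diff below return -EINVAL for unaligned or inaccessible buffers.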

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---


diff -r 6bac46816504 -r 8529bca7a3f0 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c       Thu Dec 01 17:58:17 2011 +0000
+++ b/xen/arch/x86/mm/mem_event.c       Thu Dec 01 18:14:24 2011 +0000
@@ -45,7 +45,7 @@
     struct domain *dom_mem_event = current->domain;
     struct vcpu *v = current;
     unsigned long ring_addr = mec->ring_addr;
-    unsigned long shared_addr = mec->shared_addr;
+    unsigned long shared_addr = mec->u.shared_addr;
     l1_pgentry_t l1e;
     unsigned long shared_gfn = 0, ring_gfn = 0; /* gcc ... */
     p2m_type_t p2mt;
diff -r 6bac46816504 -r 8529bca7a3f0 xen/arch/x86/mm/mem_paging.c
--- a/xen/arch/x86/mm/mem_paging.c      Thu Dec 01 17:58:17 2011 +0000
+++ b/xen/arch/x86/mm/mem_paging.c      Thu Dec 01 18:14:24 2011 +0000
@@ -47,7 +47,7 @@
     case XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP:
     {
         unsigned long gfn = mec->gfn;
-        return p2m_mem_paging_prep(d, gfn);
+        return p2m_mem_paging_prep(d, gfn, mec->u.buffer);
     }
     break;
 
diff -r 6bac46816504 -r 8529bca7a3f0 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Dec 01 17:58:17 2011 +0000
+++ b/xen/arch/x86/mm/p2m.c     Thu Dec 01 18:14:24 2011 +0000
@@ -974,14 +974,21 @@
  * mfn if populate was called for  gfn which was nominated but not evicted. In
  * this case only the p2mt needs to be forwarded.
  */
-int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer)
 {
     struct page_info *page;
     p2m_type_t p2mt;
     p2m_access_t a;
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    int ret;
+    int ret, page_extant = 1;
+    const void *user_ptr = (const void *) buffer;
+
+    if ( user_ptr )
+        /* Sanity check the buffer and bail out early if trouble */
+        if ( (buffer & (PAGE_SIZE - 1)) ||
+             (!access_ok(user_ptr, PAGE_SIZE)) )
+            return -EINVAL;
 
     p2m_lock(p2m);
 
@@ -1001,6 +1008,27 @@
         if ( unlikely(page == NULL) )
             goto out;
         mfn = page_to_mfn(page);
+        page_extant = 0;
+    }
+
+    /* If we were given a buffer, now is the time to use it */
+    if ( !page_extant && user_ptr )
+    {
+        void *guest_map;
+        int rc;
+
+        ASSERT( mfn_valid(mfn) );
+        guest_map = map_domain_page(mfn_x(mfn));
+        rc = copy_from_user(guest_map, user_ptr, PAGE_SIZE);
+        unmap_domain_page(guest_map);
+        if ( rc )
+        {
+            gdprintk(XENLOG_ERR, "Failed to load paging-in gfn %lx domain %u "
+                                 "bytes left %d\n", gfn, d->domain_id, rc);
+            ret = -EFAULT;
+            put_page(page); /* Don't leak pages */
+            goto out;
+        }
     }
 
     /* Fix p2m mapping */
diff -r 6bac46816504 -r 8529bca7a3f0 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Dec 01 17:58:17 2011 +0000
+++ b/xen/include/asm-x86/p2m.h Thu Dec 01 18:14:24 2011 +0000
@@ -479,7 +479,7 @@
 /* Start populating a paged out frame */
 void p2m_mem_paging_populate(struct domain *d, unsigned long gfn);
 /* Prepare the p2m for paging a frame in */
-int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer);
 /* Resume normal operation (in case a domain was paused) */
 void p2m_mem_paging_resume(struct domain *d);
 #else
diff -r 6bac46816504 -r 8529bca7a3f0 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Thu Dec 01 17:58:17 2011 +0000
+++ b/xen/include/public/domctl.h       Thu Dec 01 18:14:24 2011 +0000
@@ -742,8 +742,12 @@
     uint32_t       op;           /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
     uint32_t       mode;         /* XEN_DOMCTL_MEM_EVENT_OP_* */
 
-    /* OP_ENABLE */
-    uint64_aligned_t shared_addr;  /* IN:  Virtual address of shared page */
+    union {
+        /* OP_ENABLE IN:  Virtual address of shared page */
+        uint64_aligned_t shared_addr;
+        /* PAGING_PREP IN: buffer to immediately fill page in */
+        uint64_aligned_t buffer;
+    } u;
     uint64_aligned_t ring_addr;    /* IN:  Virtual address of ring page */
 
     /* Other OPs */
