
[Xen-devel] [PATCH 6 of 9] x86/mm: New domctl: add a shared page to the physmap


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
  • Date: Fri, 09 Dec 2011 15:22:33 -0500
  • Cc: andres@xxxxxxxxxxxxxx, keir.xen@xxxxxxxxx, tim@xxxxxxx, JBeulich@xxxxxxxx, adin@xxxxxxxxxxxxxx
  • Delivery-date: Fri, 09 Dec 2011 20:22:38 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

 xen/arch/x86/mm/mem_sharing.c |  104 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/domctl.h   |    3 +-
 2 files changed, 106 insertions(+), 1 deletions(-)


This domctl is useful, for example, for directly populating parts of a
domain's physmap with shared frames.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
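
For reference, a toolstack would drive this through the existing mem sharing
domctl, reusing the mem_sharing_op_share fields that OP_SHARE already takes.
Below is a minimal caller-side sketch, not part of the patch: it assumes a
libxc-style hypercall wrapper (named do_xen_domctl() here, purely
hypothetical) and that the outer command and union member are the usual
XEN_DOMCTL_mem_sharing_op / u.mem_sharing_op; the sub-op constant and the
share fields are the ones added or reused by this patch.

    /* Illustrative only: invoke the new ADD_PHYSMAP sub-op from a toolstack.
     * do_xen_domctl() stands in for the toolstack's hypercall wrapper
     * (e.g. libxc's do_domctl()); it is not defined by this patch. */
    #include <stdint.h>
    #include <string.h>
    #include <xen/domctl.h>

    int do_xen_domctl(struct xen_domctl *domctl); /* hypothetical wrapper */

    static int add_shared_page_to_physmap(domid_t source_dom,
                                          uint64_t source_gfn,
                                          uint64_t source_handle,
                                          domid_t client_dom,
                                          uint64_t client_gfn)
    {
        struct xen_domctl domctl;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd = XEN_DOMCTL_mem_sharing_op;
        domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        /* The domctl is issued against the source (sharing) domain. */
        domctl.domain = source_dom;

        domctl.u.mem_sharing_op.op =
            XEN_DOMCTL_MEM_EVENT_OP_SHARING_ADD_PHYSMAP;
        domctl.u.mem_sharing_op.u.share.source_gfn    = source_gfn;
        domctl.u.mem_sharing_op.u.share.source_handle = source_handle;
        domctl.u.mem_sharing_op.u.share.client_domain = client_dom;
        domctl.u.mem_sharing_op.u.share.client_gfn    = client_gfn;

        /* 0 on success; otherwise one of the XEN_DOMCTL_MEM_SHARING_*_INVALID
         * codes or a -errno value from mem_sharing_add_to_physmap() below. */
        return do_xen_domctl(&domctl);
    }

On success the client gfn ends up mapping the shared mfn as p2m_ram_shared,
with cd->shr_pages and nr_shared_mfns bumped, per the hypervisor-side
function below.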

diff -r bfebf49b3eb6 -r bba44de8394a xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -851,6 +851,74 @@ err_out:
     return ret;
 }
 
+int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh,
+                            struct domain *cd, unsigned long cgfn)
+{
+    struct page_info *spage;
+    int ret = -EINVAL;
+    mfn_t smfn, cmfn;
+    p2m_type_t smfn_type, cmfn_type;
+    struct gfn_info *gfn_info;
+    struct p2m_domain *p2m = p2m_get_hostp2m(cd);
+    p2m_access_t a;
+    DECLARE_PG_LOCK_DATA(pld);
+    
+    /* XXX if sd == cd handle potential deadlock by ordering
+     * the get_ and put_gfn's */
+    smfn = get_gfn_query(sd, sgfn, &smfn_type);
+    cmfn = get_gfn_type_access(p2m, cgfn, &cmfn_type, &a, p2m_query, NULL);
+
+    /* Get the source shared page, check and lock */
+    ret = XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID;
+    spage = __grab_shared_page(smfn, &pld);
+    if ( spage == NULL )
+        goto err_out;
+    ASSERT(smfn_type == p2m_ram_shared);
+
+    /* Check that the handles match */
+    if ( spage->shared_info->handle != sh )
+        goto err_unlock;
+
+    /* Make sure the target page is a hole in the physmap */
+    if ( mfn_valid(cmfn) ||
+         (!(p2m_is_ram(cmfn_type))) )
+    {
+        ret = XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID;
+        goto err_unlock;
+    }
+
+    /* This is simpler than regular sharing */
+    BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page));
+    if ( (gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) == NULL )
+    {
+        put_page_and_type(spage);
+        ret = -ENOMEM;
+        goto err_unlock;
+    }
+
+    p2m_lock(p2m);
+    ret = set_p2m_entry(p2m, cgfn, smfn, PAGE_ORDER_4K, p2m_ram_shared, a);
+    p2m_unlock(p2m);
+
+    /* Tempted to turn this into an assert */
+    if ( !ret ) {
+        ret = -ENOENT;
+        mem_sharing_gfn_destroy(gfn_info, 0);
+        put_page_and_type(spage);
+    } else {
+        atomic_inc(&cd->shr_pages);
+        atomic_inc(&nr_shared_mfns);
+        ret = 0;
+    }
+
+err_unlock:
+    mem_sharing_page_unlock(spage, &pld);
+err_out:
+    put_gfn(cd, cgfn);
+    put_gfn(sd, sgfn);
+    return ret;
+}
+
 int mem_sharing_unshare_page(struct domain *d,
                              unsigned long gfn, 
                              uint16_t flags)
@@ -1085,6 +1153,42 @@ int mem_sharing_domctl(struct domain *d,
         }
         break;
 
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_ADD_PHYSMAP:
+        {
+            unsigned long sgfn, cgfn;
+            struct domain *cd;
+            shr_handle_t sh;
+
+            if ( !mem_sharing_enabled(d) )
+                return -EINVAL;
+
+            cd = get_domain_by_id(mec->u.share.client_domain);
+            if ( !cd )
+                return -ESRCH;
+
+            if ( !mem_sharing_enabled(cd) )
+            {
+                put_domain(cd);
+                return -EINVAL;
+            }
+
+            if ( XEN_DOMCTL_MEM_SHARING_FIELD_IS_GREF(mec->u.share.source_gfn) )
+            {
+                /* Cannot add a gref to the physmap */
+                put_domain(cd);
+                return -EINVAL;
+            }
+
+            sgfn    = mec->u.share.source_gfn;
+            sh      = mec->u.share.source_handle;
+            cgfn    = mec->u.share.client_gfn;
+
+            rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn); 
+
+            put_domain(cd);
+        }
+        break;
+
         case XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME:
         {
             if ( !mem_sharing_enabled(d) )
diff -r bfebf49b3eb6 -r bba44de8394a xen/include/public/domctl.h
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -771,6 +771,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_e
 #define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN      5
 #define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN      6
 #define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF     7
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ADD_PHYSMAP    8
 
 #define XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID  (-10)
 #define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID  (-9)
@@ -797,7 +798,7 @@ struct xen_domctl_mem_sharing_op {
             } u;
             uint64_aligned_t  handle;     /* OUT: the handle           */
         } nominate;
-        struct mem_sharing_op_share {     /* OP_SHARE */
+        struct mem_sharing_op_share {     /* OP_SHARE/ADD_PHYSMAP */
             uint64_aligned_t source_gfn;    /* IN: the gfn of the source page */
             uint64_aligned_t source_handle; /* IN: handle to the source page */
             uint64_aligned_t client_domain; /* IN: the client domain id */
