
[Xen-devel] [PATCH 1/9] tools/libxc: Consistent usage of xc_vm_event_* interface

Modified xc_mem_paging_enable to use xc_vm_event_enable directly and
moved the ring_page handling from the client (xenpaging) into libxc.

Restricted xc_vm_event_control usage to the simplest domctls, which do
not expect any return values, and changed xc_vm_event_enable to call
do_domctl directly.

Removed xc_memshr_ring_enable/disable and xc_memshr_domain_resume.

Signed-off-by: Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
---
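As an illustration of the resulting client-side convention, here is a
minimal sketch (not part of this patch; the wrapper name and error
handling are assumptions modelled on xenpaging): the enable call now
returns the mapped ring page directly, or NULL with errno set on
failure, instead of an int rc plus a separately mapped page.

    /* Hypothetical caller of the reworked interface. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <xenctrl.h>

    static void *setup_paging_ring(xc_interface *xch, uint32_t domain_id,
                                   uint32_t *evtchn_port)
    {
        /* Maps and returns the vm_event ring page; on failure returns
         * NULL with errno set (e.g. EBUSY when a paging client is
         * already attached to the domain). */
        void *ring_page = xc_mem_paging_enable(xch, domain_id, evtchn_port);

        if ( ring_page == NULL )
            fprintf(stderr, "xc_mem_paging_enable: %s\n", strerror(errno));

        return ring_page;
    }
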
 tools/libxc/include/xenctrl.h | 49 +--------------------------------
 tools/libxc/xc_mem_paging.c   | 23 +++++-----------
 tools/libxc/xc_memshr.c       | 34 -----------------------
 tools/libxc/xc_monitor.c      | 31 +++++++++++++++++----
 tools/libxc/xc_private.h      |  2 +-
 tools/libxc/xc_vm_event.c     | 64 ++++++++++++++++---------------------------
 tools/xenpaging/xenpaging.c   | 42 +++-------------------------
 7 files changed, 62 insertions(+), 183 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 538007a..28fdbc0 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1949,7 +1949,7 @@ int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
  * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
  * support is considered experimental.
  */
-int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
+void *xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
 int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id);
 int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id);
 int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id,
@@ -2082,53 +2082,6 @@ int xc_memshr_control(xc_interface *xch,
                       uint32_t domid,
                       int enable);
 
-/* Create a communication ring in which the hypervisor will place ENOMEM
- * notifications.
- *
- * ENOMEM happens when unsharing pages: a Copy-on-Write duplicate needs to be
- * allocated, and thus the out-of-memory error occurr.
- *
- * For complete examples on how to plumb a notification ring, look into
- * xenpaging or xen-access.
- *
- * On receipt of a notification, the helper should ensure there is memory
- * available to the domain before retrying.
- *
- * If a domain encounters an ENOMEM condition when sharing and this ring
- * has not been set up, the hypervisor will crash the domain.
- *
- * Fails with:
- *  EINVAL if port is NULL
- *  EINVAL if the sharing ring has already been enabled
- *  ENOSYS if no guest gfn has been specified to host the ring via an hvm param
- *  EINVAL if the gfn for the ring has not been populated
- *  ENOENT if the gfn for the ring is paged out, or cannot be unshared
- *  EINVAL if the gfn for the ring cannot be written to
- *  EINVAL if the domain is dying
- *  ENOSPC if an event channel cannot be allocated for the ring
- *  ENOMEM if memory cannot be allocated for internal data structures
- *  EINVAL or EACCESS if the request is denied by the security policy
- */
-
-int xc_memshr_ring_enable(xc_interface *xch, 
-                          uint32_t domid,
-                          uint32_t *port);
-/* Disable the ring for ENOMEM communication.
- * May fail with EINVAL if the ring was not enabled in the first place.
- */
-int xc_memshr_ring_disable(xc_interface *xch, 
-                           uint32_t domid);
-
-/*
- * Calls below return EINVAL if sharing has not been enabled for the domain
- * Calls below return EINVAL if the domain is dying
- */
-/* Once a reponse to an ENOMEM notification is prepared, the tool can
- * notify the hypervisor to re-schedule the faulting vcpu of the domain with an
- * event channel kick and/or this call. */
-int xc_memshr_domain_resume(xc_interface *xch,
-                            uint32_t domid);
-
 /* Select a page for sharing. 
  *
  * A 64 bit opaque handle will be stored in handle.  The hypervisor ensures
diff --git a/tools/libxc/xc_mem_paging.c b/tools/libxc/xc_mem_paging.c
index a067706..08468fb 100644
--- a/tools/libxc/xc_mem_paging.c
+++ b/tools/libxc/xc_mem_paging.c
@@ -37,35 +37,26 @@ static int xc_mem_paging_memop(xc_interface *xch, uint32_t domain_id,
     return do_memory_op(xch, XENMEM_paging_op, &mpo, sizeof(mpo));
 }
 
-int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id,
-                         uint32_t *port)
+void *xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id,
+                           uint32_t *port)
 {
-    if ( !port )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    return xc_vm_event_control(xch, domain_id,
-                               XEN_VM_EVENT_ENABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
-                               port);
+    return xc_vm_event_enable(xch, domain_id,
+                              XEN_DOMCTL_VM_EVENT_OP_PAGING,
+                              port);
 }
 
 int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
-                               NULL);
+                               XEN_DOMCTL_VM_EVENT_OP_PAGING);
 }
 
 int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
-                               NULL);
+                               XEN_DOMCTL_VM_EVENT_OP_PAGING);
 }
 
 int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
diff --git a/tools/libxc/xc_memshr.c b/tools/libxc/xc_memshr.c
index d5e135e..06f613a 100644
--- a/tools/libxc/xc_memshr.c
+++ b/tools/libxc/xc_memshr.c
@@ -41,31 +41,6 @@ int xc_memshr_control(xc_interface *xch,
     return do_domctl(xch, &domctl);
 }
 
-int xc_memshr_ring_enable(xc_interface *xch, 
-                          uint32_t domid,
-                          uint32_t *port)
-{
-    if ( !port )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    return xc_vm_event_control(xch, domid,
-                               XEN_VM_EVENT_ENABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
-                               port);
-}
-
-int xc_memshr_ring_disable(xc_interface *xch, 
-                           uint32_t domid)
-{
-    return xc_vm_event_control(xch, domid,
-                               XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
-                               NULL);
-}
-
 static int xc_memshr_memop(xc_interface *xch, uint32_t domid,
                             xen_mem_sharing_op_t *mso)
 {
@@ -200,15 +175,6 @@ int xc_memshr_range_share(xc_interface *xch,
     return xc_memshr_memop(xch, source_domain, &mso);
 }
 
-int xc_memshr_domain_resume(xc_interface *xch,
-                            uint32_t domid)
-{
-    return xc_vm_event_control(xch, domid,
-                               XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
-                               NULL);
-}
-
 int xc_memshr_debug_gfn(xc_interface *xch,
                         uint32_t domid,
                         unsigned long gfn)
diff --git a/tools/libxc/xc_monitor.c b/tools/libxc/xc_monitor.c
index 4ac823e..d190c29 100644
--- a/tools/libxc/xc_monitor.c
+++ b/tools/libxc/xc_monitor.c
@@ -24,24 +24,43 @@
 
 void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port)
 {
-    return xc_vm_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
-                              port);
+    void *buffer;
+    int saved_errno;
+
+    /* Pause the domain for ring page setup */
+    if ( xc_domain_pause(xch, domain_id) )
+    {
+        PERROR("Unable to pause domain\n");
+        return NULL;
+    }
+
+    buffer = xc_vm_event_enable(xch, domain_id,
+                                HVM_PARAM_MONITOR_RING_PFN,
+                                port);
+    saved_errno = errno;
+    if ( xc_domain_unpause(xch, domain_id) )
+    {
+        if ( buffer )
+            saved_errno = errno;
+        PERROR("Unable to unpause domain");
+    }
+
+    errno = saved_errno;
+    return buffer;
 }
 
 int xc_monitor_disable(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_MONITOR,
-                               NULL);
+                               XEN_DOMCTL_VM_EVENT_OP_MONITOR);
 }
 
 int xc_monitor_resume(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_MONITOR,
-                               NULL);
+                               XEN_DOMCTL_VM_EVENT_OP_MONITOR);
 }
 
 int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index adc3b6a..663e78b 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -412,7 +412,7 @@ int xc_ffs64(uint64_t x);
  * vm_event operations. Internal use only.
  */
 int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
-                        unsigned int mode, uint32_t *port);
+                        unsigned int mode);
 /*
  * Enables vm_event and returns the mapped ring page indicated by param.
  * param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN
diff --git a/tools/libxc/xc_vm_event.c b/tools/libxc/xc_vm_event.c
index a97c615..ea10366 100644
--- a/tools/libxc/xc_vm_event.c
+++ b/tools/libxc/xc_vm_event.c
@@ -23,20 +23,16 @@
 #include "xc_private.h"
 
 int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
-                        unsigned int mode, uint32_t *port)
+                        unsigned int mode)
 {
     DECLARE_DOMCTL;
-    int rc;
 
     domctl.cmd = XEN_DOMCTL_vm_event_op;
     domctl.domain = domain_id;
     domctl.u.vm_event_op.op = op;
     domctl.u.vm_event_op.mode = mode;
 
-    rc = do_domctl(xch, &domctl);
-    if ( !rc && port )
-        *port = domctl.u.vm_event_op.u.enable.port;
-    return rc;
+    return do_domctl(xch, &domctl);
 }
 
 void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
@@ -46,7 +42,8 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
     uint64_t pfn;
     xen_pfn_t ring_pfn, mmap_pfn;
     unsigned int op, mode;
-    int rc1, rc2, saved_errno;
+    int rc;
+    DECLARE_DOMCTL;
 
     if ( !port )
     {
@@ -54,17 +51,9 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
         return NULL;
     }
 
-    /* Pause the domain for ring page setup */
-    rc1 = xc_domain_pause(xch, domain_id);
-    if ( rc1 != 0 )
-    {
-        PERROR("Unable to pause domain\n");
-        return NULL;
-    }
-
     /* Get the pfn of the ring page */
-    rc1 = xc_hvm_param_get(xch, domain_id, param, &pfn);
-    if ( rc1 != 0 )
+    rc = xc_hvm_param_get(xch, domain_id, param, &pfn);
+    if ( rc != 0 )
     {
         PERROR("Failed to get pfn of ring page\n");
         goto out;
@@ -72,13 +61,13 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
 
     ring_pfn = pfn;
     mmap_pfn = pfn;
-    rc1 = xc_get_pfn_type_batch(xch, domain_id, 1, &mmap_pfn);
-    if ( rc1 || mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
+    rc = xc_get_pfn_type_batch(xch, domain_id, 1, &mmap_pfn);
+    if ( rc || mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
     {
         /* Page not in the physmap, try to populate it */
-        rc1 = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
+        rc = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
                                               &ring_pfn);
-        if ( rc1 != 0 )
+        if ( rc != 0 )
         {
             PERROR("Failed to populate ring pfn\n");
             goto out;
@@ -87,7 +76,7 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
 
     mmap_pfn = ring_pfn;
     ring_page = xc_map_foreign_pages(xch, domain_id, PROT_READ | PROT_WRITE,
-                                         &mmap_pfn, 1);
+                                     &mmap_pfn, 1);
     if ( !ring_page )
     {
         PERROR("Could not map the ring page\n");
@@ -117,40 +106,35 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
      */
     default:
         errno = EINVAL;
-        rc1 = -1;
+        rc = -1;
         goto out;
     }
 
-    rc1 = xc_vm_event_control(xch, domain_id, op, mode, port);
-    if ( rc1 != 0 )
+    domctl.cmd = XEN_DOMCTL_vm_event_op;
+    domctl.domain = domain_id;
+    domctl.u.vm_event_op.op = op;
+    domctl.u.vm_event_op.mode = mode;
+
+    rc = do_domctl(xch, &domctl);
+    if ( rc != 0 )
     {
         PERROR("Failed to enable vm_event\n");
         goto out;
     }
 
+    *port = domctl.u.vm_event_op.u.enable.port;
+
     /* Remove the ring_pfn from the guest's physmap */
-    rc1 = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
-    if ( rc1 != 0 )
+    rc = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
+    if ( rc != 0 )
         PERROR("Failed to remove ring page from guest physmap");
 
  out:
-    saved_errno = errno;
-
-    rc2 = xc_domain_unpause(xch, domain_id);
-    if ( rc1 != 0 || rc2 != 0 )
+    if ( rc != 0 )
     {
-        if ( rc2 != 0 )
-        {
-            if ( rc1 == 0 )
-                saved_errno = errno;
-            PERROR("Unable to unpause domain");
-        }
-
         if ( ring_page )
             xenforeignmemory_unmap(xch->fmem, ring_page, 1);
         ring_page = NULL;
-
-        errno = saved_errno;
     }
 
     return ring_page;
diff --git a/tools/xenpaging/xenpaging.c b/tools/xenpaging/xenpaging.c
index d0571ca..b4a3a5c 100644
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -337,40 +337,11 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
         goto err;
     }
 
-    /* Map the ring page */
-    xc_get_hvm_param(xch, paging->vm_event.domain_id, 
-                        HVM_PARAM_PAGING_RING_PFN, &ring_pfn);
-    mmap_pfn = ring_pfn;
-    paging->vm_event.ring_page = 
-        xc_map_foreign_pages(xch, paging->vm_event.domain_id,
-                             PROT_READ | PROT_WRITE, &mmap_pfn, 1);
-    if ( !paging->vm_event.ring_page )
-    {
-        /* Map failed, populate ring page */
-        rc = xc_domain_populate_physmap_exact(paging->xc_handle, 
-                                              paging->vm_event.domain_id,
-                                              1, 0, 0, &ring_pfn);
-        if ( rc != 0 )
-        {
-            PERROR("Failed to populate ring gfn\n");
-            goto err;
-        }
-
-        paging->vm_event.ring_page = 
-            xc_map_foreign_pages(xch, paging->vm_event.domain_id,
-                                 PROT_READ | PROT_WRITE,
-                                 &mmap_pfn, 1);
-        if ( !paging->vm_event.ring_page )
-        {
-            PERROR("Could not map the ring page\n");
-            goto err;
-        }
-    }
-    
     /* Initialise Xen */
-    rc = xc_mem_paging_enable(xch, paging->vm_event.domain_id,
-                             &paging->vm_event.evtchn_port);
-    if ( rc != 0 )
+    paging->vm_event.ring_page =
+            xc_mem_paging_enable(xch, paging->vm_event.domain_id,
+                                 &paging->vm_event.evtchn_port);
+    if ( paging->vm_event.ring_page == NULL )
     {
         switch ( errno ) {
             case EBUSY:
@@ -418,11 +389,6 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
                    (vm_event_sring_t *)paging->vm_event.ring_page,
                    PAGE_SIZE);
 
-    /* Now that the ring is set, remove it from the guest's physmap */
-    if ( xc_domain_decrease_reservation_exact(xch, 
-                    paging->vm_event.domain_id, 1, 0, &ring_pfn) )
-        PERROR("Failed to remove ring from guest physmap");
-
     /* Get max_pages from guest if not provided via cmdline */
     if ( !paging->max_pages )
     {
-- 
2.7.4

