
[Xen-devel] [RFC PATCH 2/6] tools/libxc: Define VM_EVENT type



Define a type for each of the supported vm_event rings (paging,
monitor and sharing) and replace the ring 'param' argument and the
domctl 'mode' field with this type.

Replace XEN_DOMCTL_VM_EVENT_OP_ occurrences with their XEN_VM_EVENT_TYPE_
counterparts.

Signed-off-by: Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
---
 tools/libxc/xc_mem_paging.c |  6 ++--
 tools/libxc/xc_monitor.c    |  6 ++--
 tools/libxc/xc_private.h    |  8 +++---
 tools/libxc/xc_vm_event.c   | 68 ++++++++++++++++++++++-----------------------
 xen/common/vm_event.c       | 12 ++++----
 xen/include/public/domctl.h | 45 ++++++++++++++++--------------
 6 files changed, 73 insertions(+), 72 deletions(-)
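For reference, below is a minimal sketch (not part of the patch) of the call
pattern this change establishes inside libxc, mirroring xc_monitor.c in the
diff below: a ring is now selected by a XEN_VM_EVENT_TYPE_* constant rather
than by an HVM_PARAM_*_RING_PFN value. xc_vm_event_enable() and
xc_vm_event_control() are internal helpers declared in xc_private.h, so this
only builds within libxc; error handling is trimmed for brevity.

#include "xc_private.h"

/* Enable the monitor ring for a domain and map its ring page. */
static void *example_monitor_enable(xc_interface *xch, uint32_t domain_id,
                                    uint32_t *port)
{
    /* The ring is chosen by vm_event type; no HVM param is passed here. */
    return xc_vm_event_enable(xch, domain_id, XEN_VM_EVENT_TYPE_MONITOR, port);
}

/* Tear the monitor ring back down. */
static int example_monitor_disable(xc_interface *xch, uint32_t domain_id)
{
    return xc_vm_event_control(xch, domain_id, XEN_VM_EVENT_DISABLE,
                               XEN_VM_EVENT_TYPE_MONITOR);
}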

diff --git a/tools/libxc/xc_mem_paging.c b/tools/libxc/xc_mem_paging.c
index 08468fb..37a8224 100644
--- a/tools/libxc/xc_mem_paging.c
+++ b/tools/libxc/xc_mem_paging.c
@@ -41,7 +41,7 @@ void *xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id,
                            uint32_t *port)
 {
     return xc_vm_event_enable(xch, domain_id,
-                              XEN_DOMCTL_VM_EVENT_OP_PAGING,
+                              XEN_VM_EVENT_TYPE_PAGING,
                               port);
 }
 
@@ -49,14 +49,14 @@ int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING);
+                               XEN_VM_EVENT_TYPE_PAGING);
 }
 
 int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING);
+                               XEN_VM_EVENT_TYPE_PAGING);
 }
 
 int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
diff --git a/tools/libxc/xc_monitor.c b/tools/libxc/xc_monitor.c
index d190c29..718fe8b 100644
--- a/tools/libxc/xc_monitor.c
+++ b/tools/libxc/xc_monitor.c
@@ -35,7 +35,7 @@ void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port)
     }
 
     buffer = xc_vm_event_enable(xch, domain_id,
-                                HVM_PARAM_MONITOR_RING_PFN,
+                                XEN_VM_EVENT_TYPE_MONITOR,
                                 port);
     saved_errno = errno;
     if ( xc_domain_unpause(xch, domain_id) )
@@ -53,14 +53,14 @@ int xc_monitor_disable(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_MONITOR);
+                               XEN_VM_EVENT_TYPE_MONITOR);
 }
 
 int xc_monitor_resume(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_MONITOR);
+                               XEN_VM_EVENT_TYPE_MONITOR);
 }
 
 int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index 663e78b..482451c 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -412,12 +412,12 @@ int xc_ffs64(uint64_t x);
  * vm_event operations. Internal use only.
  */
 int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
-                        unsigned int mode);
+                        unsigned int type);
 /*
- * Enables vm_event and returns the mapped ring page indicated by param.
- * param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN
+ * Enables vm_event and returns the mapped ring page indicated by type.
+ * type can be XEN_VM_EVENT_TYPE_(PAGING/MONITOR/SHARING)
  */
-void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
+void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int type,
                          uint32_t *port);
 
 int do_dm_op(xc_interface *xch, uint32_t domid, unsigned int nr_bufs, ...);
diff --git a/tools/libxc/xc_vm_event.c b/tools/libxc/xc_vm_event.c
index d9e3a49..4fc2548 100644
--- a/tools/libxc/xc_vm_event.c
+++ b/tools/libxc/xc_vm_event.c
@@ -23,29 +23,54 @@
 #include "xc_private.h"
 
 int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
-                        unsigned int mode)
+                        unsigned int type)
 {
     DECLARE_DOMCTL;
 
     domctl.cmd = XEN_DOMCTL_vm_event_op;
     domctl.domain = domain_id;
     domctl.u.vm_event_op.op = op;
-    domctl.u.vm_event_op.mode = mode;
+    domctl.u.vm_event_op.type = type;
 
     return do_domctl(xch, &domctl);
 }
 
-void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
+static int xc_vm_event_ring_pfn_param(int type, int *param)
+{
+    if ( !param )
+        return -EINVAL;
+
+    switch ( type )
+    {
+    case XEN_VM_EVENT_TYPE_PAGING:
+        *param = HVM_PARAM_PAGING_RING_PFN;
+        break;
+
+    case XEN_VM_EVENT_TYPE_MONITOR:
+        *param = HVM_PARAM_MONITOR_RING_PFN;
+        break;
+
+    case XEN_VM_EVENT_TYPE_SHARING:
+        *param = HVM_PARAM_SHARING_RING_PFN;
+        break;
+
+    default:
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int type,
                          uint32_t *port)
 {
     void *ring_page = NULL;
     uint64_t pfn;
     xen_pfn_t ring_pfn, mmap_pfn;
-    unsigned int op, mode;
-    int rc;
+    int param, rc;
     DECLARE_DOMCTL;
 
-    if ( !port )
+    if ( !port || xc_vm_event_ring_pfn_param(type, &param) != 0 )
     {
         errno = EINVAL;
         return NULL;
@@ -83,37 +108,10 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
         goto out;
     }
 
-    switch ( param )
-    {
-    case HVM_PARAM_PAGING_RING_PFN:
-        op = XEN_VM_EVENT_ENABLE;
-        mode = XEN_DOMCTL_VM_EVENT_OP_PAGING;
-        break;
-
-    case HVM_PARAM_MONITOR_RING_PFN:
-        op = XEN_VM_EVENT_ENABLE;
-        mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
-        break;
-
-    case HVM_PARAM_SHARING_RING_PFN:
-        op = XEN_VM_EVENT_ENABLE;
-        mode = XEN_DOMCTL_VM_EVENT_OP_SHARING;
-        break;
-
-    /*
-     * This is for the outside chance that the HVM_PARAM is valid but is invalid
-     * as far as vm_event goes.
-     */
-    default:
-        errno = EINVAL;
-        rc = -1;
-        goto out;
-    }
-
     domctl.cmd = XEN_DOMCTL_vm_event_op;
     domctl.domain = domain_id;
-    domctl.u.vm_event_op.op = op;
-    domctl.u.vm_event_op.mode = mode;
+    domctl.u.vm_event_op.op = XEN_VM_EVENT_ENABLE;
+    domctl.u.vm_event_op.type = type;
 
     rc = do_domctl(xch, &domctl);
     if ( rc != 0 )
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 26cfa2c..dddc2d4 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -371,7 +371,7 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
     vm_event_response_t rsp;
 
     /*
-     * vm_event_resume() runs in either XEN_DOMCTL_VM_EVENT_OP_*, or
+     * vm_event_resume() runs in either XEN_VM_EVENT_* domctls, or
      * EVTCHN_send context from the introspection consumer. Both contexts
      * are guaranteed not to be the subject of vm_event responses.
      * While we could ASSERT(v != current) for each VCPU in d in the loop
@@ -592,7 +592,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
 {
     int rc;
 
-    rc = xsm_vm_event_control(XSM_PRIV, d, vec->mode, vec->op);
+    rc = xsm_vm_event_control(XSM_PRIV, d, vec->type, vec->op);
     if ( rc )
         return rc;
 
@@ -619,10 +619,10 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
 
     rc = -ENOSYS;
 
-    switch ( vec->mode )
+    switch ( vec->type )
     {
 #ifdef CONFIG_HAS_MEM_PAGING
-    case XEN_DOMCTL_VM_EVENT_OP_PAGING:
+    case XEN_VM_EVENT_TYPE_PAGING:
     {
         rc = -EINVAL;
 
@@ -681,7 +681,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
     break;
 #endif
 
-    case XEN_DOMCTL_VM_EVENT_OP_MONITOR:
+    case XEN_VM_EVENT_TYPE_MONITOR:
     {
         rc = -EINVAL;
 
@@ -722,7 +722,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
     break;
 
 #ifdef CONFIG_HAS_MEM_SHARING
-    case XEN_DOMCTL_VM_EVENT_OP_SHARING:
+    case XEN_VM_EVENT_TYPE_SHARING:
     {
         rc = -EINVAL;
 
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 7e1cf21..26b1a55 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1,8 +1,8 @@
 /******************************************************************************
  * domctl.h
- *
+ * 
  * Domain management operations. For use by node control stack.
- *
+ * 
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to
  * deal in the Software without restriction, including without limitation the
@@ -769,18 +769,9 @@ struct xen_domctl_gdbsx_domstatus {
  * VM event operations
  */
 
-/* XEN_DOMCTL_vm_event_op */
-
 /*
- * There are currently three rings available for VM events:
- * sharing, monitor and paging. This hypercall allows one to
- * control these rings (enable/disable), as well as to signal
- * to the hypervisor to pull responses (resume) from the given
- * ring.
+ * There are currently three types of rings available for VM events.
  */
-#define XEN_VM_EVENT_ENABLE               0
-#define XEN_VM_EVENT_DISABLE              1
-#define XEN_VM_EVENT_RESUME               2
 
 /*
  * Domain memory paging
@@ -796,7 +787,7 @@ struct xen_domctl_gdbsx_domstatus {
  * EXDEV  - guest has PoD enabled
  * EBUSY  - guest has or had paging enabled, ring buffer still active
  */
-#define XEN_DOMCTL_VM_EVENT_OP_PAGING            1
+#define XEN_VM_EVENT_TYPE_PAGING         1
 
 /*
  * Monitor helper.
@@ -820,7 +811,7 @@ struct xen_domctl_gdbsx_domstatus {
  * EBUSY  - guest has or had access enabled, ring buffer still active
  *
  */
-#define XEN_DOMCTL_VM_EVENT_OP_MONITOR           2
+#define XEN_VM_EVENT_TYPE_MONITOR        2
 
 /*
  * Sharing ENOMEM helper.
@@ -835,15 +826,27 @@ struct xen_domctl_gdbsx_domstatus {
  * Note that shring can be turned on (as per the domctl below)
  * *without* this ring being setup.
  */
-#define XEN_DOMCTL_VM_EVENT_OP_SHARING           3
+#define XEN_VM_EVENT_TYPE_SHARING        3
+
+/*
+ * This hypercall allows one to control the vm_event rings (enable/disable),
+ * as well as to signal to the hypervisor to pull responses (resume) and
+ * retrieve the event channel from the given ring.
+ */
+#define XEN_VM_EVENT_ENABLE               0
+#define XEN_VM_EVENT_DISABLE              1
+#define XEN_VM_EVENT_RESUME               2
 
-/* Use for teardown/setup of helper<->hypervisor interface for paging,
- * access and sharing.*/
+/*
+ * Use for teardown/setup of helper<->hypervisor interface for paging,
+ * access and sharing.
+ */
+/* XEN_DOMCTL_vm_event_op */
 struct xen_domctl_vm_event_op {
-    uint32_t       op;           /* XEN_VM_EVENT_* */
-    uint32_t       mode;         /* XEN_DOMCTL_VM_EVENT_OP_* */
+    uint32_t        op;           /* XEN_VM_EVENT_* */
+    uint32_t        type;         /* XEN_VM_EVENT_TYPE_* */
 
-    uint32_t port;              /* OUT: event channel for ring */
+    uint32_t        port;         /* OUT: event channel for ring */
 };
 
 /*
@@ -997,7 +1000,7 @@ struct xen_domctl_psr_cmt_op {
  * Enable/disable monitoring various VM events.
  * This domctl configures what events will be reported to helper apps
  * via the ring buffer "MONITOR". The ring has to be first enabled
- * with the domctl XEN_DOMCTL_VM_EVENT_OP_MONITOR.
+ * with XEN_VM_EVENT_ENABLE.
  *
  * GET_CAPABILITIES can be used to determine which of these features is
  * available on a given platform.
-- 
2.7.4
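
As an addendum (not part of the patch), here is a sketch of how the
XEN_DOMCTL_vm_event_op hypercall is populated once the 'mode' field is
renamed to 'type'; it mirrors xc_vm_event_control() from the diff above and
uses the libxc-internal DECLARE_DOMCTL and do_domctl() helpers from
xc_private.h.

#include "xc_private.h"

/* Ask the hypervisor to pull queued responses off the paging ring. */
static int example_paging_resume(xc_interface *xch, uint32_t domain_id)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_vm_event_op;
    domctl.domain = domain_id;
    domctl.u.vm_event_op.op = XEN_VM_EVENT_RESUME;         /* XEN_VM_EVENT_* */
    domctl.u.vm_event_op.type = XEN_VM_EVENT_TYPE_PAGING;  /* XEN_VM_EVENT_TYPE_* */

    return do_domctl(xch, &domctl);
}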

