[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 4/4] vm_event: Add support for multi-page ring buffer



In high throughput introspection scenarios where lots of monitor
vm_events are generated, the ring buffer can fill up before the monitor
application gets a chance to handle all the requests, thus blocking
other vcpus, which will have to wait for a slot to become available.

This patch adds support for extending the ring buffer by allocating a
number of pages from domheap and mapping them to the monitor
application's domain using the foreignmemory_map_resource interface.
Unlike the current implementation, the ring buffer pages are not part of
the introspected DomU, so they will not be reclaimed when the monitor is
disabled.

Signed-off-by: Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
---
 tools/libxc/include/xenctrl.h       |   2 +
 tools/libxc/xc_monitor.c            |   7 +
 tools/libxc/xc_private.h            |   3 +
 tools/libxc/xc_vm_event.c           |  49 +++++++
 tools/tests/xen-access/xen-access.c |  33 +++--
 xen/arch/x86/domain_page.c          |   3 +-
 xen/arch/x86/mm.c                   |  14 ++
 xen/common/vm_event.c               | 258 +++++++++++++++++++++++++++---------
 xen/include/public/domctl.h         |   1 +
 xen/include/public/memory.h         |   1 +
 xen/include/xen/sched.h             |   5 +-
 xen/include/xen/vm_event.h          |   4 +
 12 files changed, 305 insertions(+), 75 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index bb75bcc..4f91ee9 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2005,6 +2005,8 @@ int xc_get_mem_access(xc_interface *xch, uint32_t 
domain_id,
  * Caller has to unmap this page when done.
  */
 void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
+void *xc_monitor_enable_ex(xc_interface *xch, uint32_t domain_id,
+                           int order, uint32_t *port);
 int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
 int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
 /*
diff --git a/tools/libxc/xc_monitor.c b/tools/libxc/xc_monitor.c
index 15e6a0e..5188835 100644
--- a/tools/libxc/xc_monitor.c
+++ b/tools/libxc/xc_monitor.c
@@ -28,6 +28,13 @@ void *xc_monitor_enable(xc_interface *xch, uint32_t 
domain_id, uint32_t *port)
                               port);
 }
 
+void *xc_monitor_enable_ex(xc_interface *xch, uint32_t domain_id, int order,
+                           uint32_t *port)
+{
+    return xc_vm_event_enable_ex(xch, domain_id, XEN_VM_EVENT_TYPE_MONITOR,
+                                 order, port);
+}
+
 int xc_monitor_disable(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index be22986..03d9460 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -436,6 +436,9 @@ int xc_vm_event_control(xc_interface *xch, uint32_t 
domain_id, unsigned int op,
 void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int type,
                          uint32_t *port);
 
+void *xc_vm_event_enable_ex(xc_interface *xch, uint32_t domain_id, int type,
+                            int order, uint32_t *port);
+
 int do_dm_op(xc_interface *xch, uint32_t domid, unsigned int nr_bufs, ...);
 
 #endif /* __XC_PRIVATE_H__ */
diff --git a/tools/libxc/xc_vm_event.c b/tools/libxc/xc_vm_event.c
index de37ca5..216bbe2 100644
--- a/tools/libxc/xc_vm_event.c
+++ b/tools/libxc/xc_vm_event.c
@@ -21,6 +21,7 @@
  */
 
 #include "xc_private.h"
+#include "xenforeignmemory.h"
 
 int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
                         unsigned int mode, uint32_t *port)
@@ -184,6 +185,54 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t 
domain_id, int type,
     return ring_page;
 }
 
+void *xc_vm_event_enable_ex(xc_interface *xch, uint32_t domain_id, int type,
+                            int order, uint32_t *port)
+{
+    xenforeignmemory_resource_handle *fres = NULL;
+    int saved_errno;
+    void *ring_buffer = NULL;
+
+    if ( !port )
+    {
+        errno = EINVAL;
+        return NULL;
+    }
+
+    /* Pause the domain for ring page setup */
+    if ( xc_domain_pause(xch, domain_id) )
+    {
+        PERROR("Unable to pause domain\n");
+        return NULL;
+    }
+
+    fres = xenforeignmemory_map_resource(xch->fmem, domain_id,
+                                         XENMEM_resource_vm_event, type, 0,
+                                         order, &ring_buffer,
+                                         PROT_READ | PROT_WRITE, 0);
+    if ( !fres )
+    {
+        PERROR("Unable to map vm_event ring pages resource\n");
+        goto out;
+    }
+
+    if ( xc_vm_event_control(xch, domain_id, XEN_VM_EVENT_GET_PORT, type, 
port) )
+        PERROR("Unable to get vm_event channel port\n");
+
+out:
+    saved_errno = errno;
+    if ( xc_domain_unpause(xch, domain_id) != 0 )
+    {
+        if (fres)
+            saved_errno = errno;
+        PERROR("Unable to unpause domain");
+    }
+
+    free(fres);
+    errno = saved_errno;
+    return ring_buffer;
+}
+
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/tests/xen-access/xen-access.c 
b/tools/tests/xen-access/xen-access.c
index 6aaee16..f4c4eda 100644
--- a/tools/tests/xen-access/xen-access.c
+++ b/tools/tests/xen-access/xen-access.c
@@ -68,7 +68,8 @@ typedef struct vm_event {
     int port;
     vm_event_back_ring_t back_ring;
     uint32_t evtchn_port;
-    void *ring_page;
+    void *ring_buffer;
+    unsigned int ring_page_count;
 } vm_event_t;
 
 typedef struct xenaccess {
@@ -136,8 +137,9 @@ int xenaccess_teardown(xc_interface *xch, xenaccess_t 
*xenaccess)
         return 0;
 
     /* Tear down domain xenaccess in Xen */
-    if ( xenaccess->vm_event.ring_page )
-        munmap(xenaccess->vm_event.ring_page, XC_PAGE_SIZE);
+    if ( xenaccess->vm_event.ring_buffer )
+        munmap(xenaccess->vm_event.ring_buffer,
+               xenaccess->vm_event.ring_page_count * XC_PAGE_SIZE );
 
     if ( mem_access_enable )
     {
@@ -210,12 +212,25 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t 
domain_id)
     /* Set domain id */
     xenaccess->vm_event.domain_id = domain_id;
 
-    /* Enable mem_access */
-    xenaccess->vm_event.ring_page =
+    /* Ring buffer page count */
+    xenaccess->vm_event.ring_page_count = 2;
+
+    xenaccess->vm_event.ring_buffer = xc_monitor_enable_ex(
+        xenaccess->xc_handle,
+        xenaccess->vm_event.domain_id,
+        xenaccess->vm_event.ring_page_count,
+        &xenaccess->vm_event.evtchn_port);
+
+    if (xenaccess->vm_event.ring_buffer == NULL && errno == EOPNOTSUPP)
+    {
+        xenaccess->vm_event.ring_page_count = 1;
+        xenaccess->vm_event.ring_buffer =
             xc_monitor_enable(xenaccess->xc_handle,
                               xenaccess->vm_event.domain_id,
                               &xenaccess->vm_event.evtchn_port);
-    if ( xenaccess->vm_event.ring_page == NULL )
+    }
+
+    if ( xenaccess->vm_event.ring_buffer == NULL )
     {
         switch ( errno ) {
             case EBUSY:
@@ -254,10 +269,10 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t 
domain_id)
     xenaccess->vm_event.port = rc;
 
     /* Initialise ring */
-    SHARED_RING_INIT((vm_event_sring_t *)xenaccess->vm_event.ring_page);
+    SHARED_RING_INIT((vm_event_sring_t *)xenaccess->vm_event.ring_buffer);
     BACK_RING_INIT(&xenaccess->vm_event.back_ring,
-                   (vm_event_sring_t *)xenaccess->vm_event.ring_page,
-                   XC_PAGE_SIZE);
+                   (vm_event_sring_t *)xenaccess->vm_event.ring_buffer,
+                   XC_PAGE_SIZE * xenaccess->vm_event.ring_page_count );
 
     /* Get max_gpfn */
     rc = xc_domain_maximum_gpfn(xenaccess->xc_handle,
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 0d23e52..2a9cbf3 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -331,10 +331,9 @@ void *__map_domain_pages_global(const struct page_info 
*pg, unsigned int nr)
 {
     mfn_t mfn[nr];
     int i;
-    struct page_info *cur_pg = (struct page_info *)&pg[0];
 
     for (i = 0; i < nr; i++)
-        mfn[i] = page_to_mfn(cur_pg++);
+        mfn[i] = page_to_mfn(pg++);
 
     return map_domain_pages_global(mfn, nr);
 }
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index baea2f5..bec09d0 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -103,6 +103,7 @@
 #include <xen/efi.h>
 #include <xen/grant_table.h>
 #include <xen/hypercall.h>
+#include <xen/vm_event.h>
 #include <asm/paging.h>
 #include <asm/shadow.h>
 #include <asm/page.h>
@@ -4415,6 +4416,19 @@ int arch_acquire_resource(struct domain *d, unsigned int 
type,
     }
 #endif
 
+    case XENMEM_resource_vm_event:
+    {
+        rc = vm_event_get_ring_frames(d, id, frame, nr_frames, mfn_list);
+        if ( rc )
+            break;
+        /*
+         * The ring frames are allocated from the domheap of the calling
+         * (monitor) domain, not the target domain, so report them as
+         * caller-owned.
+         */
+        *flags |= XENMEM_rsrc_acq_caller_owned;
+        break;
+    }
+
     default:
         rc = -EOPNOTSUPP;
         break;
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 4793aac..faece3c 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -39,16 +39,66 @@
 #define vm_event_ring_lock(_ved)       spin_lock(&(_ved)->ring_lock)
 #define vm_event_ring_unlock(_ved)     spin_unlock(&(_ved)->ring_lock)
 
+#define XEN_VM_EVENT_ALLOC_FROM_DOMHEAP 0xFFFFFFFF
+
+static int vm_event_alloc_ring(struct domain *d, struct vm_event_domain *ved)
+{
+    struct page_info *page;
+    void *va = NULL;
+    int i, rc = -ENOMEM;
+
+    page = alloc_domheap_pages(d, ved->ring_order, MEMF_no_refcount);
+    if ( !page )
+        return -ENOMEM;
+
+    for ( i = 0; i < (1 << ved->ring_order); i++ )
+        if ( !get_page_type(&page[i], PGT_writable_page) )
+        {
+            rc = -EINVAL;
+            goto fail;
+        }
+
+    va = __map_domain_pages_global(page, (1 << ved->ring_order));
+    if ( !va )
+        goto fail;
+
+    memset(va, 0, PAGE_SIZE << ved->ring_order);
+
+    ved->ring_buffer = va;
+    ved->ring_pg_struct = page;
+    return 0;
+
+fail:
+    i--;
+    for ( ; i >= 0; i-- )
+        put_page_type(&page[i]);
+    free_domheap_pages(page, ved->ring_order);
+
+    return rc;
+ }
+
+static void vm_event_destroy_ring(struct vm_event_domain *ved)
+{
+    if ( ved->ring_buffer )
+    {
+        int i;
+
+        unmap_domain_page_global(ved->ring_buffer);
+        for ( i = 0; i < (1 << ved->ring_order); i++ )
+            put_page_and_type(&(ved->ring_pg_struct[i]));
+        ved->ring_buffer = NULL;
+    }
+}
+
 static int vm_event_enable(
     struct domain *d,
-    struct xen_domctl_vm_event_op *vec,
     struct vm_event_domain **ved,
+    unsigned long param,
+    unsigned int nr_frames,
     int pause_flag,
-    int param,
     xen_event_channel_notification_t notification_fn)
 {
     int rc;
-    unsigned long ring_gfn = d->arch.hvm.params[param];
 
     if ( !*ved )
         *ved = xzalloc(struct vm_event_domain);
@@ -58,26 +108,39 @@ static int vm_event_enable(
     /* Only one helper at a time. If the helper crashed,
      * the ring is in an undefined state and so is the guest.
      */
-    if ( (*ved)->ring_page )
-        return -EBUSY;;
-
-    /* The parameter defaults to zero, and it should be
-     * set to something */
-    if ( ring_gfn == 0 )
-        return -ENOSYS;
+    if ( (*ved)->ring_buffer )
+        return -EBUSY;
 
     vm_event_ring_lock_init(*ved);
     vm_event_ring_lock(*ved);
 
     rc = vm_event_init_domain(d);
-
     if ( rc < 0 )
         goto err;
 
-    rc = prepare_ring_for_helper(d, ring_gfn, &(*ved)->ring_pg_struct,
-                                 &(*ved)->ring_page);
-    if ( rc < 0 )
-        goto err;
+    (*ved)->ring_order = get_order_from_pages(nr_frames);
+
+    if ( param == XEN_VM_EVENT_ALLOC_FROM_DOMHEAP )
+    {
+        rc = vm_event_alloc_ring(current->domain, *ved);
+        if ( rc < 0 )
+            goto err;
+    }
+    else
+    {
+        /* param points to a specific gfn */
+        unsigned long ring_gfn = d->arch.hvm.params[param];
+
+        /* The parameter defaults to zero, and it should be
+         * set to something */
+        if ( ring_gfn == 0 )
+            return -ENOSYS;
+
+        rc = prepare_ring_for_helper(d, ring_gfn, &(*ved)->ring_pg_struct,
+                                    &(*ved)->ring_buffer);
+        if ( rc < 0 )
+            goto err;
+    }
 
     /* Set the number of currently blocked vCPUs to 0. */
     (*ved)->blocked = 0;
@@ -88,12 +151,12 @@ static int vm_event_enable(
     if ( rc < 0 )
         goto err;
 
-    (*ved)->xen_port = vec->port = rc;
+    (*ved)->xen_port =  rc;
 
     /* Prepare ring buffer */
     FRONT_RING_INIT(&(*ved)->front_ring,
-                    (vm_event_sring_t *)(*ved)->ring_page,
-                    PAGE_SIZE);
+                    (vm_event_sring_t *)(*ved)->ring_buffer,
+                    PAGE_SIZE * nr_frames);
 
     /* Save the pause flag for this particular ring. */
     (*ved)->pause_flag = pause_flag;
@@ -105,8 +168,8 @@ static int vm_event_enable(
     return 0;
 
  err:
-    destroy_ring_for_helper(&(*ved)->ring_page,
-                            (*ved)->ring_pg_struct);
+    vm_event_destroy_ring(*ved);
+    vm_event_cleanup_domain(d);
     vm_event_ring_unlock(*ved);
     xfree(*ved);
     *ved = NULL;
@@ -221,9 +284,7 @@ static int vm_event_disable(struct domain *d, struct 
vm_event_domain **ved)
             }
         }
 
-        destroy_ring_for_helper(&(*ved)->ring_page,
-                                (*ved)->ring_pg_struct);
-
+        vm_event_destroy_ring(*ved);
         vm_event_cleanup_domain(d);
 
         vm_event_ring_unlock(*ved);
@@ -459,7 +520,7 @@ static int vm_event_grab_slot(struct vm_event_domain *ved, 
int foreign)
 {
     unsigned int avail_req;
 
-    if ( !ved->ring_page )
+    if ( !ved->ring_buffer )
         return -ENOSYS;
 
     vm_event_ring_lock(ved);
@@ -498,7 +559,7 @@ static int vm_event_wait_slot(struct vm_event_domain *ved)
 
 bool_t vm_event_check_ring(struct vm_event_domain *ved)
 {
-    return (ved && ved->ring_page);
+    return (ved && ved->ring_buffer);
 }
 
 /*
@@ -587,6 +648,46 @@ void vm_event_cleanup(struct domain *d)
 #endif
 }
 
+#ifdef CONFIG_HAS_MEM_PAGING
+static int vm_event_op_paging_is_supported(struct domain *d)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    /* hvm fixme: p2m_is_foreign types need addressing */
+    if ( is_hvm_domain(hardware_domain) )
+        return -EOPNOTSUPP;
+
+    /* Only HAP is supported */
+    if ( !hap_enabled(d) )
+        return -ENODEV;
+
+    /* No paging if iommu is used */
+    if ( unlikely(need_iommu(d)) )
+        return -EMLINK;
+
+    /* Disallow paging in a PoD guest */
+    if ( p2m->pod.entry_count )
+        return -EXDEV;
+
+    return 0;
+}
+#endif /* CONFIG_HAS_MEM_PAGING */
+
+#ifdef CONFIG_HAS_MEM_SHARING
+static int vm_event_op_sharing_is_supported(struct domain *d)
+{
+    /* hvm fixme: p2m_is_foreign types need addressing */
+    if ( is_hvm_domain(hardware_domain) )
+        return -EOPNOTSUPP;
+
+    /* Only HAP is supported */
+    if ( !hap_enabled(d) )
+        return -ENODEV;
+
+    return 0;
+}
+#endif /* CONFIG_HAS_MEM_SHARING */
+
 int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
                     XEN_GUEST_HANDLE_PARAM(void) u_domctl)
 {
@@ -629,35 +730,19 @@ int vm_event_domctl(struct domain *d, struct 
xen_domctl_vm_event_op *vec,
         switch( vec->op )
         {
         case XEN_VM_EVENT_ENABLE:
-        {
-            struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
-            rc = -EOPNOTSUPP;
-            /* hvm fixme: p2m_is_foreign types need addressing */
-            if ( is_hvm_domain(hardware_domain) )
-                break;
-
-            rc = -ENODEV;
-            /* Only HAP is supported */
-            if ( !hap_enabled(d) )
-                break;
-
-            /* No paging if iommu is used */
-            rc = -EMLINK;
-            if ( unlikely(need_iommu(d)) )
-                break;
-
-            rc = -EXDEV;
-            /* Disallow paging in a PoD guest */
-            if ( p2m->pod.entry_count )
+            rc = vm_event_op_paging_is_supported(d);
+            if ( rc )
                 break;
 
             /* domain_pause() not required here, see XSA-99 */
-            rc = vm_event_enable(d, vec, &d->vm_event_paging, _VPF_mem_paging,
-                                 HVM_PARAM_PAGING_RING_PFN,
+            rc = vm_event_enable(d, &d->vm_event_paging,
+                                 HVM_PARAM_PAGING_RING_PFN, 1,
+                                 _VPF_mem_paging,
                                  mem_paging_notification);
-        }
-        break;
+            if ( !rc )
+                vec->port = d->vm_event_paging->xen_port;
+
+            break;
 
         case XEN_VM_EVENT_DISABLE:
             if ( vm_event_check_ring(d->vm_event_paging) )
@@ -694,9 +779,14 @@ int vm_event_domctl(struct domain *d, struct 
xen_domctl_vm_event_op *vec,
             rc = arch_monitor_init_domain(d);
             if ( rc )
                 break;
-            rc = vm_event_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
-                                 HVM_PARAM_MONITOR_RING_PFN,
+
+            rc = vm_event_enable(d, &d->vm_event_monitor,
+                                 HVM_PARAM_MONITOR_RING_PFN, 1,
+                                  _VPF_mem_access,
                                  monitor_notification);
+            if ( !rc )
+                vec->port = d->vm_event_monitor->xen_port;
+
             break;
 
         case XEN_VM_EVENT_DISABLE:
@@ -716,6 +806,15 @@ int vm_event_domctl(struct domain *d, struct 
xen_domctl_vm_event_op *vec,
                 rc = -ENODEV;
             break;
 
+        case XEN_VM_EVENT_GET_PORT:
+            rc = -ENODEV;
+            if ( vm_event_check_ring(d->vm_event_monitor) )
+            {
+                vec->port = d->vm_event_monitor->xen_port;
+                rc = 0;
+            }
+            break;
+
         default:
             rc = -ENOSYS;
             break;
@@ -731,20 +830,18 @@ int vm_event_domctl(struct domain *d, struct 
xen_domctl_vm_event_op *vec,
         switch( vec->op )
         {
         case XEN_VM_EVENT_ENABLE:
-            rc = -EOPNOTSUPP;
-            /* hvm fixme: p2m_is_foreign types need addressing */
-            if ( is_hvm_domain(hardware_domain) )
-                break;
-
-            rc = -ENODEV;
-            /* Only HAP is supported */
-            if ( !hap_enabled(d) )
+            rc = vm_event_op_sharing_is_supported(d);
+            if ( rc )
                 break;
 
             /* domain_pause() not required here, see XSA-99 */
-            rc = vm_event_enable(d, vec, &d->vm_event_share, _VPF_mem_sharing,
-                                 HVM_PARAM_SHARING_RING_PFN,
+            rc = vm_event_enable(d, &d->vm_event_share,
+                                 HVM_PARAM_SHARING_RING_PFN, 1,
+                                 _VPF_mem_sharing,
                                  mem_sharing_notification);
+            if ( !rc )
+                vec->port = d->vm_event_share->xen_port;
+
             break;
 
         case XEN_VM_EVENT_DISABLE:
@@ -778,6 +875,43 @@ int vm_event_domctl(struct domain *d, struct 
xen_domctl_vm_event_op *vec,
     return rc;
 }
 
+int vm_event_get_ring_frames(struct domain *d, unsigned int id,
+                             unsigned long frame, unsigned int nr_frames,
+                             xen_pfn_t mfn_list[])
+{
+    int rc, i;
+    int pause_flag;
+    struct vm_event_domain **ved;
+    xen_event_channel_notification_t notification_fn;
+
+    switch(id)
+    {
+    case XEN_VM_EVENT_TYPE_MONITOR:
+        ved = &d->vm_event_monitor;
+        pause_flag = _VPF_mem_access;
+        notification_fn = monitor_notification;
+
+        rc = arch_monitor_init_domain(d);
+        if ( rc )
+            return rc;
+        break;
+
+    default:
+        return -ENOSYS;
+    }
+
+    rc = vm_event_enable(d, ved, XEN_VM_EVENT_ALLOC_FROM_DOMHEAP,
+                         nr_frames, pause_flag,
+                         notification_fn);
+    if ( rc )
+        return rc;
+
+    for ( i = 0; i < nr_frames; i++ )
+        mfn_list[i] = mfn_x(page_to_mfn(&(*ved)->ring_pg_struct[i]));
+
+    return 0;
+}
+
 void vm_event_vcpu_pause(struct vcpu *v)
 {
     ASSERT(v == current);
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index ac4ced2..066d4da 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -772,6 +772,7 @@ struct xen_domctl_gdbsx_domstatus {
 #define XEN_VM_EVENT_ENABLE               0
 #define XEN_VM_EVENT_DISABLE              1
 #define XEN_VM_EVENT_RESUME               2
+#define XEN_VM_EVENT_GET_PORT             3
 
 /*
  * Domain memory paging
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 8fc27ce..09400f5 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -612,6 +612,7 @@ struct xen_mem_acquire_resource {
 
 #define XENMEM_resource_ioreq_server 0
 #define XENMEM_resource_grant_table 1
+#define XENMEM_resource_vm_event 2
 
     /*
      * IN - a type-specific resource identifier, which must be zero
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 0ba80cb..be01f93 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -286,9 +286,10 @@ struct vm_event_domain
     /* The ring has 64 entries */
     unsigned char foreign_producers;
     unsigned char target_producers;
-    /* shared ring page */
-    void *ring_page;
+    /* shared ring pages */
+    void *ring_buffer;
     struct page_info *ring_pg_struct;
+    unsigned int ring_order;
     /* front-end ring */
     vm_event_front_ring_t front_ring;
     /* event channel port (vcpu0 only) */
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 2ff6e1c..d9c5e93 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -80,6 +80,10 @@ void vm_event_set_registers(struct vcpu *v, 
vm_event_response_t *rsp);
 
 void vm_event_monitor_next_interrupt(struct vcpu *v);
 
+int vm_event_get_ring_frames(struct domain *d, unsigned int id,
+                             unsigned long frame, unsigned int nr_frames,
+                             xen_pfn_t mfn_list[]);
+
 #endif /* __VM_EVENT_H__ */
 
 /*
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.