[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 5 of 5] Allow memevent responses to be signaled via the event channel



 xen/arch/x86/mm/mem_event.c |  26 ++++++++++++++++++++------
 1 files changed, 20 insertions(+), 6 deletions(-)


Don't require a separate domctl to notify the memevent interface that an event
has occurred.  This domctl can be taxing, particularly when you are scaling
events and paging to many domains across a single system.  Instead, we use the
existing event channel to signal when we place something in the ring (as per
normal ring operation).

Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxx>
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>

diff -r c2b5e65331ee -r 29701f5bdd84 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -37,9 +37,11 @@
 #define mem_event_ring_lock(_med)       spin_lock(&(_med)->ring_lock)
 #define mem_event_ring_unlock(_med)     spin_unlock(&(_med)->ring_lock)
 
-static int mem_event_enable(struct domain *d,
-                            xen_domctl_mem_event_op_t *mec,
-                            struct mem_event_domain *med)
+static int mem_event_enable(
+    struct domain *d,
+    xen_domctl_mem_event_op_t *mec,
+    struct mem_event_domain *med,
+    xen_event_channel_notification_t notification_fn)
 {
     int rc;
     struct domain *dom_mem_event = current->domain;
@@ -97,7 +99,7 @@ static int mem_event_enable(struct domai
     /* Allocate event channel */
     rc = alloc_unbound_xen_event_channel(d->vcpu[0],
                                          current->domain->domain_id,
-                                         NULL);
+                                         notification_fn);
     if ( rc < 0 )
         goto err;
 
@@ -330,6 +332,18 @@ int mem_event_check_ring(struct domain *
     return 0;
 }
 
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void mem_paging_notification(struct vcpu *v, unsigned int port)
+{
+    p2m_mem_paging_resume(v->domain);
+}
+
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void mem_access_notification(struct vcpu *v, unsigned int port)
+{
+    p2m_mem_access_resume(v->domain);
+}
+
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                      XEN_GUEST_HANDLE(void) u_domctl)
 {
@@ -391,7 +405,7 @@ int mem_event_domctl(struct domain *d, x
             if ( p2m->pod.entry_count )
                 break;
 
-            rc = mem_event_enable(d, mec, med);
+            rc = mem_event_enable(d, mec, med, mem_paging_notification);
         }
         break;
 
@@ -430,7 +444,7 @@ int mem_event_domctl(struct domain *d, x
             if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
                 break;
 
-            rc = mem_event_enable(d, mec, med);
+            rc = mem_event_enable(d, mec, med, mem_access_notification);
         }
         break;
 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.