
[Xen-changelog] [xen-unstable] x86/mm: Allow memevent responses to be signaled via the event channel



# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1323202232 0
# Node ID 0964341efd658610e725555db3474fa56641ebed
# Parent  1620291f0c4a413b7ea7546fee6802a56d703d8f
x86/mm: Allow memevent responses to be signaled via the event channel

Don't require a separate domctl to notify the memevent interface that an event
has occurred.  This domctl can be taxing, particularly when scaling events and
paging across many domains on a single system.  Instead, we use the existing
event channel to signal when we place something in the ring (as per normal
ring operation).

Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxx>
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---


diff -r 1620291f0c4a -r 0964341efd65 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c       Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/arch/x86/mm/mem_event.c       Tue Dec 06 20:10:32 2011 +0000
@@ -37,9 +37,11 @@
 #define mem_event_ring_lock(_med)       spin_lock(&(_med)->ring_lock)
 #define mem_event_ring_unlock(_med)     spin_unlock(&(_med)->ring_lock)
 
-static int mem_event_enable(struct domain *d,
-                            xen_domctl_mem_event_op_t *mec,
-                            struct mem_event_domain *med)
+static int mem_event_enable(
+    struct domain *d,
+    xen_domctl_mem_event_op_t *mec,
+    struct mem_event_domain *med,
+    xen_event_channel_notification_t notification_fn)
 {
     int rc;
     struct domain *dom_mem_event = current->domain;
@@ -94,7 +96,7 @@
     /* Allocate event channel */
     rc = alloc_unbound_xen_event_channel(d->vcpu[0],
                                          current->domain->domain_id,
-                                         NULL);
+                                         notification_fn);
     if ( rc < 0 )
         goto err;
 
@@ -233,6 +235,18 @@
     return ring_full;
 }
 
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void mem_paging_notification(struct vcpu *v, unsigned int port)
+{
+    p2m_mem_paging_resume(v->domain);
+}
+
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void mem_access_notification(struct vcpu *v, unsigned int port)
+{
+    p2m_mem_access_resume(v->domain);
+}
+
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                      XEN_GUEST_HANDLE(void) u_domctl)
 {
@@ -294,7 +308,7 @@
             if ( p2m->pod.entry_count )
                 break;
 
-            rc = mem_event_enable(d, mec, med);
+            rc = mem_event_enable(d, mec, med, mem_paging_notification);
         }
         break;
 
@@ -333,7 +347,7 @@
             if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
                 break;
 
-            rc = mem_event_enable(d, mec, med);
+            rc = mem_event_enable(d, mec, med, mem_access_notification);
         }
         break;
 

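For reference, the notification hook threaded through mem_event_enable() above
follows Xen's internal event-channel interface; roughly as below (paraphrased
from xen/include/xen/event.h of this era, so treat the exact declaration site
and wording as an assumption):

    /* Called from Xen when the remote end notifies the Xen-bound port. */
    typedef void (*xen_event_channel_notification_t)(
        struct vcpu *v, unsigned int port);

    /* Allocate an unbound Xen-attached event channel for <remote_domid>.
     * notification_fn may be NULL; mem_event previously passed NULL here,
     * which is why a separate domctl was needed to make Xen scan the ring. */
    int alloc_unbound_xen_event_channel(
        struct vcpu *local_vcpu, domid_t remote_domid,
        xen_event_channel_notification_t notification_fn);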