[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86/mm: Consume multiple mem event responses off the ring



# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1323202232 0
# Node ID c65d1a9769b4140ea35faf27b070a75767b38db4
# Parent  0964341efd658610e725555db3474fa56641ebed
x86/mm: Consume multiple mem event responses off the ring

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---


diff -r 0964341efd65 -r c65d1a9769b4 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c       Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/arch/x86/mm/mem_event.c       Tue Dec 06 20:10:32 2011 +0000
@@ -166,7 +166,7 @@
     notify_via_xen_event_channel(d, med->xen_port);
 }
 
-void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp)
+int mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp)
 {
     mem_event_front_ring_t *front_ring;
     RING_IDX rsp_cons;
@@ -176,6 +176,12 @@
     front_ring = &med->front_ring;
     rsp_cons = front_ring->rsp_cons;
 
+    if ( !RING_HAS_UNCONSUMED_RESPONSES(front_ring) )
+    {
+        mem_event_ring_unlock(med);
+        return 0;
+    }
+
     /* Copy response */
     memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
     rsp_cons++;
@@ -185,6 +191,8 @@
     front_ring->sring->rsp_event = rsp_cons + 1;
 
     mem_event_ring_unlock(med);
+
+    return 1;
 }
 
 void mem_event_unpause_vcpus(struct domain *d)
diff -r 0964341efd65 -r c65d1a9769b4 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/arch/x86/mm/mem_sharing.c     Tue Dec 06 20:10:32 2011 +0000
@@ -300,12 +300,13 @@
 {
     mem_event_response_t rsp;
 
-    /* Get request off the ring */
-    mem_event_get_response(&d->mem_event->share, &rsp);
-
-    /* Unpause domain/vcpu */
-    if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+    /* Get all requests off the ring */
+    while ( mem_event_get_response(&d->mem_event->share, &rsp) )
+    {
+        /* Unpause domain/vcpu */
+        if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+            vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+    }
 
     return 0;
 }
diff -r 0964341efd65 -r c65d1a9769b4 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/arch/x86/mm/p2m.c     Tue Dec 06 20:10:32 2011 +0000
@@ -1053,31 +1053,31 @@
     p2m_access_t a;
     mfn_t mfn;
 
-    /* Pull the response off the ring */
-    mem_event_get_response(&d->mem_event->paging, &rsp);
-
-    /* Fix p2m entry if the page was not dropped */
-    if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
+    /* Pull all responses off the ring */
+    while( mem_event_get_response(&d->mem_event->paging, &rsp) )
     {
-        p2m_lock(p2m);
-        mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
-        /* Allow only pages which were prepared properly, or pages which
-         * were nominated but not evicted */
-        if ( mfn_valid(mfn) && 
-             (p2mt == p2m_ram_paging_in || p2mt == p2m_ram_paging_in_start) )
+        /* Fix p2m entry if the page was not dropped */
+        if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
         {
-            set_p2m_entry(p2m, rsp.gfn, mfn, PAGE_ORDER_4K, 
-                            paging_mode_log_dirty(d) ? p2m_ram_logdirty : p2m_ram_rw, 
-                            a);
-            set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
+            p2m_lock(p2m);
+            mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
+            /* Allow only pages which were prepared properly, or pages which
+             * were nominated but not evicted */
+            if ( mfn_valid(mfn) && 
+                 (p2mt == p2m_ram_paging_in || p2mt == p2m_ram_paging_in_start) )
+            {
+                set_p2m_entry(p2m, rsp.gfn, mfn, PAGE_ORDER_4K, 
+                                paging_mode_log_dirty(d) ? p2m_ram_logdirty : 
+                                p2m_ram_rw, a);
+                set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
+            }
+            p2m_unlock(p2m);
         }
-        p2m_unlock(p2m);
+        /* Unpause domain */
+        if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+            vcpu_unpause(d->vcpu[rsp.vcpu_id]);
     }
 
-    /* Unpause domain */
-    if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
-
     /* Unpause any domains that were paused because the ring was full */
     mem_event_unpause_vcpus(d);
 }
@@ -1161,11 +1161,13 @@
 {
     mem_event_response_t rsp;
 
-    mem_event_get_response(&d->mem_event->access, &rsp);
-
-    /* Unpause domain */
-    if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+    /* Pull all responses off the ring */
+    while( mem_event_get_response(&d->mem_event->access, &rsp) )
+    {
+        /* Unpause domain */
+        if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+            vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+    }
 
     /* Unpause any domains that were paused because the ring was full or no listener 
      * was available */
diff -r 0964341efd65 -r c65d1a9769b4 xen/include/asm-x86/mem_event.h
--- a/xen/include/asm-x86/mem_event.h   Tue Dec 06 20:10:32 2011 +0000
+++ b/xen/include/asm-x86/mem_event.h   Tue Dec 06 20:10:32 2011 +0000
@@ -29,7 +29,7 @@
 int mem_event_check_ring(struct domain *d, struct mem_event_domain *med);
 void mem_event_put_req_producers(struct mem_event_domain *med);
 void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req);
-void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp);
+int mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp);
 void mem_event_unpause_vcpus(struct domain *d);
 
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.