[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] mem_event: add ref counting for free request slots
# HG changeset patch # User Olaf Hering <olaf@xxxxxxxxx> # Date 1315231809 -3600 # Node ID 083f10851dd85cf436eb4c26ac9afe78997b2bca # Parent 7f357e1ef60adf1e43fad97d58721cc2c5309c42 mem_event: add ref counting for free request slots If mem_event_check_ring() is called by many vcpus at the same time before any of them has also called mem_event_put_request(), all of the callers must assume there are enough free slots available in the ring. Record the number of request producers in mem_event_check_ring() to keep track of available free slots. Add a new mem_event_put_req_producers() function to release a request attempt made in mem_event_check_ring(). It's required for p2m_mem_paging_populate() because that function can only modify the p2m type if there are free request slots. But in some cases p2m_mem_paging_populate() does not actually have to produce another request when it is known that the same request was already made earlier by a different vcpu. mem_event_check_ring() cannot return a reference to a free request slot because there could be multiple references for different vcpus and the order of mem_event_put_request() calls is not known. As a result, incomplete requests could be consumed by the ring user. 
Signed-off-by: Olaf Hering <olaf@xxxxxxxxx> --- diff -r 7f357e1ef60a -r 083f10851dd8 xen/arch/x86/mm/mem_event.c --- a/xen/arch/x86/mm/mem_event.c Mon Sep 05 15:09:24 2011 +0100 +++ b/xen/arch/x86/mm/mem_event.c Mon Sep 05 15:10:09 2011 +0100 @@ -37,8 +37,6 @@ #define mem_event_ring_lock(_d) spin_lock(&(_d)->mem_event.ring_lock) #define mem_event_ring_unlock(_d) spin_unlock(&(_d)->mem_event.ring_lock) -#define MEM_EVENT_RING_THRESHOLD 4 - static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn) { int rc; @@ -109,6 +107,7 @@ req_prod++; /* Update ring */ + d->mem_event.req_producers--; front_ring->req_prod_pvt = req_prod; RING_PUSH_REQUESTS(front_ring); @@ -153,11 +152,18 @@ vcpu_sleep_nosync(v); } +void mem_event_put_req_producers(struct domain *d) +{ + mem_event_ring_lock(d); + d->mem_event.req_producers--; + mem_event_ring_unlock(d); +} + int mem_event_check_ring(struct domain *d) { struct vcpu *curr = current; int free_requests; - int ring_full; + int ring_full = 1; if ( !d->mem_event.ring_page ) return -1; @@ -165,12 +171,11 @@ mem_event_ring_lock(d); free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring); - if ( unlikely(free_requests < 2) ) + if ( d->mem_event.req_producers < free_requests ) { - gdprintk(XENLOG_INFO, "free request slots: %d\n", free_requests); - WARN_ON(free_requests == 0); + d->mem_event.req_producers++; + ring_full = 0; } - ring_full = free_requests < MEM_EVENT_RING_THRESHOLD ? 1 : 0; if ( (curr->domain->domain_id == d->domain_id) && ring_full ) { diff -r 7f357e1ef60a -r 083f10851dd8 xen/arch/x86/mm/mem_sharing.c --- a/xen/arch/x86/mm/mem_sharing.c Mon Sep 05 15:09:24 2011 +0100 +++ b/xen/arch/x86/mm/mem_sharing.c Mon Sep 05 15:10:09 2011 +0100 @@ -281,7 +281,6 @@ vcpu_pause_nosync(v); req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; - /* XXX: Need to reserve a request, not just check the ring! 
*/ if(mem_event_check_ring(d)) return page; req.gfn = gfn; diff -r 7f357e1ef60a -r 083f10851dd8 xen/arch/x86/mm/p2m.c --- a/xen/arch/x86/mm/p2m.c Mon Sep 05 15:09:24 2011 +0100 +++ b/xen/arch/x86/mm/p2m.c Mon Sep 05 15:10:09 2011 +0100 @@ -803,6 +803,7 @@ else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged ) { /* gfn is already on its way back and vcpu is not paused */ + mem_event_put_req_producers(d); return; } diff -r 7f357e1ef60a -r 083f10851dd8 xen/include/asm-x86/mem_event.h --- a/xen/include/asm-x86/mem_event.h Mon Sep 05 15:09:24 2011 +0100 +++ b/xen/include/asm-x86/mem_event.h Mon Sep 05 15:10:09 2011 +0100 @@ -27,6 +27,7 @@ /* Pauses VCPU while marking pause flag for mem event */ void mem_event_mark_and_pause(struct vcpu *v); int mem_event_check_ring(struct domain *d); +void mem_event_put_req_producers(struct domain *d); void mem_event_put_request(struct domain *d, mem_event_request_t *req); void mem_event_get_response(struct domain *d, mem_event_response_t *rsp); void mem_event_unpause_vcpus(struct domain *d); diff -r 7f357e1ef60a -r 083f10851dd8 xen/include/xen/sched.h --- a/xen/include/xen/sched.h Mon Sep 05 15:09:24 2011 +0100 +++ b/xen/include/xen/sched.h Mon Sep 05 15:10:09 2011 +0100 @@ -183,6 +183,7 @@ { /* ring lock */ spinlock_t ring_lock; + unsigned int req_producers; /* shared page */ mem_event_shared_page_t *shared_page; /* shared ring page */ _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |