[Xen-devel] [PATCHv10 4/4] gnttab: use per-VCPU maptrack free lists

From: Malcolm Crossley <malcolm.crossley@xxxxxxxxxx>

Performance analysis of aggregate network throughput with many VMs
shows that performance is significantly limited by contention on the
maptrack lock when obtaining/releasing maptrack handles from the free
list.

Instead of a single free list, use a per-VCPU list. This avoids any
contention when obtaining a handle. Handles must be released back to
their original list, and since this may occur on a different VCPU there
is some contention on the destination VCPU's free list tail pointer
(but this is much better than a per-domain lock).

Increase the default maximum number of maptrack frames by 4 times
because: a) struct grant_mapping is now 16 bytes (instead of 8); and
b) a guest may not evenly distribute all the grant map operations
across the VCPUs (meaning some VCPUs need more maptrack entries than
others).

Signed-off-by: Malcolm Crossley <malcolm.crossley@xxxxxxxxxx>
Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
v10:
- Divide max_maptrack_frames evenly amongst the VCPUs.
- Increase default max_maptrack_frames to compensate.
---
 xen/common/grant_table.c      | 146 +++++++++++++++++++++++++----------------
 xen/include/xen/grant_table.h |   8 ++-
 xen/include/xen/sched.h       |   5 ++
 3 files changed, 99 insertions(+), 60 deletions(-)
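Before reading the diff, the scheme described above can be summarised in a
small standalone sketch. This is illustrative only: the types and names
(struct demo_vcpu, struct demo_entry, get_handle, put_handle) are
hypothetical, and C11 atomics stand in for Xen's read_atomic()/
write_atomic()/cmpxchg(). It models the properties the patch relies on:
allocation touches only the caller's own list head, each list always keeps
one sentinel entry, and releasing a handle appends it to the owning VCPU's
tail, which is the only point of cross-VCPU contention.

/*
 * Standalone sketch of a per-VCPU free list of handles -- not the
 * hypervisor code.  C11 atomics replace Xen's atomic helpers.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_TAIL    (~0u)      /* list terminator, like MAPTRACK_TAIL */
#define DEMO_ENTRIES 8u

struct demo_entry {
    _Atomic unsigned int next;  /* next free handle, or DEMO_TAIL */
    unsigned int owner;         /* vcpu whose list this handle belongs to */
};

struct demo_vcpu {
    unsigned int head;          /* consumed only by this vcpu */
    _Atomic unsigned int tail;  /* appended to by any vcpu */
};

static struct demo_entry entries[DEMO_ENTRIES];

/* Thread handles [first, first + count) onto v's free list. */
static void init_list(struct demo_vcpu *v, unsigned int vcpu_id,
                      unsigned int first, unsigned int count)
{
    for ( unsigned int i = 0; i < count; i++ )
    {
        entries[first + i].owner = vcpu_id;
        atomic_store(&entries[first + i].next,
                     i + 1 < count ? first + i + 1 : DEMO_TAIL);
    }
    v->head = first;
    atomic_store(&v->tail, first + count - 1);
}

/* Allocate from the caller's own list: no locks, no shared writes. */
static int get_handle(struct demo_vcpu *v)
{
    unsigned int head = v->head, next;

    if ( head == DEMO_TAIL )
        return -1;
    /* Never hand out the last entry: it is the sentinel the tail
       append below relies on (mirrors __get_maptrack_handle()). */
    next = atomic_load(&entries[head].next);
    if ( next == DEMO_TAIL )
        return -1;
    v->head = next;
    return (int)head;
}

/* Release to the tail of the owning vcpu's list (possibly remote). */
static void put_handle(struct demo_vcpu *vcpus, unsigned int handle)
{
    struct demo_vcpu *v = &vcpus[entries[handle].owner];
    unsigned int prev_tail = atomic_load(&v->tail);

    /* 1. The released entry becomes a new tail. */
    atomic_store(&entries[handle].next, DEMO_TAIL);
    /* 2. Swing the owner's tail to it; this retry loop is the only
       point where vcpus can contend, as the patch notes. */
    while ( !atomic_compare_exchange_weak(&v->tail, &prev_tail, handle) )
        ;   /* prev_tail is reloaded with the current tail on failure */
    /* 3. Link the old tail forward, making it allocatable again. */
    atomic_store(&entries[prev_tail].next, handle);
}

int main(void)
{
    struct demo_vcpu vcpus[2];

    init_list(&vcpus[0], 0, 0, 4);
    init_list(&vcpus[1], 1, 4, 4);

    int h = get_handle(&vcpus[0]);      /* taken from vcpu 0's own list */
    printf("got handle %d\n", h);
    if ( h >= 0 )
        put_handle(vcpus, (unsigned int)h); /* returned to the owner's tail */
    return 0;
}

Because an entry whose next pointer is DEMO_TAIL is never handed out, a
concurrent tail append can only ever write into an entry that is still on
the free list, which is what lets the get path run without any lock.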
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index d67b7f4..dd24491 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -57,7 +57,7 @@ integer_param("gnttab_max_frames", max_grant_frames);
  * New options allow to set max_maptrack_frames and
  * map_grant_table_frames independently.
  */
-#define DEFAULT_MAX_MAPTRACK_FRAMES 256
+#define DEFAULT_MAX_MAPTRACK_FRAMES 1024
 
 static unsigned int __read_mostly max_maptrack_frames;
 integer_param("gnttab_max_maptrack_frames", max_maptrack_frames);
@@ -117,12 +117,6 @@ struct gnttab_unmap_common {
 #define maptrack_entry(t, e) \
     ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
 
-static inline unsigned int
-nr_maptrack_frames(struct grant_table *t)
-{
-    return t->maptrack_limit / MAPTRACK_PER_PAGE;
-}
-
 #define MAPTRACK_TAIL (~0u)
 
 #define SHGNT_PER_PAGE_V1 (PAGE_SIZE / sizeof(grant_entry_v1_t))
@@ -275,66 +269,104 @@ double_gt_unlock(struct grant_table *lgt, struct grant_table *rgt)
 
 static inline int
 __get_maptrack_handle(
-    struct grant_table *t)
+    struct grant_table *t,
+    struct vcpu *v)
 {
-    unsigned int h;
-    if ( unlikely((h = t->maptrack_head) == MAPTRACK_TAIL) )
+    unsigned int head, next;
+
+    /* No maptrack pages allocated for this VCPU yet? */
+    head = v->maptrack_head;
+    if ( unlikely(head == MAPTRACK_TAIL) )
         return -1;
-    t->maptrack_head = maptrack_entry(t, h).ref;
-    return h;
+
+    /*
+     * Always keep one entry in the free list to make it easier to add
+     * free entries to the tail.
+     */
+    next = read_atomic(&maptrack_entry(t, head).ref);
+    if ( unlikely(next == MAPTRACK_TAIL) )
+        return -1;
+
+    v->maptrack_head = next;
+
+    return head;
 }
 
 static inline void
 put_maptrack_handle(
     struct grant_table *t, int handle)
 {
-    spin_lock(&t->maptrack_lock);
-    maptrack_entry(t, handle).ref = t->maptrack_head;
-    t->maptrack_head = handle;
-    spin_unlock(&t->maptrack_lock);
+    struct domain *d = current->domain;
+    struct vcpu *v;
+    unsigned int prev_tail, cur_tail;
+
+    /* 1. Set entry to be a tail. */
+    maptrack_entry(t, handle).ref = MAPTRACK_TAIL;
+
+    /* 2. Add entry to the tail of the list on the original VCPU. */
+    v = d->vcpu[maptrack_entry(t, handle).vcpu];
+
+    cur_tail = read_atomic(&v->maptrack_tail);
+    do {
+        prev_tail = cur_tail;
+        cur_tail = cmpxchg(&v->maptrack_tail, prev_tail, handle);
+    } while ( cur_tail != prev_tail );
+
+    /* 3. Update the old tail entry to point to the new entry. */
+    write_atomic(&maptrack_entry(t, prev_tail).ref, handle);
 }
 
 static inline int
 get_maptrack_handle(
     struct grant_table *lgt)
 {
+    struct vcpu          *v = current;
     int                   i;
     grant_handle_t        handle;
     struct grant_mapping *new_mt;
-    unsigned int          new_mt_limit, nr_frames;
 
-    spin_lock(&lgt->maptrack_lock);
+    handle = __get_maptrack_handle(lgt, v);
+    if ( likely(handle != -1) )
+        return handle;
 
-    while ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
-    {
-        nr_frames = nr_maptrack_frames(lgt);
-        if ( nr_frames >= max_maptrack_frames )
-            break;
+    /*
+     * max_maptrack_frames is per domain so each VCPU gets a share of
+     * the maximum, but allow at least one frame per VCPU.
+     */
+    if ( v->maptrack_frames
+         && v->maptrack_frames >= max_maptrack_frames / v->domain->max_vcpus )
+        return -1;
 
-        new_mt = alloc_xenheap_page();
-        if ( !new_mt )
-            break;
+    new_mt = alloc_xenheap_page();
+    if ( !new_mt )
+        return -1;
+    clear_page(new_mt);
 
-        clear_page(new_mt);
+    spin_lock(&lgt->maptrack_lock);
 
-        new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;
+    for ( i = 1; i < MAPTRACK_PER_PAGE; i++ )
+    {
+        new_mt[i - 1].ref = (lgt->maptrack_pages * MAPTRACK_PER_PAGE) + i;
+        new_mt[i - 1].vcpu = v->vcpu_id;
+    }
+    /* Set last entry vcpu and ref */
+    new_mt[i - 1].ref = v->maptrack_head;
+    new_mt[i - 1].vcpu = v->vcpu_id;
 
-        for ( i = 1; i < MAPTRACK_PER_PAGE; i++ )
-            new_mt[i - 1].ref = lgt->maptrack_limit + i;
-        new_mt[i - 1].ref = lgt->maptrack_head;
-        lgt->maptrack_head = lgt->maptrack_limit;
+    v->maptrack_head = lgt->maptrack_pages * MAPTRACK_PER_PAGE;
 
-        lgt->maptrack[nr_frames] = new_mt;
-        smp_wmb();
-        lgt->maptrack_limit      = new_mt_limit;
+    /* Set tail directly if this is the first page for this VCPU. */
+    if ( v->maptrack_tail == MAPTRACK_TAIL )
+        v->maptrack_tail = (lgt->maptrack_pages * MAPTRACK_PER_PAGE)
+                           + MAPTRACK_PER_PAGE - 1;
 
-        gdprintk(XENLOG_INFO, "Increased maptrack size to %u frames\n",
-                 nr_frames + 1);
-    }
+    lgt->maptrack[lgt->maptrack_pages++] = new_mt;
 
     spin_unlock(&lgt->maptrack_lock);
 
-    return handle;
+    v->maptrack_frames++;
+
+    return __get_maptrack_handle(lgt, v);
 }
 
 /* Number of grant table entries. Caller must hold d's grant table lock. */
@@ -573,7 +605,8 @@ static void mapcount(
      */
     ASSERT(rw_is_write_locked(&rd->grant_table->lock));
 
-    for ( handle = 0; handle < lgt->maptrack_limit; handle++ )
+    for ( handle = 0; handle < lgt->maptrack_pages * MAPTRACK_PER_PAGE;
+          handle++ )
     {
         map = &maptrack_entry(lgt, handle);
         if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) ||
@@ -941,7 +974,7 @@ __gnttab_unmap_common(
 
     op->frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);
 
-    if ( unlikely(op->handle >= lgt->maptrack_limit) )
+    if ( unlikely(op->handle >= lgt->maptrack_pages * MAPTRACK_PER_PAGE) )
     {
         gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
         op->status = GNTST_bad_handle;
@@ -1416,6 +1449,7 @@ gnttab_setup_table(
     struct gnttab_setup_table op;
     struct domain *d;
     struct grant_table *gt;
+    struct vcpu *v;
    int i;
     xen_pfn_t gmfn;
 
@@ -1457,6 +1491,17 @@ gnttab_setup_table(
 
     gt = d->grant_table;
     write_lock(&gt->lock);
 
+    /* Tracking of mapped foreign frames table */
+    gt->maptrack = xzalloc_array(struct grant_mapping *, max_maptrack_frames);
+    if ( gt->maptrack == NULL )
+        goto out3;
+    for_each_vcpu( d, v )
+    {
+        v->maptrack_head = MAPTRACK_TAIL;
+        v->maptrack_tail = MAPTRACK_TAIL;
+    }
+    gt->maptrack_pages = 0;
+
     if ( gt->gt_version == 0 )
         gt->gt_version = 1;
@@ -3040,18 +3085,6 @@ grant_table_create(
             spin_lock_init(&t->active[i][j].lock);
     }
 
-    /* Tracking of mapped foreign frames table */
-    if ( (t->maptrack = xzalloc_array(struct grant_mapping *,
-                                      max_maptrack_frames)) == NULL )
-        goto no_mem_2;
-    if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
-        goto no_mem_3;
-    clear_page(t->maptrack[0]);
-    t->maptrack_limit = MAPTRACK_PER_PAGE;
-    for ( i = 1; i < MAPTRACK_PER_PAGE; i++ )
-        t->maptrack[0][i - 1].ref = i;
-    t->maptrack[0][i - 1].ref = MAPTRACK_TAIL;
-
     /* Shared grant table. */
     if ( (t->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL )
         goto no_mem_3;
@@ -3082,8 +3115,6 @@ grant_table_create(
         free_xenheap_page(t->shared_raw[i]);
     xfree(t->shared_raw);
  no_mem_3:
-    free_xenheap_page(t->maptrack[0]);
-    xfree(t->maptrack);
  no_mem_2:
     for ( i = 0;
           i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
@@ -3111,7 +3142,8 @@ gnttab_release_mappings(
 
     BUG_ON(!d->is_dying);
 
-    for ( handle = 0; handle < gt->maptrack_limit; handle++ )
+    for ( handle = 0; handle < gt->maptrack_pages * MAPTRACK_PER_PAGE;
+          handle++ )
     {
         map = &maptrack_entry(gt, handle);
         if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
@@ -3216,7 +3248,7 @@ grant_table_destroy(
         free_xenheap_page(t->shared_raw[i]);
     xfree(t->shared_raw);
 
-    for ( i = 0; i < nr_maptrack_frames(t); i++ )
+    for ( i = 0; i < t->maptrack_pages; i++ )
         free_xenheap_page(t->maptrack[i]);
     xfree(t->maptrack);
 
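The grant_table.h hunk below grows struct grant_mapping from 8 to 16 bytes
(the new vcpu and pad fields), which halves the number of entries per
maptrack page; together with the frame budget now being split across VCPUs,
this is the reasoning behind the quadrupled DEFAULT_MAX_MAPTRACK_FRAMES
earlier in the patch. A small sketch of that arithmetic, using a local
stand-in struct (demo_grant_mapping) rather than the real header:

/* Sizing check only; demo_grant_mapping mirrors the layout the patch
 * introduces and is not the Xen definition. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct demo_grant_mapping {
    uint32_t ref;    /* grant ref */
    uint16_t flags;  /* GNTMAP_* flags */
    uint16_t domid;  /* granting domain */
    uint32_t vcpu;   /* new: vcpu which created the mapping */
    uint32_t pad;    /* new: round the size up to a power of 2 */
};

_Static_assert(sizeof(struct demo_grant_mapping) == 16,
               "entry doubled from 8 to 16 bytes");

int main(void)
{
    unsigned int per_page = PAGE_SIZE / sizeof(struct demo_grant_mapping);

    /* 256 entries per page now, versus 512 with the old 8-byte entry. */
    printf("entries per maptrack page: %u\n", per_page);
    /* Old default: 256 frames * 512 entries = 131072 handles.
       New default: 1024 frames * 256 entries = 262144 handles, i.e.
       four times the frames gives twice the handles, with the extra
       headroom covering VCPUs that map more than their even share of
       max_maptrack_frames / max_vcpus. */
    printf("default handle budget: %u\n", 1024u * per_page);
    return 0;
}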
diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
index f22ebd0..c6e4ebf 100644
--- a/xen/include/xen/grant_table.h
+++ b/xen/include/xen/grant_table.h
@@ -60,6 +60,8 @@ struct grant_mapping {
     u32      ref;           /* grant ref */
     u16      flags;         /* 0-4: GNTMAP_* ; 5-15: unused */
     domid_t  domid;         /* granting domain */
+    u32      vcpu;          /* vcpu which created the grant mapping */
+    u32      pad;           /* round size to a power of 2 */
 };
 
 /* Per-domain grant information. */
@@ -83,10 +85,10 @@ struct grant_table {
     grant_status_t       **status;
     /* Active grant table. */
     struct active_grant_entry **active;
-    /* Mapping tracking table. */
+    /* Mapping tracking table per vcpu. */
     struct grant_mapping **maptrack;
-    unsigned int          maptrack_head;
-    unsigned int          maptrack_limit;
+    /* Total pages used for mapping tracking table */
+    unsigned int          maptrack_pages;
     /* Lock protecting the maptrack page list, head, and limit */
     spinlock_t            maptrack_lock;
     /* The defined versions are 1 and 2.  Set to 0 if we don't know
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 80c6f62..d46a561 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -219,6 +219,11 @@ struct vcpu
     /* VCPU paused by system controller. */
     int              controller_pause_count;
 
+    /* Maptrack */
+    unsigned int     maptrack_head;
+    unsigned int     maptrack_tail;
+    unsigned int     maptrack_frames;
+
     /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
     evtchn_port_t    virq_to_evtchn[NR_VIRQS];
     spinlock_t       virq_lock;
-- 
1.7.10.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel