[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v3 1/8] libxenguest: short-circuit "all-dirty" handling
For one it is unnecessary to fill a perhaps large chunk of memory with all ones. Add a new parameter to send_dirty_pages() for callers to indicate so. Then it is further unnecessary to allocate the dirty bitmap altogether when all that's ever going to happen is a single all-dirty run. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> Reviewed-by: Juergen Gross <jgross@xxxxxxxx> Acked-by: Ian Jackson <iwj@xxxxxxxxxxxxxx> --- NOTE: Ian demands that this not be committed without also committing "libxenguest: avoid allocating unused deferred-pages bitmap" (because of the ugly -1L). --- a/tools/libs/guest/xg_sr_save.c +++ b/tools/libs/guest/xg_sr_save.c @@ -364,7 +364,7 @@ static int suspend_domain(struct xc_sr_c * Bitmap is bounded by p2m_size. */ static int send_dirty_pages(struct xc_sr_context *ctx, - unsigned long entries) + unsigned long entries, bool all_dirty) { xc_interface *xch = ctx->xch; xen_pfn_t p; @@ -375,7 +375,7 @@ static int send_dirty_pages(struct xc_sr for ( p = 0, written = 0; p < ctx->save.p2m_size; ++p ) { - if ( !test_bit(p, dirty_bitmap) ) + if ( !all_dirty && !test_bit(p, dirty_bitmap) ) continue; rc = add_to_batch(ctx, p); @@ -407,12 +407,7 @@ static int send_dirty_pages(struct xc_sr */ static int send_all_pages(struct xc_sr_context *ctx) { - DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap, - &ctx->save.dirty_bitmap_hbuf); - - bitmap_set(dirty_bitmap, ctx->save.p2m_size); - - return send_dirty_pages(ctx, ctx->save.p2m_size); + return send_dirty_pages(ctx, ctx->save.p2m_size, true /* all_dirty */); } static int enable_logdirty(struct xc_sr_context *ctx) @@ -502,9 +497,6 @@ static int send_memory_live(struct xc_sr int rc; int policy_decision; - DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap, - &ctx->save.dirty_bitmap_hbuf); - precopy_policy_t precopy_policy = ctx->save.callbacks->precopy_policy; void *data = ctx->save.callbacks->data; @@ -522,8 +514,6 @@ static int send_memory_live(struct xc_sr if ( precopy_policy == NULL ) 
precopy_policy = simple_precopy_policy; - bitmap_set(dirty_bitmap, ctx->save.p2m_size); - for ( ; ; ) { policy_decision = precopy_policy(*policy_stats, data); @@ -535,7 +525,7 @@ static int send_memory_live(struct xc_sr if ( rc ) goto out; - rc = send_dirty_pages(ctx, stats.dirty_count); + rc = send_dirty_pages(ctx, stats.dirty_count, x == 1); if ( rc ) goto out; } @@ -681,7 +671,8 @@ static int suspend_and_send_dirty(struct } } - rc = send_dirty_pages(ctx, stats.dirty_count + ctx->save.nr_deferred_pages); + rc = send_dirty_pages(ctx, stats.dirty_count + ctx->save.nr_deferred_pages, + false /* all_dirty */); if ( rc ) goto out; @@ -801,8 +792,11 @@ static int setup(struct xc_sr_context *c if ( rc ) goto err; - dirty_bitmap = xc_hypercall_buffer_alloc_pages( - xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size))); + dirty_bitmap = ctx->save.live || ctx->stream_type != XC_STREAM_PLAIN + ? xc_hypercall_buffer_alloc_pages( + xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size))) + : (void *)-1L; + ctx->save.batch_pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->save.batch_pfns)); ctx->save.deferred_pages = bitmap_alloc(ctx->save.p2m_size);
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365.