[Xen-changelog] Fix shadow bitmap sizing for logdirty mode.
# HG changeset patch
# User smh22@xxxxxxxxxxxxxxxxxxxx
# Node ID 8f83f7ccf185dea197f8cb21bf0bc095bb612c8e
# Parent  5823dbfbb4cd6dd9111a4a4cb2198a0ee3467011
Fix shadow bitmap sizing for logdirty mode.

Signed-off-by: Steven Hand <steven@xxxxxxxxxxxxx>

diff -r 5823dbfbb4cd -r 8f83f7ccf185 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Wed Nov  9 16:23:46 2005
+++ b/xen/arch/x86/shadow32.c   Wed Nov  9 18:18:47 2005
@@ -997,7 +997,8 @@
     if ( new_modes & SHM_log_dirty )
     {
         ASSERT( !d->arch.shadow_dirty_bitmap );
-        d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
+        d->arch.shadow_dirty_bitmap_size =
+            (d->shared_info->arch.max_pfn + 63) & ~63;
         d->arch.shadow_dirty_bitmap =
             xmalloc_array(unsigned long, d->arch.shadow_dirty_bitmap_size /
                                          (8 * sizeof(unsigned long)));
@@ -1287,34 +1288,28 @@
         d->arch.shadow_dirty_net_count   = 0;
         d->arch.shadow_dirty_block_count = 0;
 
-        if ( (d->max_pages > sc->pages) ||
-             (sc->dirty_bitmap == NULL) ||
+        if ( (sc->dirty_bitmap == NULL) ||
              (d->arch.shadow_dirty_bitmap == NULL) )
         {
             rc = -EINVAL;
             break;
         }
-
-        sc->pages = d->max_pages;
+
+        if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+            sc->pages = d->arch.shadow_dirty_bitmap_size;
 
 #define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
-        for ( i = 0; i < d->max_pages; i += chunk )
-        {
-            int bytes = ((((d->max_pages - i) > chunk) ?
-                          chunk : (d->max_pages - i)) + 7) / 8;
+        for ( i = 0; i < sc->pages; i += chunk )
+        {
+            int bytes = ((((sc->pages - i) > chunk) ?
+                          chunk : (sc->pages - i)) + 7) / 8;
 
             if (copy_to_user(
                     sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
                     d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
                     bytes))
             {
-                // copy_to_user can fail when copying to guest app memory.
-                // app should zero buffer after mallocing, and pin it
                 rc = -EINVAL;
-                memset(
-                    d->arch.shadow_dirty_bitmap +
-                    (i/(8*sizeof(unsigned long))),
-                    0, (d->max_pages/8) - (i/(8*sizeof(unsigned long))));
                 break;
             }
 
@@ -1331,17 +1326,19 @@
         sc->stats.dirty_net_count   = d->arch.shadow_dirty_net_count;
         sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
 
-        if ( (d->max_pages > sc->pages) ||
-             (sc->dirty_bitmap == NULL) ||
+
+        if ( (sc->dirty_bitmap == NULL) ||
              (d->arch.shadow_dirty_bitmap == NULL) )
         {
             rc = -EINVAL;
             break;
         }
 
-        sc->pages = d->max_pages;
-        if (copy_to_user(
-            sc->dirty_bitmap, d->arch.shadow_dirty_bitmap, (d->max_pages+7)/8))
+        if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+            sc->pages = d->arch.shadow_dirty_bitmap_size;
+
+        if (copy_to_user(sc->dirty_bitmap,
+                         d->arch.shadow_dirty_bitmap, (sc->pages+7)/8))
         {
             rc = -EINVAL;
             break;
diff -r 5823dbfbb4cd -r 8f83f7ccf185 xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Wed Nov  9 16:23:46 2005
+++ b/xen/arch/x86/shadow_public.c      Wed Nov  9 18:18:47 2005
@@ -1009,7 +1009,8 @@
     if ( new_modes & SHM_log_dirty )
     {
         ASSERT( !d->arch.shadow_dirty_bitmap );
-        d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
+        d->arch.shadow_dirty_bitmap_size =
+            (d->shared_info->arch.max_pfn + 63) & ~63;
         d->arch.shadow_dirty_bitmap =
             xmalloc_array(unsigned long, d->arch.shadow_dirty_bitmap_size /
                                          (8 * sizeof(unsigned long)));
@@ -1163,34 +1164,29 @@
         d->arch.shadow_dirty_net_count   = 0;
         d->arch.shadow_dirty_block_count = 0;
 
-        if ( (d->max_pages > sc->pages) ||
-             (sc->dirty_bitmap == NULL) ||
+
+        if ( (sc->dirty_bitmap == NULL) ||
              (d->arch.shadow_dirty_bitmap == NULL) )
         {
             rc = -EINVAL;
             break;
         }
-
-        sc->pages = d->max_pages;
+
+        if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+            sc->pages = d->arch.shadow_dirty_bitmap_size;
#define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
-        for ( i = 0; i < d->max_pages; i += chunk )
-        {
-            int bytes = ((((d->max_pages - i) > chunk) ?
-                          chunk : (d->max_pages - i)) + 7) / 8;
+        for ( i = 0; i < sc->pages; i += chunk )
+        {
+            int bytes = ((((sc->pages - i) > chunk) ?
+                          chunk : (sc->pages - i)) + 7) / 8;
 
             if (copy_to_user(
                     sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
                     d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
                     bytes))
             {
-                // copy_to_user can fail when copying to guest app memory.
-                // app should zero buffer after mallocing, and pin it
                 rc = -EINVAL;
-                memset(
-                    d->arch.shadow_dirty_bitmap +
-                    (i/(8*sizeof(unsigned long))),
-                    0, (d->max_pages/8) - (i/(8*sizeof(unsigned long))));
                 break;
             }
 
             memset(
@@ -1206,17 +1202,18 @@
         sc->stats.dirty_net_count   = d->arch.shadow_dirty_net_count;
         sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
 
-        if ( (d->max_pages > sc->pages) ||
-             (sc->dirty_bitmap == NULL) ||
+        if ( (sc->dirty_bitmap == NULL) ||
              (d->arch.shadow_dirty_bitmap == NULL) )
         {
             rc = -EINVAL;
             break;
         }
 
-        sc->pages = d->max_pages;
-        if (copy_to_user(
-            sc->dirty_bitmap, d->arch.shadow_dirty_bitmap, (d->max_pages+7)/8))
+        if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+            sc->pages = d->arch.shadow_dirty_bitmap_size;
+
+        if (copy_to_user(sc->dirty_bitmap,
+                         d->arch.shadow_dirty_bitmap, (sc->pages+7)/8))
        {
            rc = -EINVAL;
            break;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
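
For readers following the change, here is a minimal standalone C sketch of the two
calculations the patch touches: rounding the log-dirty bitmap up to a whole number of
64-bit words, and clamping the caller's page count to the bitmap size before the copy.
The names max_pfn, requested_pages and bitmap_size are illustrative stand-ins for
d->shared_info->arch.max_pfn, sc->pages and d->arch.shadow_dirty_bitmap_size; this is
not Xen source code.

/*
 * Minimal sketch, not Xen source: it mirrors the arithmetic changed by the
 * patch.  max_pfn, requested_pages and bitmap_size are stand-ins for
 * d->shared_info->arch.max_pfn, sc->pages and
 * d->arch.shadow_dirty_bitmap_size.
 */
#include <stdio.h>

int main(void)
{
    unsigned long max_pfn         = 12345;   /* example guest pfn count */
    unsigned long requested_pages = 20000;   /* what the caller asked for */

    /* Size the dirty bitmap from max_pfn, rounded up to a multiple of 64
     * so it fills whole unsigned-long words: (max_pfn + 63) & ~63. */
    unsigned long bitmap_size = (max_pfn + 63) & ~63UL;

    /* Clamp the request to what the bitmap covers instead of failing
     * with -EINVAL when it is larger. */
    if ( requested_pages > bitmap_size )
        requested_pages = bitmap_size;

    /* One bit per page, so the copy length in bytes is (pages + 7) / 8,
     * as in the copy_to_user() calls in the patch. */
    unsigned long copy_bytes = (requested_pages + 7) / 8;

    printf("bitmap covers %lu pages, copy %lu bytes\n",
           bitmap_size, copy_bytes);
    return 0;
}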