[Xen-changelog] [xen-unstable] x86/paging: use clear_guest() for zero-filling guest buffers
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1329134942 -3600
# Node ID e953d536d3c6e344cf310f63ead9feda87cc67b0
# Parent  9ad1e42c341bc78463b6f6610a6300f75b535fbb
x86/paging: use clear_guest() for zero-filling guest buffers

While static arrays of all zeros may be tolerable (but are simply
inefficient now that we have the necessary infrastructure), using
on-stack arrays for this purpose (particularly when their size doesn't
have an upper limit enforced) is calling for eventual problems (even
if the code can be reached via administrative interfaces only).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---

diff -r 9ad1e42c341b -r e953d536d3c6 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c	Fri Feb 10 17:24:50 2012 +0000
+++ b/xen/arch/x86/mm/paging.c	Mon Feb 13 13:09:02 2012 +0100
@@ -21,11 +21,11 @@
  */
 
 #include <xen/init.h>
+#include <xen/guest_access.h>
 #include <asm/paging.h>
 #include <asm/shadow.h>
 #include <asm/p2m.h>
 #include <asm/hap.h>
-#include <asm/guest_access.h>
 #include <asm/hvm/nestedhvm.h>
 #include <xen/numa.h>
 #include <xsm/xsm.h>
@@ -383,26 +383,30 @@
                   (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
                   i2++ )
             {
-                static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG];
                 unsigned int bytes = PAGE_SIZE;
                 l1 = ((l2 && mfn_valid(l2[i2])) ?
-                      map_domain_page(mfn_x(l2[i2])) : zeroes);
+                      map_domain_page(mfn_x(l2[i2])) : NULL);
                 if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
                 if ( likely(peek) )
                 {
-                    if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3,
-                                              (uint8_t *)l1, bytes) != 0 )
+                    if ( (l1 ? copy_to_guest_offset(sc->dirty_bitmap,
+                                                    pages >> 3, (uint8_t *)l1,
+                                                    bytes)
+                             : clear_guest_offset(sc->dirty_bitmap,
+                                                  pages >> 3, bytes)) != 0 )
                     {
                         rv = -EFAULT;
                         goto out;
                     }
                 }
-                if ( clean && l1 != zeroes )
-                    clear_page(l1);
                 pages += bytes << 3;
-                if ( l1 != zeroes )
+                if ( l1 )
+                {
+                    if ( clean )
+                        clear_page(l1);
                     unmap_domain_page(l1);
+                }
             }
             if ( l2 )
                 unmap_domain_page(l2);
@@ -462,12 +466,9 @@
         if ( !d->arch.paging.log_dirty.fault_count &&
              !d->arch.paging.log_dirty.dirty_count )
         {
-            int size = (nr + BITS_PER_LONG - 1) / BITS_PER_LONG;
-            unsigned long zeroes[size];
-            memset(zeroes, 0x00, size * BYTES_PER_LONG);
-            rv = 0;
-            if ( copy_to_guest_offset(dirty_bitmap, 0, (uint8_t *) zeroes,
-                                      size * BYTES_PER_LONG) != 0 )
+            unsigned int size = BITS_TO_LONGS(nr);
+
+            if ( clear_guest(dirty_bitmap, size * BYTES_PER_LONG) != 0 )
                 rv = -EFAULT;
             goto out;
         }
@@ -495,11 +496,10 @@
                   (pages < nr) && (i2 < LOGDIRTY_NODE_ENTRIES);
                   i2++ )
             {
-                static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG];
                 unsigned int bytes = PAGE_SIZE;
                 uint8_t *s;
                 l1 = ((l2 && mfn_valid(l2[i2])) ?
-                      map_domain_page(mfn_x(l2[i2])) : zeroes);
+                      map_domain_page(mfn_x(l2[i2])) : NULL);
 
                 s = ((uint8_t*)l1) + (b1 >> 3);
                 bytes -= b1 >> 3;
@@ -507,9 +507,18 @@
                 if ( likely(((nr - pages + 7) >> 3) < bytes) )
                     bytes = (unsigned int)((nr - pages + 7) >> 3);
 
+                if ( !l1 )
+                {
+                    if ( clear_guest_offset(dirty_bitmap, pages >> 3,
+                                            bytes) != 0 )
+                    {
+                        rv = -EFAULT;
+                        goto out;
+                    }
+                }
                 /* begin_pfn is not 32K aligned, hence we have to bit
                  * shift the bitmap */
-                if ( b1 & 0x7 )
+                else if ( b1 & 0x7 )
                 {
                     int i, j;
                     uint32_t *l = (uint32_t*) s;
@@ -553,11 +562,12 @@
                     }
                 }
-                if ( l1 != zeroes )
+                pages += bytes << 3;
+                if ( l1 )
+                {
                     clear_page(l1);
-                pages += bytes << 3;
-                if ( l1 != zeroes )
                     unmap_domain_page(l1);
+                }
 
                 b1 = b1 & 0x7;
             }
             b2 = 0;
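For illustration, the pattern this patch removes, and its replacement, can be modeled outside the hypervisor. The sketch below is a stand-alone approximation, not Xen code: guest_write() and guest_clear() are hypothetical stand-ins for Xen's copy_to_guest_offset() and clear_guest_offset(), and a plain byte array stands in for the guest-supplied buffer.

/* Minimal stand-alone model of the change: instead of zero-filling a
 * guest-visible buffer by copying from a zeroed scratch array (which,
 * as an on-stack VLA, grows with caller-controlled input), use a
 * dedicated clear primitive that writes zeroes directly.  guest_write()
 * and guest_clear() are hypothetical stand-ins, not real Xen APIs. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_to_guest_offset(): copy into the "guest" buffer
 * at a byte offset; returns 0 on success. */
static int guest_write(uint8_t *guest, size_t off,
                       const uint8_t *src, size_t bytes)
{
    memcpy(guest + off, src, bytes);
    return 0;
}

/* Stand-in for clear_guest_offset(): zero the "guest" buffer directly,
 * with no intermediate source array at all. */
static int guest_clear(uint8_t *guest, size_t off, size_t bytes)
{
    memset(guest + off, 0, bytes);
    return 0;
}

/* Old pattern: a variable-length on-stack array of zeroes whose size
 * has no enforced upper bound -- the hazard the patch removes. */
static int fill_old(uint8_t *guest, size_t bytes)
{
    uint8_t zeroes[bytes];              /* unbounded stack usage */

    memset(zeroes, 0, bytes);
    return guest_write(guest, 0, zeroes, bytes);
}

/* New pattern: clear the destination directly. */
static int fill_new(uint8_t *guest, size_t bytes)
{
    return guest_clear(guest, 0, bytes);
}

int main(void)
{
    uint8_t buf[64];

    memset(buf, 0xff, sizeof(buf));
    fill_old(buf, sizeof(buf) / 2);     /* zero the first half, old way */
    fill_new(buf, sizeof(buf));         /* zero all of it, new way */
    printf("buf[0]=%d buf[63]=%d\n", buf[0], buf[63]);
    return 0;
}

The hazard is visible in fill_old(): the scratch array's size follows the caller-supplied length, so a large request translates directly into stack usage, while fill_new() zeroes the destination with no intermediate buffer and no redundant copy.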