diff -r d1d05cb59a76 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Wed Nov 14 16:27:58 2012 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Wed Nov 28 16:04:19 2012 +0900
@@ -513,6 +513,7 @@ int hvm_domain_initialise(struct domain
     spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
     spin_lock_init(&d->arch.hvm_domain.uc_lock);
+    spin_lock_init(&d->arch.hvm_domain.dirty_vram_lock);
 
     INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
     spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
diff -r d1d05cb59a76 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c	Wed Nov 14 16:27:58 2012 +0000
+++ b/xen/arch/x86/mm/hap/hap.c	Wed Nov 28 16:04:19 2012 +0900
@@ -122,7 +122,12 @@ int hap_track_dirty_vram(struct domain *
                          XEN_GUEST_HANDLE_64(uint8) dirty_bitmap)
 {
     long rc = 0;
-    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+    struct sh_dirty_vram *dirty_vram;
+
+    if ( !spin_trylock(&d->arch.hvm_domain.dirty_vram_lock) )
+        return -ENODATA;
+
+    dirty_vram = d->arch.hvm_domain.dirty_vram;
 
     if ( nr )
     {
@@ -174,6 +179,7 @@ int hap_track_dirty_vram(struct domain *
         rc = 0;
     }
 
+    spin_unlock(&d->arch.hvm_domain.dirty_vram_lock);
     return rc;
 
 param_fail:
@@ -182,6 +188,8 @@ param_fail:
         xfree(dirty_vram);
         dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
     }
+
+    spin_unlock(&d->arch.hvm_domain.dirty_vram_lock);
     return rc;
 }
 
diff -r d1d05cb59a76 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c	Wed Nov 14 16:27:58 2012 +0000
+++ b/xen/arch/x86/mm/paging.c	Wed Nov 28 16:04:19 2012 +0900
@@ -697,14 +697,21 @@ int paging_domctl(struct domain *d, xen_
             break;
         /* Else fall through... */
     case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
+        spin_lock(&d->arch.hvm_domain.dirty_vram_lock);
         if ( hap_enabled(d) )
             hap_logdirty_init(d);
-        return paging_log_dirty_enable(d);
+        rc = paging_log_dirty_enable(d);
+        spin_unlock(&d->arch.hvm_domain.dirty_vram_lock);
+        return rc;
 
     case XEN_DOMCTL_SHADOW_OP_OFF:
+        spin_lock(&d->arch.hvm_domain.dirty_vram_lock);
+        rc = 0;
         if ( paging_mode_log_dirty(d) )
-            if ( (rc = paging_log_dirty_disable(d)) != 0 )
-                return rc;
+            rc = paging_log_dirty_disable(d);
+        spin_unlock(&d->arch.hvm_domain.dirty_vram_lock);
+        if ( rc != 0 )
+            return rc;
         break;
 
     case XEN_DOMCTL_SHADOW_OP_CLEAN:
diff -r d1d05cb59a76 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h	Wed Nov 14 16:27:58 2012 +0000
+++ b/xen/include/asm-x86/hvm/domain.h	Wed Nov 28 16:04:19 2012 +0900
@@ -75,6 +75,7 @@ struct hvm_domain {
 
     /* VRAM dirty support. */
     struct sh_dirty_vram *dirty_vram;
+    spinlock_t             dirty_vram_lock;
 
     /* If one of vcpus of this domain is in no_fill_mode or
      * mtrr/pat between vcpus is not the same, set is_in_uc_mode