[Xen-changelog] [xen-unstable] x86/hvm: use unlocked p2m lookups in hvmemul_rep_movs()
# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1337246694 -3600
# Node ID b7705eecb6f2c0009b00b645802150f9b6d0c4e9
# Parent 1e5cf32b7b7114e88507085ff7a88fc89b9a2c15
x86/hvm: use unlocked p2m lookups in hvmemul_rep_movs()

The eventual hvm_copy or IO emulations will re-check the p2m and DTRT.

Signed-off-by: Tim Deegan <tim@xxxxxxx>
---

diff -r 1e5cf32b7b71 -r b7705eecb6f2 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c	Thu May 17 10:24:54 2012 +0100
+++ b/xen/arch/x86/hvm/emulate.c	Thu May 17 10:24:54 2012 +0100
@@ -681,7 +681,6 @@ static int hvmemul_rep_movs(
     p2m_type_t sp2mt, dp2mt;
     int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
     char *buf;
-    struct two_gfns tg;
 
     rc = hvmemul_virtual_to_linear(
         src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
@@ -709,25 +708,17 @@ static int hvmemul_rep_movs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    get_two_gfns(current->domain, sgpa >> PAGE_SHIFT, &sp2mt, NULL, NULL,
-                 current->domain, dgpa >> PAGE_SHIFT, &dp2mt, NULL, NULL,
-                 P2M_ALLOC, &tg);
+    /* Check for MMIO ops */
+    (void) get_gfn_query_unlocked(current->domain, sgpa >> PAGE_SHIFT, &sp2mt);
+    (void) get_gfn_query_unlocked(current->domain, dgpa >> PAGE_SHIFT, &dp2mt);
 
-    if ( !p2m_is_ram(sp2mt) && !p2m_is_grant(sp2mt) )
-    {
-        rc = hvmemul_do_mmio(
+    if ( sp2mt == p2m_mmio_dm )
+        return hvmemul_do_mmio(
             sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
-        put_two_gfns(&tg);
-        return rc;
-    }
 
-    if ( !p2m_is_ram(dp2mt) && !p2m_is_grant(dp2mt) )
-    {
-        rc = hvmemul_do_mmio(
+    if ( dp2mt == p2m_mmio_dm )
+        return hvmemul_do_mmio(
             dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
-        put_two_gfns(&tg);
-        return rc;
-    }
 
     /* RAM-to-RAM copy: emulate as equivalent of memmove(dgpa, sgpa, bytes). */
     bytes = *reps * bytes_per_rep;
@@ -742,10 +733,7 @@ static int hvmemul_rep_movs(
      * can be emulated by a source-to-buffer-to-destination block copy.
      */
     if ( ((dgpa + bytes_per_rep) > sgpa) && (dgpa < (sgpa + bytes)) )
-    {
-        put_two_gfns(&tg);
         return X86EMUL_UNHANDLEABLE;
-    }
 
     /* Adjust destination address for reverse copy. */
     if ( df )
@@ -754,10 +742,7 @@ static int hvmemul_rep_movs(
     /* Allocate temporary buffer. Fall back to slow emulation if this fails. */
     buf = xmalloc_bytes(bytes);
     if ( buf == NULL )
-    {
-        put_two_gfns(&tg);
         return X86EMUL_UNHANDLEABLE;
-    }
 
     /*
      * We do a modicum of checking here, just for paranoia's sake and to
@@ -768,7 +753,6 @@ static int hvmemul_rep_movs(
     rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
     xfree(buf);
-    put_two_gfns(&tg);
 
     if ( rc == HVMCOPY_gfn_paged_out )
        return X86EMUL_RETRY;
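The rationale in the commit message (the eventual hvm_copy or IO emulations re-check the p2m and do the right thing) is why the locked get_two_gfns()/put_two_gfns() pair can be dropped: the unlocked lookups are purely advisory, used only to decide whether to hand the access to the device model, while the paths that actually touch guest memory re-validate the mapping under their own locking. What follows is a minimal, self-contained C sketch of that pattern, offered only as an illustration; the names (lookup_type_unlocked, copy_checked, slot) are hypothetical and are not Xen functions.

/*
 * Illustrative sketch (hypothetical names, not Xen code): an unlocked,
 * advisory type lookup picks the fast path, and the authoritative copy
 * routine re-checks the type under its lock before writing anything.
 * Build with: cc -pthread sketch.c
 */
#include <stdio.h>
#include <string.h>
#include <pthread.h>

enum ptype { P_RAM, P_MMIO };

static struct {
    pthread_mutex_t lock;
    enum ptype type;
    unsigned char page[4096];
} slot = { PTHREAD_MUTEX_INITIALIZER, P_RAM, { 0 } };

/* Advisory lookup: no lock taken, so the answer may be stale when used. */
static enum ptype lookup_type_unlocked(void)
{
    return slot.type;
}

/* Authoritative copy: re-checks the type under the lock (the "DTRT" part). */
static int copy_checked(const void *buf, size_t off, size_t len)
{
    int rc = -1;

    pthread_mutex_lock(&slot.lock);
    if ( slot.type == P_RAM && off + len <= sizeof(slot.page) )
    {
        memcpy(slot.page + off, buf, len);
        rc = 0;
    }
    pthread_mutex_unlock(&slot.lock);
    return rc;                  /* caller falls back or retries on failure */
}

int main(void)
{
    const char msg[] = "rep movs";

    /* The unlocked result only chooses a path; it never justifies a write. */
    if ( lookup_type_unlocked() == P_MMIO )
        printf("would forward the access to the device model\n");
    else if ( copy_checked(msg, 0, sizeof(msg)) == 0 )
        printf("RAM-to-RAM path completed\n");
    else
        printf("type changed in the meantime; fall back or retry\n");

    return 0;
}

In this scheme the worst a stale unlocked answer can do is send the request down the slower or retried path; it cannot cause a write through a mapping whose type has changed, because copy_checked() is the only writer and it re-checks under the lock, which is the property the commit message relies on for the hvm_copy and IO emulation paths.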