[Xen-devel] [PATCH v4 2/2] x86/emulate: Send vm_event from emulate
This patch aims to have mem access vm events sent from the emulator. This
is useful in the case of emulated instructions that cause page walks on
access-protected pages.

We use hvmemul_map_linear_addr() to intercept r/w access and
hvmemul_insn_fetch() to intercept exec access.

First we try to send a vm event and, if the event is sent, emulation
returns X86EMUL_ACCESS_EXCEPTION. If the event is not sent, emulation
goes on as expected.

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

---
Changes since V3:
        - Calculate gpa in hvmemul_send_vm_event()
        - Move hvmemul_linear_to_phys() call inside
          hvmemul_send_vm_event()
        - Check only if hvmemul_virtual_to_linear() returns X86EMUL_OKAY
        - Add comment for X86EMUL_ACCESS_EXCEPTION
---
 xen/arch/x86/hvm/emulate.c             | 89 +++++++++++++++++++++++++-
 xen/arch/x86/hvm/vm_event.c            |  2 +-
 xen/arch/x86/mm/mem_access.c           |  3 +-
 xen/arch/x86/x86_emulate/x86_emulate.h |  2 +
 xen/include/asm-x86/hvm/emulate.h      |  4 +-
 5 files changed, 95 insertions(+), 5 deletions(-)
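As a self-contained review aid, here is a minimal sketch modelling the
violation matrix that hvmemul_send_vm_event() implements in the emulate.c
hunk below. All definitions in it (the PFEC_* and MEM_ACCESS_* values and
the xenmem_access_t enum) are illustrative local stand-ins rather than the
Xen headers; only the switch logic mirrors the patch.

/*
 * Stand-alone model of the violation matrix in hvmemul_send_vm_event().
 * Everything here is a local stand-in for illustration, not the Xen
 * definitions; only the decision logic follows the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define PFEC_write_access (1u << 1)   /* illustrative values */
#define PFEC_insn_fetch   (1u << 4)

#define MEM_ACCESS_R (1u << 0)
#define MEM_ACCESS_W (1u << 1)
#define MEM_ACCESS_X (1u << 2)

typedef enum {
    XENMEM_access_n, XENMEM_access_r, XENMEM_access_w, XENMEM_access_x,
    XENMEM_access_rw, XENMEM_access_rx, XENMEM_access_wx, XENMEM_access_rwx,
} xenmem_access_t;

/* Returns the mem_access flags to report, or 0 if there is no violation. */
static uint32_t violation_flags(xenmem_access_t access, uint32_t pfec)
{
    uint32_t flags = 0;

    switch ( access )
    {
    case XENMEM_access_x:      /* execute-only... */
    case XENMEM_access_rx:     /* ...or read/execute: a write violates */
        if ( pfec & PFEC_write_access )
            flags = MEM_ACCESS_R | MEM_ACCESS_W;
        break;

    case XENMEM_access_w:      /* write-only... */
    case XENMEM_access_rw:     /* ...or read/write: a fetch violates */
        if ( pfec & PFEC_insn_fetch )
            flags = MEM_ACCESS_X;
        break;

    case XENMEM_access_r:      /* read-only or no access: */
    case XENMEM_access_n:      /* writes and fetches both violate */
        if ( pfec & PFEC_write_access )
            flags |= MEM_ACCESS_R | MEM_ACCESS_W;
        if ( pfec & PFEC_insn_fetch )
            flags |= MEM_ACCESS_X;
        break;

    default:                   /* rwx etc.: nothing to report */
        break;
    }

    return flags;
}

int main(void)
{
    /* A write to a read/execute page is reported as an r/w event. */
    printf("rx + write -> %#x\n",
           (unsigned)violation_flags(XENMEM_access_rx, PFEC_write_access));
    /* A fetch from a read/write page is reported as an x event. */
    printf("rw + fetch -> %#x\n",
           (unsigned)violation_flags(XENMEM_access_rw, PFEC_insn_fetch));
    return 0;
}

The intent is that a write fault on an execute-protected page reports an
r/w violation, a fetch fault on a fetch-protected page reports an x
violation, and unrestricted (rwx) pages never generate an event, so
emulation continues unmodified.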
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 254ff6515d..75403ebc9b 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -15,6 +15,7 @@
 #include <xen/paging.h>
 #include <xen/trace.h>
 #include <xen/vm_event.h>
+#include <xen/monitor.h>
 #include <asm/event.h>
 #include <asm/i387.h>
 #include <asm/xstate.h>
@@ -26,6 +27,7 @@
 #include <asm/hvm/support.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/vm_event.h>
+#include <asm/altp2m.h>
 
 static void hvmtrace_io_assist(const ioreq_t *p)
 {
@@ -619,6 +621,68 @@ static int hvmemul_linear_to_phys(
     return X86EMUL_OKAY;
 }
 
+static bool hvmemul_send_vm_event(unsigned long gla,
+                                  uint32_t pfec, unsigned int bytes,
+                                  struct hvm_emulate_ctxt ctxt)
+{
+    xenmem_access_t access;
+    vm_event_request_t req = {};
+    gfn_t gfn;
+    paddr_t gpa;
+    unsigned long reps = 1;
+    int rc;
+
+    if ( !ctxt.send_event || !pfec )
+        return false;
+
+    rc = hvmemul_linear_to_phys(gla, &gpa, bytes, &reps, pfec, &ctxt);
+
+    if ( rc != X86EMUL_OKAY )
+        return false;
+
+    gfn = gaddr_to_gfn(gpa);
+
+    if ( p2m_get_mem_access(current->domain, gfn, &access,
+                            altp2m_vcpu_idx(current)) != 0 )
+        return false;
+
+    switch ( access ) {
+    case XENMEM_access_x:
+    case XENMEM_access_rx:
+        if ( pfec & PFEC_write_access )
+            req.u.mem_access.flags = MEM_ACCESS_R | MEM_ACCESS_W;
+        break;
+
+    case XENMEM_access_w:
+    case XENMEM_access_rw:
+        if ( pfec & PFEC_insn_fetch )
+            req.u.mem_access.flags = MEM_ACCESS_X;
+        break;
+
+    case XENMEM_access_r:
+    case XENMEM_access_n:
+        if ( pfec & PFEC_write_access )
+            req.u.mem_access.flags |= MEM_ACCESS_R | MEM_ACCESS_W;
+        if ( pfec & PFEC_insn_fetch )
+            req.u.mem_access.flags |= MEM_ACCESS_X;
+        break;
+
+    default:
+        return false;
+    }
+
+    if ( !req.u.mem_access.flags )
+        return false; /* no violation */
+
+    req.reason = VM_EVENT_REASON_MEM_ACCESS;
+    req.u.mem_access.gfn = gfn_x(gfn);
+    req.u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA | MEM_ACCESS_GLA_VALID;
+    req.u.mem_access.gla = gla;
+    req.u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
+
+    return monitor_traps(current, true, &req) >= 0;
+}
+
 /*
  * Map the frame(s) covering an individual linear access, for writeable
  * access. May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
@@ -636,6 +700,7 @@ static void *hvmemul_map_linear_addr(
     unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) -
         (linear >> PAGE_SHIFT) + 1;
     unsigned int i;
+    gfn_t gfn;
 
     /*
      * mfn points to the next free slot.  All used slots have a page reference
@@ -674,7 +739,7 @@ static void *hvmemul_map_linear_addr(
         ASSERT(mfn_x(*mfn) == 0);
 
         res = hvm_translate_get_page(curr, addr, true, pfec,
-                                     &pfinfo, &page, NULL, &p2mt);
+                                     &pfinfo, &page, &gfn, &p2mt);
 
         switch ( res )
         {
@@ -704,6 +769,11 @@ static void *hvmemul_map_linear_addr(
 
         if ( pfec & PFEC_write_access )
         {
+            if ( hvmemul_send_vm_event(addr, pfec, bytes, *hvmemul_ctxt) )
+            {
+                err = ERR_PTR(~X86EMUL_ACCESS_EXCEPTION);
+                goto out;
+            }
             if ( p2m_is_discard_write(p2mt) )
             {
                 err = ERR_PTR(~X86EMUL_OKAY);
@@ -1248,7 +1318,21 @@ int hvmemul_insn_fetch(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     /* Careful, as offset can wrap or truncate WRT insn_buf_eip. */
     uint8_t insn_off = offset - hvmemul_ctxt->insn_buf_eip;
+    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
+    unsigned long addr, reps = 1;
+    int rc = 0;
+
+    rc = hvmemul_virtual_to_linear(
+        seg, offset, bytes, &reps, hvm_access_insn_fetch, hvmemul_ctxt, &addr);
+
+    if ( rc != X86EMUL_OKAY || !bytes )
+        return rc;
+
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
+        pfec |= PFEC_user_mode;
 
+    if ( hvmemul_send_vm_event(addr, pfec, bytes, *hvmemul_ctxt) )
+        return X86EMUL_ACCESS_EXCEPTION;
     /*
      * Fall back if requested bytes are not in the prefetch cache.
      * But always perform the (fake) read when bytes == 0.
@@ -2508,12 +2592,13 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
 }
 
 void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
-    unsigned int errcode)
+    unsigned int errcode, bool send_event)
 {
     struct hvm_emulate_ctxt ctx = {{ 0 }};
     int rc;
 
     hvm_emulate_init_once(&ctx, NULL, guest_cpu_user_regs());
+    ctx.send_event = send_event;
 
     switch ( kind )
     {
diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c
index 121de23071..6d203e8db5 100644
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ -87,7 +87,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
             kind = EMUL_KIND_SET_CONTEXT_INSN;
 
         hvm_emulate_one_vm_event(kind, TRAP_invalid_op,
-                                 X86_EVENT_NO_EC);
+                                 X86_EVENT_NO_EC, false);
 
         v->arch.vm_event->emulate_flags = 0;
     }
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 0144f92b98..c9972bab8c 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -214,7 +214,8 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
          d->arch.monitor.inguest_pagefault_disabled &&
          npfec.kind != npfec_kind_with_gla ) /* don't send a mem_event */
     {
-        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op, X86_EVENT_NO_EC);
+        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op,
+                                 X86_EVENT_NO_EC, true);
 
         return true;
     }
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 08645762cc..8a20e733fa 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -162,6 +162,8 @@ struct x86_emul_fpu_aux {
 #define X86EMUL_UNRECOGNIZED   X86EMUL_UNIMPLEMENTED
 /* (cmpxchg accessor): CMPXCHG failed. */
 #define X86EMUL_CMPXCHG_FAILED 7
+/* Emulator tried to access a protected page. */
+#define X86EMUL_ACCESS_EXCEPTION 8
 
 /* FPU sub-types which may be requested via ->get_fpu(). */
 enum x86_emulate_fpu_type {
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index b39a1a0331..ed22ed0baf 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -47,6 +47,7 @@ struct hvm_emulate_ctxt {
     uint32_t intr_shadow;
 
     bool_t set_context;
+    bool send_event;
 };
 
 enum emul_kind {
@@ -63,7 +64,8 @@ int hvm_emulate_one(
     struct hvm_emulate_ctxt *hvmemul_ctxt);
 void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
-                              unsigned int errcode);
+                              unsigned int errcode,
+                              bool send_event);
 /* Must be called once to set up hvmemul state. */
 void hvm_emulate_init_once(
     struct hvm_emulate_ctxt *hvmemul_ctxt,
-- 
2.17.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel