[Xen-devel] [PATCH V2] x86/vm_event: block interrupt injection for sync vm_events
Block interrupts (in vmx_intr_assist() / svm_intr_assist()) for the
duration of processing a sync vm_event (similarly to the strategy
currently used for single-stepping). Otherwise, attempting to emulate an
instruction when requested by a vm_event reply may legitimately need to
call e.g. hvm_inject_page_fault(), which then overwrites the active
interrupt in the VMCS.

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>

---
Changes since V1:
 - Added "e.g." to the patch description to counter the impression that
   the only possibility of overwriting the active interrupt in the VMCS
   is hvm_inject_page_fault().
 - Added the early return logic to svm_intr_assist() as well (to also
   support AMD).
 - Removed the redundant vm_event_check_ring(v->domain->vm_event_monitor)
   test (on re-reading the code, it turns out that there's no way that
   v->arch.vm_event == NULL && vm_event_check_ring() == true).
---
 xen/arch/x86/hvm/svm/intr.c    | 5 +++++
 xen/arch/x86/hvm/vm_event.c    | 2 ++
 xen/arch/x86/hvm/vmx/intr.c    | 5 +++++
 xen/arch/x86/vm_event.c        | 5 +++++
 xen/common/monitor.c           | 6 ++++++
 xen/include/asm-arm/vm_event.h | 6 ++++++
 xen/include/asm-x86/vm_event.h | 8 ++++++++
 7 files changed, 37 insertions(+)

diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 7967353..e1cc465 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -32,6 +32,7 @@
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/intr.h>
 #include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
+#include <asm/vm_event.h>
 #include <xen/event.h>
 #include <xen/kernel.h>
 #include <public/hvm/ioreq.h>
@@ -137,6 +138,10 @@ void svm_intr_assist(void)
     struct hvm_intack intack;
     enum hvm_intblk intblk;
 
+    /* Block event injection while handling a sync vm_event. */
+    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->intr_block )
+        return;
+
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c
index 0df8ab4..6e4233b 100644
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ -124,6 +124,8 @@ void hvm_vm_event_do_resume(struct vcpu *v)
             w->do_write.msr = 0;
         }
     }
+
+    vm_event_block_interrupts(v, false);
 }
 
 /*
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 5e8cbd4..9102064 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -37,6 +37,7 @@
 #include <asm/hvm/nestedhvm.h>
 #include <public/hvm/ioreq.h>
 #include <asm/hvm/trace.h>
+#include <asm/vm_event.h>
 
 /*
  * A few notes on virtual NMI and INTR delivery, and interactions with
@@ -239,6 +240,10 @@ void vmx_intr_assist(void)
         return;
     }
 
+    /* Block event injection while handling a sync vm_event. */
+    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->intr_block )
+        return;
+
     /* Crank the handle on interrupt state. */
     pt_vector = pt_update_irq(v);
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 713e684..d71881f 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -122,6 +122,11 @@ void vm_event_monitor_next_interrupt(struct vcpu *v)
     v->arch.monitor.next_interrupt_enabled = true;
 }
 
+void vm_event_block_interrupts(struct vcpu *v, bool value)
+{
+    v->arch.vm_event->intr_block = value;
+}
+
 #ifdef CONFIG_HVM
 static void vm_event_pack_segment_register(enum x86_segment segment,
                                            struct vm_event_regs_x86 *reg)
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index c606683..af52b07 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -113,6 +113,12 @@ int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req)
     if ( sync )
     {
         req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
+        /*
+         * It only makes sense to block interrupts for the duration of
+         * processing sync (blocking) vm_events, since that is the only
+         * case where emulating the current instruction really applies.
+         */
+        vm_event_block_interrupts(v, true);
         vm_event_vcpu_pause(v);
         rc = 1;
     }
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
index 66f2474..b63249e 100644
--- a/xen/include/asm-arm/vm_event.h
+++ b/xen/include/asm-arm/vm_event.h
@@ -52,4 +52,10 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
     /* Not supported on ARM. */
 }
 
+static inline
+void vm_event_block_interrupts(struct vcpu *v, bool value)
+{
+    /* Not supported on ARM. */
+}
+
 #endif /* __ASM_ARM_VM_EVENT_H__ */
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index 39e73c8..0f20cf0 100644
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -34,6 +34,12 @@ struct arch_vm_event {
     struct monitor_write_data write_data;
     struct vm_event_regs_x86 gprs;
    bool set_gprs;
+    /*
+     * Block interrupts until the handling of this vm_event is complete
+     * (after the fashion of single-step). Meant for the cases where the
+     * vm_event reply asks for emulation of the current instruction.
+     */
+    bool intr_block;
 };
 
 int vm_event_init_domain(struct domain *d);
@@ -47,4 +53,6 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp);
 
 void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp);
 
+void vm_event_block_interrupts(struct vcpu *v, bool value);
+
 #endif /* __ASM_X86_VM_EVENT_H__ */
-- 
2.7.4
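
For context, the path this patch protects is driven from the consumer
side: a reply to a sync vm_event that sets VM_EVENT_FLAG_EMULATE causes
the hypervisor to emulate the trapped instruction on resume, and that
emulation is what may legitimately call hvm_inject_page_fault(). Below
is a rough sketch of such a reply, loosely modeled on
tools/tests/xen-access. It is illustrative only: fill_emulate_response()
is a hypothetical helper, the ring-buffer plumbing that would actually
deliver the response is omitted, and the exact include may vary, but the
vm_event_response_t fields and flags are from the public vm_event ABI.

    #include <string.h>
    #include <xenctrl.h> /* pulls in the public vm_event ABI in typical builds */

    /*
     * Hypothetical helper: build a reply to a sync vm_event asking Xen to
     * emulate the trapped instruction instead of re-executing it.
     */
    static void fill_emulate_response(const vm_event_request_t *req,
                                      vm_event_response_t *rsp)
    {
        memset(rsp, 0, sizeof(*rsp));
        rsp->version = VM_EVENT_INTERFACE_VERSION;
        rsp->vcpu_id = req->vcpu_id;
        rsp->reason  = req->reason;
        /*
         * Keep VM_EVENT_FLAG_VCPU_PAUSED so the vCPU is unpaused on
         * resume, and request emulation of the current instruction.
         */
        rsp->flags = (req->flags & VM_EVENT_FLAG_VCPU_PAUSED) |
                     VM_EVENT_FLAG_EMULATE;
    }

With the patch applied, interrupt injection for the vCPU stays blocked
from monitor_traps() (where intr_block is set) until
hvm_vm_event_do_resume() clears it, so a page fault injected while
emulating on behalf of such a reply can no longer overwrite an interrupt
already written into the VMCS/VMCB.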