[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] x86/vm_event: block interrupt injection for sync vm_events
commit ff9b9d540f1bddab278daa103291a217270cc587 Author: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx> AuthorDate: Mon Jan 21 12:13:22 2019 +0100 Commit: Jan Beulich <jbeulich@xxxxxxxx> CommitDate: Mon Jan 21 12:13:22 2019 +0100 x86/vm_event: block interrupt injection for sync vm_events Block interrupts (in vmx_intr_assist()) for the duration of processing a sync vm_event (similarly to the strategy currently used for single-stepping). Otherwise, attempting to emulate an instruction when requested by a vm_event reply may legitimately need to call e.g. hvm_inject_page_fault(), which then overwrites the active interrupt in the VMCS. The sync vm_event handling path on x86/VMX is (roughly): monitor_traps() -> process vm_event -> vmx_intr_assist() (possibly writing VM_ENTRY_INTR_INFO) -> hvm_vm_event_do_resume() -> hvm_emulate_one_vm_event() (possibly overwriting the VM_ENTRY_INTR_INFO value). This patch may also be helpful for the future removal of may_defer in hvm_set_cr{0,3,4} and hvm_set_msr(). 
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx> Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx> Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> Acked-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx> Release-acked-by: Juergen Gross <jgross@xxxxxxxx> --- xen/arch/x86/hvm/svm/intr.c | 5 +++++ xen/arch/x86/hvm/vm_event.c | 2 ++ xen/arch/x86/hvm/vmx/intr.c | 5 +++++ xen/arch/x86/vm_event.c | 5 +++++ xen/common/monitor.c | 1 + xen/include/asm-arm/vm_event.h | 6 ++++++ xen/include/asm-x86/vm_event.h | 4 ++++ 7 files changed, 28 insertions(+) diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c index 79673535d1..ff755165cd 100644 --- a/xen/arch/x86/hvm/svm/intr.c +++ b/xen/arch/x86/hvm/svm/intr.c @@ -32,6 +32,7 @@ #include <asm/hvm/svm/svm.h> #include <asm/hvm/svm/intr.h> #include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */ +#include <asm/vm_event.h> #include <xen/event.h> #include <xen/kernel.h> #include <public/hvm/ioreq.h> @@ -137,6 +138,10 @@ void svm_intr_assist(void) struct hvm_intack intack; enum hvm_intblk intblk; + /* Block event injection while handling a sync vm_event. */ + if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event ) + return; + /* Crank the handle on interrupt state. 
*/ pt_update_irq(v); diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c index 0df8ab40e6..121de23071 100644 --- a/xen/arch/x86/hvm/vm_event.c +++ b/xen/arch/x86/hvm/vm_event.c @@ -124,6 +124,8 @@ void hvm_vm_event_do_resume(struct vcpu *v) w->do_write.msr = 0; } + + vm_event_sync_event(v, false); } /* diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c index 5e8cbd4b4a..0d097cf1f2 100644 --- a/xen/arch/x86/hvm/vmx/intr.c +++ b/xen/arch/x86/hvm/vmx/intr.c @@ -37,6 +37,7 @@ #include <asm/hvm/nestedhvm.h> #include <public/hvm/ioreq.h> #include <asm/hvm/trace.h> +#include <asm/vm_event.h> /* * A few notes on virtual NMI and INTR delivery, and interactions with @@ -239,6 +240,10 @@ void vmx_intr_assist(void) return; } + /* Block event injection while handling a sync vm_event. */ + if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event ) + return; + /* Crank the handle on interrupt state. */ pt_vector = pt_update_irq(v); diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c index 713e684abe..51c3493b1d 100644 --- a/xen/arch/x86/vm_event.c +++ b/xen/arch/x86/vm_event.c @@ -122,6 +122,11 @@ void vm_event_monitor_next_interrupt(struct vcpu *v) v->arch.monitor.next_interrupt_enabled = true; } +void vm_event_sync_event(struct vcpu *v, bool value) +{ + v->arch.vm_event->sync_event = value; +} + #ifdef CONFIG_HVM static void vm_event_pack_segment_register(enum x86_segment segment, struct vm_event_regs_x86 *reg) diff --git a/xen/common/monitor.c b/xen/common/monitor.c index c6066830e3..cb5f37fdb2 100644 --- a/xen/common/monitor.c +++ b/xen/common/monitor.c @@ -113,6 +113,7 @@ int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req) if ( sync ) { req->flags |= VM_EVENT_FLAG_VCPU_PAUSED; + vm_event_sync_event(v, true); vm_event_vcpu_pause(v); rc = 1; } diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h index 66f2474fe1..14d1d341cc 100644 --- a/xen/include/asm-arm/vm_event.h +++ 
b/xen/include/asm-arm/vm_event.h @@ -52,4 +52,10 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp) /* Not supported on ARM. */ } +static inline +void vm_event_sync_event(struct vcpu *v, bool value) +{ + /* Not supported on ARM. */ +} + #endif /* __ASM_ARM_VM_EVENT_H__ */ diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h index 39e73c83ca..23e655710b 100644 --- a/xen/include/asm-x86/vm_event.h +++ b/xen/include/asm-x86/vm_event.h @@ -34,6 +34,8 @@ struct arch_vm_event { struct monitor_write_data write_data; struct vm_event_regs_x86 gprs; bool set_gprs; + /* A sync vm_event has been sent and we're not done handling it. */ + bool sync_event; }; int vm_event_init_domain(struct domain *d); @@ -47,4 +49,6 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp); void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp); +void vm_event_sync_event(struct vcpu *v, bool value); + #endif /* __ASM_X86_VM_EVENT_H__ */ -- generated by git-patchbot for /home/xen/git/xen.git#master _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.