
[Xen-devel] [PATCH V3] x86/vm_event: block interrupt injection for sync vm_events



Block interrupt injection (in vmx_intr_assist() and
svm_intr_assist()) for the duration of processing a sync
vm_event, similar to the strategy currently used for
single-stepping. Otherwise, emulating an instruction at
the request of a vm_event reply may legitimately need to
call e.g. hvm_inject_page_fault(), which would then
overwrite the interrupt already written into the VMCS and
silently lose it.
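
For context, this is roughly how an introspection application
ends up requesting such an emulation: it replies to a sync event
with VM_EVENT_FLAG_EMULATE set. The sketch below is illustrative
only (ring and error handling omitted), assumes the public
vm_event ABI from xen/include/public/vm_event.h, and is not part
of this patch:

    #include <string.h>
    #include <xen/vm_event.h>

    /* Craft a sync reply asking Xen to emulate the trapped
     * instruction instead of re-executing it. */
    static void craft_emulate_reply(const vm_event_request_t *req,
                                    vm_event_response_t *rsp)
    {
        memset(rsp, 0, sizeof(*rsp));

        rsp->version = VM_EVENT_INTERFACE_VERSION;
        rsp->vcpu_id = req->vcpu_id;
        rsp->reason  = req->reason;

        /* Echo the paused state so the vCPU is unpaused on resume. */
        rsp->flags = req->flags & VM_EVENT_FLAG_VCPU_PAUSED;

        /* Emulation may need to inject e.g. a #PF - without this
         * patch, that injection can clobber a VM_ENTRY_INTR_INFO
         * value already written by vmx_intr_assist(). */
        rsp->flags |= VM_EVENT_FLAG_EMULATE;
    }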

The sync vm_event handling path on x86/VMX is (roughly):
monitor_traps() -> process vm_event -> vmx_intr_assist()
(possibly writing VM_ENTRY_INTR_INFO) ->
hvm_vm_event_do_resume() -> hvm_emulate_one_vm_event()
(possibly overwriting the VM_ENTRY_INTR_INFO value).
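
To make the ordering concrete, below is a minimal, self-contained
toy model of the clobber and of the new flag's effect. Every name,
type and value in it is a stand-in invented for this sketch (only
the control flow mirrors the patch); it compiles as a plain C
program:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_vcpu {
        bool sync_event;        /* stands in for v->arch.vm_event->sync_event */
        bool ext_pending;       /* an external interrupt awaiting injection */
        unsigned int intr_info; /* stands in for VM_ENTRY_INTR_INFO */
    };

    #define TOY_INTR_EXT 0x80000020u /* made-up: external interrupt */
    #define TOY_INTR_PF  0x8000030eu /* made-up: #PF, vector 14 */

    /* Stands in for vmx_intr_assist() with the patch applied. */
    static void toy_intr_assist(struct toy_vcpu *v)
    {
        if ( v->sync_event )
            return;                 /* the new guard: inject nothing yet */
        if ( v->ext_pending )
        {
            v->intr_info = TOY_INTR_EXT;
            v->ext_pending = false; /* considered delivered */
        }
    }

    int main(void)
    {
        struct toy_vcpu v = { .sync_event = true, .ext_pending = true };

        toy_intr_assist(&v);       /* guard active: ext_pending survives */
        v.intr_info = TOY_INTR_PF; /* emulation injects the #PF */
        v.sync_event = false;      /* hvm_vm_event_do_resume() clears it */

        /* The #PF is delivered; on a later pass the interrupt follows.
         * Without the guard, toy_intr_assist() would have consumed
         * ext_pending first and the #PF write would have lost it. */
        toy_intr_assist(&v);

        printf("intr_info=%#x ext_pending=%d (nothing lost)\n",
               v.intr_info, v.ext_pending);
        return 0;
    }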

This patch may also be helpful for the future removal
of may_defer in hvm_set_cr{0,3,4} and hvm_set_msr().

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>

---
Changes since V2:
 - Renamed intr_block to sync_event and
   vm_event_block_interrupts() to vm_event_sync_event().
 - Rephrased and/or removed stale comments.
 - Added more details to the commit description.
---
 xen/arch/x86/hvm/svm/intr.c    | 5 +++++
 xen/arch/x86/hvm/vm_event.c    | 2 ++
 xen/arch/x86/hvm/vmx/intr.c    | 5 +++++
 xen/arch/x86/vm_event.c        | 5 +++++
 xen/common/monitor.c           | 1 +
 xen/include/asm-arm/vm_event.h | 6 ++++++
 xen/include/asm-x86/vm_event.h | 4 ++++
 7 files changed, 28 insertions(+)

diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 7967353..ff75516 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -32,6 +32,7 @@
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/intr.h>
 #include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
+#include <asm/vm_event.h>
 #include <xen/event.h>
 #include <xen/kernel.h>
 #include <public/hvm/ioreq.h>
@@ -137,6 +138,10 @@ void svm_intr_assist(void)
     struct hvm_intack intack;
     enum hvm_intblk intblk;
 
+    /* Block event injection while handling a sync vm_event. */
+    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
+        return;
+
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
 
diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c
index 0df8ab4..121de23 100644
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ -124,6 +124,8 @@ void hvm_vm_event_do_resume(struct vcpu *v)
 
         w->do_write.msr = 0;
     }
+
+    vm_event_sync_event(v, false);
 }
 
 /*
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 5e8cbd4..0d097cf 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -37,6 +37,7 @@
 #include <asm/hvm/nestedhvm.h>
 #include <public/hvm/ioreq.h>
 #include <asm/hvm/trace.h>
+#include <asm/vm_event.h>
 
 /*
  * A few notes on virtual NMI and INTR delivery, and interactions with
@@ -239,6 +240,10 @@ void vmx_intr_assist(void)
         return;
     }
 
+    /* Block event injection while handling a sync vm_event. */
+    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
+        return;
+
     /* Crank the handle on interrupt state. */
     pt_vector = pt_update_irq(v);
 
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 713e684..51c3493 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -122,6 +122,11 @@ void vm_event_monitor_next_interrupt(struct vcpu *v)
     v->arch.monitor.next_interrupt_enabled = true;
 }
 
+void vm_event_sync_event(struct vcpu *v, bool value)
+{
+    v->arch.vm_event->sync_event = value;
+}
+
 #ifdef CONFIG_HVM
 static void vm_event_pack_segment_register(enum x86_segment segment,
                                            struct vm_event_regs_x86 *reg)
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index c606683..cb5f37f 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -113,6 +113,7 @@ int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req)
     if ( sync )
     {
         req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
+        vm_event_sync_event(v, true);
         vm_event_vcpu_pause(v);
         rc = 1;
     }
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
index 66f2474..14d1d34 100644
--- a/xen/include/asm-arm/vm_event.h
+++ b/xen/include/asm-arm/vm_event.h
@@ -52,4 +52,10 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
     /* Not supported on ARM. */
 }
 
+static inline
+void vm_event_sync_event(struct vcpu *v, bool value)
+{
+    /* Not supported on ARM. */
+}
+
 #endif /* __ASM_ARM_VM_EVENT_H__ */
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index 39e73c8..23e6557 100644
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -34,6 +34,8 @@ struct arch_vm_event {
     struct monitor_write_data write_data;
     struct vm_event_regs_x86 gprs;
     bool set_gprs;
+    /* A sync vm_event has been sent and we're not done handling it. */
+    bool sync_event;
 };
 
 int vm_event_init_domain(struct domain *d);
@@ -47,4 +49,6 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp);
 
 void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp);
 
+void vm_event_sync_event(struct vcpu *v, bool value);
+
 #endif /* __ASM_X86_VM_EVENT_H__ */
-- 
2.7.4

