
[Xen-changelog] [xen-unstable] hvm: Correctly combine hardware exceptions when one is raised during attempted delivery of another



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1206529446 0
# Node ID ced23158093a48a85c2538272b882643eac2de40
# Parent  5d25187bac941611a8a836b668a398a72df0afb0
hvm: Correctly combine hardware exceptions when one is raised during
attempted delivery of another.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            |   52 +++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c        |   10 ++++++
 xen/arch/x86/hvm/vmx/vmx.c        |   56 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h     |   29 ++-----------------
 xen/include/asm-x86/hvm/vmx/vmx.h |   47 ++-----------------------------
 5 files changed, 123 insertions(+), 71 deletions(-)
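
For context: hvm_combine_hw_exceptions() in xen/arch/x86/hvm/hvm.c below implements the architectural double-fault rules (see the "Interrupt 8 -- Double Fault" discussion in Intel SDM Vol. 3): a benign first exception is simply discarded, two contributory exceptions combine into #DF, any exception raised while delivering a #PF yields #DF, and a fault during #DF delivery escalates to a triple fault. The following standalone sketch -- not part of the patch; the TRAP_* values are the architectural vector numbers, and 0xff here merely stands in for the hvm_triple_fault() path -- reproduces the same decision table:

    #include <stdint.h>
    #include <stdio.h>

    /* Architectural x86 vector numbers (same values as Xen's TRAP_*). */
    #define TRAP_divide_error    0  /* #DE */
    #define TRAP_double_fault    8  /* #DF */
    #define TRAP_invalid_tss    10  /* #TS */
    #define TRAP_no_segment     11  /* #NP */
    #define TRAP_stack_error    12  /* #SS */
    #define TRAP_gp_fault       13  /* #GP */
    #define TRAP_page_fault     14  /* #PF */

    /* #DE, #TS, #NP, #SS, #GP, #PF -- the contributory class plus page
     * faults. This is the 0x7c01 mask used in the patch:
     * (1<<0)|(1<<10)|(1<<11)|(1<<12)|(1<<13)|(1<<14) == 0x7c01. */
    #define CONTRIB_OR_PF_MASK  0x7c01u

    static uint8_t combine(uint8_t vec1, uint8_t vec2)
    {
        if ( vec1 == TRAP_double_fault )
            return 0xff;                /* stands in for a triple fault */
        if ( vec1 == TRAP_page_fault )
            return TRAP_double_fault;   /* fault during #PF delivery */
        if ( !((1u << vec1) & CONTRIB_OR_PF_MASK) ||
             (vec2 == TRAP_page_fault) )
            return vec2;                /* benign first, or serial #PF */
        return TRAP_double_fault;       /* contributory + contributory */
    }

    int main(void)
    {
        /* #GP raised while delivering #PF: double fault. */
        printf("%u\n", combine(TRAP_page_fault, TRAP_gp_fault));  /* 8 */
        /* #PF raised while delivering #GP: handled serially as #PF. */
        printf("%u\n", combine(TRAP_gp_fault, TRAP_page_fault));  /* 14 */
        return 0;
    }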

diff -r 5d25187bac94 -r ced23158093a xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Mar 26 10:14:50 2008 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Wed Mar 26 11:04:06 2008 +0000
@@ -79,6 +79,58 @@ void hvm_enable(struct hvm_function_tabl
 
     if ( hvm_funcs.hap_supported )
         printk("HVM: Hardware Assisted Paging detected.\n");
+}
+
+/*
+ * Need to re-inject a given event? We avoid re-injecting software exceptions
+ * and interrupts because the faulting/trapping instruction can simply be
+ * re-executed (neither VMX nor SVM update RIP when they VMEXIT during
+ * INT3/INTO/INTn).
+ */
+int hvm_event_needs_reinjection(uint8_t type, uint8_t vector)
+{
+    switch ( type )
+    {
+    case X86_EVENTTYPE_EXT_INTR:
+    case X86_EVENTTYPE_NMI:
+        return 1;
+    case X86_EVENTTYPE_HW_EXCEPTION:
+        /*
+         * SVM uses type 3 ("HW Exception") for #OF and #BP. We explicitly
+         * check for these vectors, as they are really SW Exceptions. SVM has
+         * not updated RIP to point after the trapping instruction (INT3/INTO).
+         */
+        return (vector != 3) && (vector != 4);
+    default:
+        /* Software exceptions/interrupts can be re-executed (e.g., INT n). */
+        break;
+    }
+    return 0;
+}
+
+/*
+ * Combine two hardware exceptions: @vec2 was raised during delivery of @vec1.
+ * This means we can assume that @vec2 is contributory or a page fault.
+ */
+uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2)
+{
+    /* Exception during double-fault delivery always causes a triple fault. */
+    if ( vec1 == TRAP_double_fault )
+    {
+        hvm_triple_fault();
+        return TRAP_double_fault; /* dummy return */
+    }
+
+    /* Exception during page-fault delivery always causes a double fault. */
+    if ( vec1 == TRAP_page_fault )
+        return TRAP_double_fault;
+
+    /* Discard the first exception if it's benign or if we now have a #PF. */
+    if ( !((1u << vec1) & 0x7c01u) || (vec2 == TRAP_page_fault) )
+        return vec2;
+
+    /* Cannot combine the exceptions: double fault. */
+    return TRAP_double_fault;
 }
 
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
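
As a usage note, the sketch below illustrates the contract of hvm_event_needs_reinjection() with a few hypothetical asserts. It assumes the function above is linked in, and uses the X86_EVENTTYPE_* encodings from xen/include/asm-x86/hvm/hvm.h (external interrupt = 0, NMI = 2, hardware exception = 3, software interrupt = 4):

    #include <assert.h>
    #include <stdint.h>

    /* Provided by the patched xen/arch/x86/hvm/hvm.c above. */
    int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);

    int main(void)
    {
        /* Interrupts and NMIs are lost if dropped: must re-inject. */
        assert(hvm_event_needs_reinjection(0 /* EXT_INTR */, 0x20));
        assert(hvm_event_needs_reinjection(2 /* NMI */, 2));

        /* Genuine hardware exceptions, e.g. #PF (vector 14): re-inject. */
        assert(hvm_event_needs_reinjection(3 /* HW_EXCEPTION */, 14));

        /* #BP (3) and #OF (4) arrive as "HW exception" on SVM, but RIP
         * still points at INT3/INTO, so resuming re-executes them. */
        assert(!hvm_event_needs_reinjection(3 /* HW_EXCEPTION */, 3));
        assert(!hvm_event_needs_reinjection(3 /* HW_EXCEPTION */, 4));

        /* Software interrupts (INT n) are likewise simply re-executed. */
        assert(!hvm_event_needs_reinjection(4 /* SW_INTERRUPT */, 0x80));
        return 0;
    }
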
diff -r 5d25187bac94 -r ced23158093a xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Mar 26 10:14:50 2008 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Mar 26 11:04:06 2008 +0000
@@ -725,7 +725,15 @@ static void svm_inject_exception(
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
-    eventinj_t event;
+    eventinj_t event = vmcb->eventinj;
+
+    if ( unlikely(event.fields.v) &&
+         (event.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
+    {
+        trapnr = hvm_combine_hw_exceptions(event.fields.vector, trapnr);
+        if ( trapnr == TRAP_double_fault )
+            errcode = 0;
+    }
 
     event.bytes = 0;
     event.fields.v = 1;
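
For reference, EVENTINJ is the VMCB field through which an event is injected at the next VMRUN. The new check above handles svm_inject_exception() being called while a hardware exception is already queued there (V bit still set): the two vectors are combined before the field is rewritten. An illustrative layout, following AMD APM Vol. 2, "Event Injection" (field names are ad hoc, not Xen's eventinj_t):

    #include <stdint.h>

    /* Illustrative sketch of the 64-bit VMCB EVENTINJ field. */
    struct eventinj_sketch {
        uint64_t vector    : 8;  /* bits 0-7:  vector number */
        uint64_t type      : 3;  /* bits 8-10: 0=INTR, 2=NMI, 3=exception,
                                  *            4=software interrupt */
        uint64_t ev        : 1;  /* bit 11:    error code valid */
        uint64_t reserved  : 19; /* bits 12-30 */
        uint64_t v         : 1;  /* bit 31:    event pending injection */
        uint64_t errorcode : 32; /* bits 32-63 */
    };
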
diff -r 5d25187bac94 -r ced23158093a xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Mar 26 10:14:50 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Mar 26 11:04:06 2008 +0000
@@ -983,6 +983,62 @@ static void vmx_flush_guest_tlbs(void)
      * because VMRESUME will flush it for us. */
 }
 
+
+
+static void __vmx_inject_exception(
+    struct vcpu *v, int trap, int type, int error_code)
+{
+    unsigned long intr_fields;
+
+    /*
+     * NB. Callers do not need to worry about clearing STI/MOV-SS blocking:
+     *  "If the VM entry is injecting, there is no blocking by STI or by
+     *   MOV SS following the VM entry, regardless of the contents of the
+     *   interruptibility-state field [in the guest-state area before the
+     *   VM entry]", PRM Vol. 3, 22.6.1 (Interruptibility State).
+     */
+
+    intr_fields = (INTR_INFO_VALID_MASK | (type<<8) | trap);
+    if ( error_code != HVM_DELIVER_NO_ERROR_CODE ) {
+        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+        intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
+    }
+
+    __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
+
+    if ( trap == TRAP_page_fault )
+        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
+    else
+        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
+}
+
+void vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code)
+{
+    unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);
+
+    if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
+         (((intr_info >> 8) & 7) == X86_EVENTTYPE_HW_EXCEPTION) )
+    {
+        trap = hvm_combine_hw_exceptions((uint8_t)intr_info, trap);
+        if ( trap == TRAP_double_fault )
+            error_code = 0;
+    }
+
+    __vmx_inject_exception(v, trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
+}
+
+void vmx_inject_extint(struct vcpu *v, int trap)
+{
+    __vmx_inject_exception(v, trap, X86_EVENTTYPE_EXT_INTR,
+                           HVM_DELIVER_NO_ERROR_CODE);
+}
+
+void vmx_inject_nmi(struct vcpu *v)
+{
+    __vmx_inject_exception(v, 2, X86_EVENTTYPE_NMI,
+                           HVM_DELIVER_NO_ERROR_CODE);
+}
+
 static void vmx_inject_exception(
     unsigned int trapnr, int errcode, unsigned long cr2)
 {
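
On the VMX side, the equivalent check decodes the VM-entry interruption-information field, which per Intel SDM Vol. 3 carries the vector in bits 0-7, the event type in bits 8-10, a deliver-error-code flag in bit 11 and a valid flag in bit 31. A standalone sketch of the decode used by vmx_inject_hw_exception() above (helper names are illustrative, not Xen's):

    #include <stdint.h>

    #define INTR_INFO_VALID_BIT  (1u << 31)  /* bit 31: info is valid */

    /* Vector lives in bits 0-7: this is why the patch can simply cast
     * intr_info to uint8_t before calling hvm_combine_hw_exceptions(). */
    static inline uint8_t intr_info_vector(uint32_t info)
    {
        return (uint8_t)info;
    }

    /* Event type lives in bits 8-10 (3 == X86_EVENTTYPE_HW_EXCEPTION). */
    static inline unsigned int intr_info_type(uint32_t info)
    {
        return (info >> 8) & 7;
    }

    static inline int pending_hw_exception(uint32_t info)
    {
        return (info & INTR_INFO_VALID_BIT) && (intr_info_type(info) == 3);
    }
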
diff -r 5d25187bac94 -r ced23158093a xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Mar 26 10:14:50 2008 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Mar 26 11:04:06 2008 +0000
@@ -270,32 +270,9 @@ static inline int hvm_do_pmu_interrupt(s
 #define X86_EVENTTYPE_SW_INTERRUPT          4    /* software interrupt */
 #define X86_EVENTTYPE_SW_EXCEPTION          6    /* software exception */
 
-/*
- * Need to re-inject a given event? We avoid re-injecting software exceptions
- * and interrupts because the faulting/trapping instruction can simply be
- * re-executed (neither VMX nor SVM update RIP when they VMEXIT during
- * INT3/INTO/INTn).
- */
-static inline int hvm_event_needs_reinjection(uint8_t type, uint8_t vector)
-{
-    switch ( type )
-    {
-    case X86_EVENTTYPE_EXT_INTR:
-    case X86_EVENTTYPE_NMI:
-        return 1;
-    case X86_EVENTTYPE_HW_EXCEPTION:
-        /*
-         * SVM uses type 3 ("HW Exception") for #OF and #BP. We explicitly
-         * check for these vectors, as they are really SW Exceptions. SVM has
-         * not updated RIP to point after the trapping instruction (INT3/INTO).
-         */
-        return (vector != 3) && (vector != 4);
-    default:
-        /* Software exceptions/interrupts can be re-executed (e.g., INT n). */
-        break;
-    }
-    return 0;
-}
+int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);
+
+uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);
 
 static inline int hvm_cpu_up(void)
 {
diff -r 5d25187bac94 -r ced23158093a xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Wed Mar 26 10:14:50 2008 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Wed Mar 26 11:04:06 2008 +0000
@@ -264,49 +264,8 @@ static inline int __vmxon(u64 addr)
     return rc;
 }
 
-static inline void __vmx_inject_exception(
-    struct vcpu *v, int trap, int type, int error_code)
-{
-    unsigned long intr_fields;
-
-    /*
-     * NB. Callers do not need to worry about clearing STI/MOV-SS blocking:
-     *  "If the VM entry is injecting, there is no blocking by STI or by
-     *   MOV SS following the VM entry, regardless of the contents of the
-     *   interruptibility-state field [in the guest-state area before the
-     *   VM entry]", PRM Vol. 3, 22.6.1 (Interruptibility State).
-     */
-
-    intr_fields = (INTR_INFO_VALID_MASK | (type<<8) | trap);
-    if ( error_code != HVM_DELIVER_NO_ERROR_CODE ) {
-        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
-        intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
-    }
-
-    __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
-
-    if ( trap == TRAP_page_fault )
-        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
-    else
-        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
-}
-
-static inline void vmx_inject_hw_exception(
-    struct vcpu *v, int trap, int error_code)
-{
-    __vmx_inject_exception(v, trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
-}
-
-static inline void vmx_inject_extint(struct vcpu *v, int trap)
-{
-    __vmx_inject_exception(v, trap, X86_EVENTTYPE_EXT_INTR,
-                           HVM_DELIVER_NO_ERROR_CODE);
-}
-
-static inline void vmx_inject_nmi(struct vcpu *v)
-{
-    __vmx_inject_exception(v, 2, X86_EVENTTYPE_NMI,
-                           HVM_DELIVER_NO_ERROR_CODE);
-}
+void vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code);
+void vmx_inject_extint(struct vcpu *v, int trap);
+void vmx_inject_nmi(struct vcpu *v);
 
 #endif /* __ASM_X86_HVM_VMX_VMX_H__ */
