
[Xen-changelog] [xen-unstable] xen: Define new struct hvm_trap and cleanup vmx exception


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Wed, 30 May 2012 13:22:21 +0000
  • Delivery-date: Wed, 30 May 2012 13:22:28 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1338366471 -3600
# Node ID a418c32885ab45dff20e35f118341e422b782df8
# Parent  ff16fb989c14a040134d2e49e739b703dc1bfa06
xen: Define new struct hvm_trap and cleanup vmx exception

Define a new struct hvm_trap to represent trap information, rename
hvm_inject_exception to hvm_inject_trap, and define a couple of
wrappers around that function for existing callers.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
Signed-off-by: Xudong Hao <xudong.hao@xxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---
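As an illustrative sketch (not part of the changeset itself): after this
change, call sites that used to pass a vector, error code and CR2 to
hvm_inject_exception() either go through one of the thin wrappers or build a
struct hvm_trap themselves. All names below come from the patch; pfec and
addr stand for the caller's page-fault error code and faulting linear
address, as in the emulate.c hunks:

    /* Hardware exception with an error code, here #GP/0. */
    hvm_inject_hw_exception(TRAP_gp_fault, 0);

    /* Page fault: the wrapper also records the faulting address for CR2. */
    hvm_inject_page_fault(pfec, addr);

    /* Other event types, e.g. a software exception, fill the struct directly. */
    struct hvm_trap trap = {
        .vector     = TRAP_int3,
        .type       = X86_EVENTTYPE_SW_EXCEPTION,
        .error_code = HVM_DELIVER_NO_ERROR_CODE,
    };
    hvm_inject_trap(&trap);

Both wrappers initialise a struct hvm_trap of type X86_EVENTTYPE_HW_EXCEPTION
and call hvm_inject_trap(), which performs the nested-HVM intercept check
before handing the event to hvm_funcs.inject_trap().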


diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/hvm/emulate.c        Wed May 30 09:27:51 2012 +0100
@@ -326,7 +326,7 @@ static int hvmemul_linear_to_phys(
     {
         if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
             return X86EMUL_RETRY;
-        hvm_inject_exception(TRAP_page_fault, pfec, addr);
+        hvm_inject_page_fault(pfec, addr);
         return X86EMUL_EXCEPTION;
     }
 
@@ -349,7 +349,7 @@ static int hvmemul_linear_to_phys(
                 ASSERT(!reverse);
                 if ( npfn != INVALID_GFN )
                     return X86EMUL_UNHANDLEABLE;
-                hvm_inject_exception(TRAP_page_fault, pfec, addr & PAGE_MASK);
+                hvm_inject_page_fault(pfec, addr & PAGE_MASK);
                 return X86EMUL_EXCEPTION;
             }
             *reps = done;
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed May 30 09:27:51 2012 +0100
@@ -347,12 +347,10 @@ void hvm_do_resume(struct vcpu *v)
     }
 
     /* Inject pending hw/sw trap */
-    if (v->arch.hvm_vcpu.inject_trap != -1) 
+    if ( v->arch.hvm_vcpu.inject_trap.vector != -1 ) 
     {
-        hvm_inject_exception(v->arch.hvm_vcpu.inject_trap, 
-                             v->arch.hvm_vcpu.inject_error_code, 
-                             v->arch.hvm_vcpu.inject_cr2);
-        v->arch.hvm_vcpu.inject_trap = -1;
+        hvm_inject_trap(&v->arch.hvm_vcpu.inject_trap);
+        v->arch.hvm_vcpu.inject_trap.vector = -1;
     }
 }
 
@@ -1047,7 +1045,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
     INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
 
-    v->arch.hvm_vcpu.inject_trap = -1;
+    v->arch.hvm_vcpu.inject_trap.vector = -1;
 
 #ifdef CONFIG_COMPAT
     rc = setup_compat_arg_xlat(v);
@@ -1194,18 +1192,19 @@ void hvm_triple_fault(void)
     domain_shutdown(v->domain, SHUTDOWN_reboot);
 }
 
-void hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
+void hvm_inject_trap(struct hvm_trap *trap)
 {
     struct vcpu *curr = current;
 
     if ( nestedhvm_enabled(curr->domain) &&
          !nestedhvm_vmswitch_in_progress(curr) &&
          nestedhvm_vcpu_in_guestmode(curr) &&
-         nhvm_vmcx_guest_intercepts_trap(curr, trapnr, errcode) )
+         nhvm_vmcx_guest_intercepts_trap(
+             curr, trap->vector, trap->error_code) )
     {
         enum nestedhvm_vmexits nsret;
 
-        nsret = nhvm_vcpu_vmexit_trap(curr, trapnr, errcode, cr2);
+        nsret = nhvm_vcpu_vmexit_trap(curr, trap);
 
         switch ( nsret )
         {
@@ -1221,7 +1220,26 @@ void hvm_inject_exception(unsigned int t
         }
     }
 
-    hvm_funcs.inject_exception(trapnr, errcode, cr2);
+    hvm_funcs.inject_trap(trap);
+}
+
+void hvm_inject_hw_exception(unsigned int trapnr, int errcode)
+{
+    struct hvm_trap trap = {
+        .vector = trapnr,
+        .type = X86_EVENTTYPE_HW_EXCEPTION,
+        .error_code = errcode };
+    hvm_inject_trap(&trap);
+}
+
+void hvm_inject_page_fault(int errcode, unsigned long cr2)
+{
+    struct hvm_trap trap = {
+        .vector = TRAP_page_fault,
+        .type = X86_EVENTTYPE_HW_EXCEPTION,
+        .error_code = errcode,
+        .cr2 = cr2 };
+    hvm_inject_trap(&trap);
 }
 
 int hvm_hap_nested_page_fault(unsigned long gpa,
@@ -1270,7 +1288,7 @@ int hvm_hap_nested_page_fault(unsigned l
             return -1;
         case NESTEDHVM_PAGEFAULT_MMIO:
             if ( !handle_mmio() )
-                hvm_inject_exception(TRAP_gp_fault, 0, 0);
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
             return 1;
         }
     }
@@ -1337,7 +1355,7 @@ int hvm_hap_nested_page_fault(unsigned l
     {
         put_gfn(p2m->domain, gfn);
         if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         rc = 1;
         goto out;
     }
@@ -1380,7 +1398,7 @@ int hvm_hap_nested_page_fault(unsigned l
     {
         gdprintk(XENLOG_WARNING,
                  "trying to write to read-only grant mapping\n");
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         rc = 1;
         goto out_put_gfn;
     }
@@ -1441,7 +1459,7 @@ int hvm_handle_xsetbv(u64 new_bv)
 
     return 0;
 err:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return -1;
 }
 
@@ -1457,7 +1475,7 @@ int hvm_set_efer(uint64_t value)
     {
         gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                  "EFER: 0x%"PRIx64"\n", value);
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return X86EMUL_EXCEPTION;
     }
 
@@ -1466,7 +1484,7 @@ int hvm_set_efer(uint64_t value)
     {
         gdprintk(XENLOG_WARNING,
                  "Trying to change EFER.LME with paging enabled\n");
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return X86EMUL_EXCEPTION;
     }
 
@@ -1722,7 +1740,7 @@ int hvm_set_cr0(unsigned long value)
     return X86EMUL_OKAY;
 
  gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -1808,7 +1826,7 @@ int hvm_set_cr4(unsigned long value)
     return X86EMUL_OKAY;
 
  gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -2104,7 +2122,7 @@ static int hvm_load_segment_selector(
  unmap_and_fail:
     hvm_unmap_entry(pdesc);
  fail:
-    hvm_inject_exception(fault_type, sel & 0xfffc, 0);
+    hvm_inject_hw_exception(fault_type, sel & 0xfffc);
  hvm_map_fail:
     return 1;
 }
@@ -2137,9 +2155,9 @@ void hvm_task_switch(
 
     if ( ((tss_sel & 0xfff8) + 7) > gdt.limit )
     {
-        hvm_inject_exception((taskswitch_reason == TSW_iret) ?
+        hvm_inject_hw_exception((taskswitch_reason == TSW_iret) ?
                              TRAP_invalid_tss : TRAP_gp_fault,
-                             tss_sel & 0xfff8, 0);
+                             tss_sel & 0xfff8);
         goto out;
     }
 
@@ -2164,21 +2182,21 @@ void hvm_task_switch(
 
     if ( !tr.attr.fields.p )
     {
-        hvm_inject_exception(TRAP_no_segment, tss_sel & 0xfff8, 0);
+        hvm_inject_hw_exception(TRAP_no_segment, tss_sel & 0xfff8);
         goto out;
     }
 
     if ( tr.attr.fields.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
     {
-        hvm_inject_exception(
+        hvm_inject_hw_exception(
             (taskswitch_reason == TSW_iret) ? TRAP_invalid_tss : TRAP_gp_fault,
-            tss_sel & 0xfff8, 0);
+            tss_sel & 0xfff8);
         goto out;
     }
 
     if ( tr.limit < (sizeof(tss)-1) )
     {
-        hvm_inject_exception(TRAP_invalid_tss, tss_sel & 0xfff8, 0);
+        hvm_inject_hw_exception(TRAP_invalid_tss, tss_sel & 0xfff8);
         goto out;
     }
 
@@ -2283,7 +2301,7 @@ void hvm_task_switch(
         goto out;
 
     if ( (tss.trace & 1) && !exn_raised )
-        hvm_inject_exception(TRAP_debug, tss_sel & 0xfff8, 0);
+        hvm_inject_hw_exception(TRAP_debug, tss_sel & 0xfff8);
 
     tr.attr.fields.type = 0xb; /* busy 32-bit tss */
     hvm_set_segment_register(v, x86_seg_tr, &tr);
@@ -2362,7 +2380,7 @@ static enum hvm_copy_result __hvm_copy(
                 if ( pfec == PFEC_page_shared )
                     return HVMCOPY_gfn_shared;
                 if ( flags & HVMCOPY_fault )
-                    hvm_inject_exception(TRAP_page_fault, pfec, addr);
+                    hvm_inject_page_fault(pfec, addr);
                 return HVMCOPY_bad_gva_to_gfn;
             }
         }
@@ -2849,7 +2867,7 @@ int hvm_msr_read_intercept(unsigned int 
     return ret;
 
  gp_fault:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     ret = X86EMUL_EXCEPTION;
     *msr_content = -1ull;
     goto out;
@@ -2962,7 +2980,7 @@ int hvm_msr_write_intercept(unsigned int
     return ret;
 
 gp_fault:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -4267,13 +4285,13 @@ long do_hvm_op(unsigned long op, XEN_GUE
         if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
             goto param_fail8;
         
-        if ( v->arch.hvm_vcpu.inject_trap != -1 )
+        if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
             rc = -EBUSY;
         else 
         {
-            v->arch.hvm_vcpu.inject_trap       = tr.trap;
-            v->arch.hvm_vcpu.inject_error_code = tr.error_code;
-            v->arch.hvm_vcpu.inject_cr2        = tr.cr2;
+            v->arch.hvm_vcpu.inject_trap.vector = tr.trap;
+            v->arch.hvm_vcpu.inject_trap.error_code = tr.error_code;
+            v->arch.hvm_vcpu.inject_trap.cr2 = tr.cr2;
         }
 
     param_fail8:
@@ -4431,11 +4449,9 @@ int nhvm_vcpu_vmexit(struct vcpu *v, str
     return -EOPNOTSUPP;
 }
 
-int
-nhvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
-                       int errcode, unsigned long cr2)
+int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
 {
-    return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trapnr, errcode, cr2);
+    return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trap);
 }
 
 uint64_t nhvm_vcpu_guestcr3(struct vcpu *v)
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/hvm/io.c     Wed May 30 09:27:51 2012 +0100
@@ -200,7 +200,7 @@ int handle_mmio(void)
         return 0;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
-            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+            hvm_inject_hw_exception(ctxt.exn_vector, ctxt.exn_error_code);
         break;
     default:
         break;
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/hvm/svm/emulate.c
--- a/xen/arch/x86/hvm/svm/emulate.c    Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/hvm/svm/emulate.c    Wed May 30 09:27:51 2012 +0100
@@ -147,7 +147,7 @@ static int fetch(struct vcpu *v, u8 *buf
         /* Not OK: fetches from non-RAM pages are not supportable. */
         gdprintk(XENLOG_WARNING, "Bad instruction fetch at %#lx (%#lx)\n",
                  (unsigned long) guest_cpu_user_regs()->eip, addr);
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return 0;
     }
     return 1;
@@ -216,7 +216,7 @@ int __get_instruction_length_from_list(s
     gdprintk(XENLOG_WARNING,
              "%s: Mismatch between expected and actual instruction bytes: "
              "eip = %lx\n",  __func__, (unsigned long)vmcb->rip);
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return 0;
 
  done:
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c  Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c  Wed May 30 09:27:51 2012 +0100
@@ -735,8 +735,8 @@ nsvm_vcpu_vmrun(struct vcpu *v, struct c
     default:
         gdprintk(XENLOG_ERR,
             "nsvm_vcpu_vmentry failed, injecting #UD\n");
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
-        /* Must happen after hvm_inject_exception or it doesn't work right. */
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+        /* Must happen after hvm_inject_hw_exception or it doesn't work right. */
         nv->nv_vmswitch_in_progress = 0;
         return 1;
     }
@@ -796,12 +796,12 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, 
 }
 
 int
-nsvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
-                      int errcode, unsigned long cr2)
+nsvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
 {
     ASSERT(vcpu_nestedhvm(v).nv_vvmcx != NULL);
 
-    nestedsvm_vmexit_defer(v, VMEXIT_EXCEPTION_DE + trapnr, errcode, cr2);
+    nestedsvm_vmexit_defer(v, VMEXIT_EXCEPTION_DE + trap->vector,
+                           trap->error_code, trap->cr2);
     return NESTEDHVM_VMEXIT_DONE;
 }
 
@@ -1176,7 +1176,7 @@ enum hvm_intblk nsvm_intr_blocked(struct
     }
 
     if ( nv->nv_vmexit_pending ) {
-        /* hvm_inject_exception() must have run before.
+        /* hvm_inject_hw_exception() must have run before.
          * exceptions have higher priority than interrupts.
          */
         return hvm_intblk_rflags_ie;
@@ -1509,7 +1509,7 @@ void svm_vmexit_do_stgi(struct cpu_user_
     unsigned int inst_len;
 
     if ( !nestedhvm_enabled(v->domain) ) {
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         return;
     }
 
@@ -1529,7 +1529,7 @@ void svm_vmexit_do_clgi(struct cpu_user_
     vintr_t intr;
 
     if ( !nestedhvm_enabled(v->domain) ) {
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         return;
     }
 
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed May 30 09:27:51 2012 +0100
@@ -109,7 +109,7 @@ void __update_guest_eip(struct cpu_user_
     curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;
 
     if ( regs->eflags & X86_EFLAGS_TF )
-        hvm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
 }
 
 static void svm_cpu_down(void)
@@ -1066,14 +1066,14 @@ static void svm_vcpu_destroy(struct vcpu
     passive_domain_destroy(v);
 }
 
-static void svm_inject_exception(
-    unsigned int trapnr, int errcode, unsigned long cr2)
+static void svm_inject_trap(struct hvm_trap *trap)
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
     eventinj_t event = vmcb->eventinj;
+    struct hvm_trap _trap = *trap;
 
-    switch ( trapnr )
+    switch ( _trap.vector )
     {
     case TRAP_debug:
         if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
@@ -1081,6 +1081,9 @@ static void svm_inject_exception(
             __restore_debug_registers(curr);
             vmcb_set_dr6(vmcb, vmcb_get_dr6(vmcb) | 0x4000);
         }
+        if ( cpu_has_monitor_trap_flag )
+            break;
+        /* fall through */
     case TRAP_int3:
         if ( curr->domain->debugger_attached )
         {
@@ -1093,29 +1096,30 @@ static void svm_inject_exception(
     if ( unlikely(event.fields.v) &&
          (event.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
     {
-        trapnr = hvm_combine_hw_exceptions(event.fields.vector, trapnr);
-        if ( trapnr == TRAP_double_fault )
-            errcode = 0;
+        _trap.vector = hvm_combine_hw_exceptions(
+            event.fields.vector, _trap.vector);
+        if ( _trap.vector == TRAP_double_fault )
+            _trap.error_code = 0;
     }
 
     event.bytes = 0;
     event.fields.v = 1;
     event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
-    event.fields.vector = trapnr;
-    event.fields.ev = (errcode != HVM_DELIVER_NO_ERROR_CODE);
-    event.fields.errorcode = errcode;
+    event.fields.vector = _trap.vector;
+    event.fields.ev = (_trap.error_code != HVM_DELIVER_NO_ERROR_CODE);
+    event.fields.errorcode = _trap.error_code;
 
     vmcb->eventinj = event;
 
-    if ( trapnr == TRAP_page_fault )
+    if ( _trap.vector == TRAP_page_fault )
     {
-        curr->arch.hvm_vcpu.guest_cr[2] = cr2;
-        vmcb_set_cr2(vmcb, cr2);
-        HVMTRACE_LONG_2D(PF_INJECT, errcode, TRC_PAR_LONG(cr2));
+        curr->arch.hvm_vcpu.guest_cr[2] = _trap.cr2;
+        vmcb_set_cr2(vmcb, _trap.cr2);
+        HVMTRACE_LONG_2D(PF_INJECT, _trap.error_code, TRC_PAR_LONG(_trap.cr2));
     }
     else
     {
-        HVMTRACE_2D(INJ_EXC, trapnr, errcode);
+        HVMTRACE_2D(INJ_EXC, _trap.vector, _trap.error_code);
     }
 }
 
@@ -1361,7 +1365,7 @@ static void svm_fpu_dirty_intercept(void
     {
        /* Check if l1 guest must make FPU ready for the l2 guest */
        if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS )
-           hvm_inject_exception(TRAP_no_device, HVM_DELIVER_NO_ERROR_CODE, 0);
+           hvm_inject_hw_exception(TRAP_no_device, HVM_DELIVER_NO_ERROR_CODE);
        else
            vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) & ~X86_CR0_TS);
        return;
@@ -1579,7 +1583,7 @@ static int svm_msr_read_intercept(unsign
     return X86EMUL_OKAY;
 
  gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -1708,7 +1712,7 @@ static int svm_msr_write_intercept(unsig
     return X86EMUL_OKAY;
 
  gpf:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -1784,13 +1788,13 @@ svm_vmexit_do_vmrun(struct cpu_user_regs
 {
     if (!nestedhvm_enabled(v->domain)) {
         gdprintk(XENLOG_ERR, "VMRUN: nestedhvm disabled, injecting #UD\n");
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         return;
     }
 
     if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
         gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #UD\n");
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         return;
     }
 
@@ -1830,7 +1834,7 @@ svm_vmexit_do_vmload(struct vmcb_struct 
     return;
 
  inject:
-    hvm_inject_exception(ret, HVM_DELIVER_NO_ERROR_CODE, 0);
+    hvm_inject_hw_exception(ret, HVM_DELIVER_NO_ERROR_CODE);
     return;
 }
 
@@ -1864,7 +1868,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct 
     return;
 
  inject:
-    hvm_inject_exception(ret, HVM_DELIVER_NO_ERROR_CODE, 0);
+    hvm_inject_hw_exception(ret, HVM_DELIVER_NO_ERROR_CODE);
     return;
 }
 
@@ -1880,11 +1884,11 @@ static void svm_vmexit_ud_intercept(stru
     switch ( rc )
     {
     case X86EMUL_UNHANDLEABLE:
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
-            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+            hvm_inject_hw_exception(ctxt.exn_vector, ctxt.exn_error_code);
         /* fall through */
     default:
         hvm_emulate_writeback(&ctxt);
@@ -1998,7 +2002,7 @@ static struct hvm_function_table __read_
     .set_guest_pat        = svm_set_guest_pat,
     .get_guest_pat        = svm_get_guest_pat,
     .set_tsc_offset       = svm_set_tsc_offset,
-    .inject_exception     = svm_inject_exception,
+    .inject_trap          = svm_inject_trap,
     .init_hypercall_page  = svm_init_hypercall_page,
     .event_pending        = svm_event_pending,
     .do_pmu_interrupt     = svm_do_pmu_interrupt,
@@ -2212,7 +2216,7 @@ void svm_vmexit_handler(struct cpu_user_
             break;
         }
 
-        hvm_inject_exception(TRAP_page_fault, regs->error_code, va);
+        hvm_inject_page_fault(regs->error_code, va);
         break;
     }
 
@@ -2285,7 +2289,7 @@ void svm_vmexit_handler(struct cpu_user_
                 __update_guest_eip(regs, vmcb->exitinfo2 - vmcb->rip);
         }
         else if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;
 
     case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
@@ -2293,7 +2297,7 @@ void svm_vmexit_handler(struct cpu_user_
         if ( cpu_has_svm_decode && (vmcb->exitinfo1 & (1ULL << 63)) )
             svm_vmexit_do_cr_access(vmcb, regs);
         else if ( !handle_mmio() ) 
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;
 
     case VMEXIT_INVLPG:
@@ -2303,7 +2307,7 @@ void svm_vmexit_handler(struct cpu_user_
             __update_guest_eip(regs, vmcb->nextrip - vmcb->rip);
         }
         else if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;
 
     case VMEXIT_INVLPGA:
@@ -2349,7 +2353,7 @@ void svm_vmexit_handler(struct cpu_user_
 
     case VMEXIT_MONITOR:
     case VMEXIT_MWAIT:
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;
 
     case VMEXIT_VMRUN:
@@ -2368,7 +2372,7 @@ void svm_vmexit_handler(struct cpu_user_
         svm_vmexit_do_clgi(regs, v);
         break;
     case VMEXIT_SKINIT:
-        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;
 
     case VMEXIT_XSETBV:
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c       Wed May 30 09:27:51 2012 +0100
@@ -251,7 +251,7 @@ void vmx_intr_assist(void)
     }
     else if ( intack.source == hvm_intsrc_mce )
     {
-        vmx_inject_hw_exception(TRAP_machine_check, HVM_DELIVER_NO_ERROR_CODE);
+        hvm_inject_hw_exception(TRAP_machine_check, HVM_DELIVER_NO_ERROR_CODE);
     }
     else
     {
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed May 30 09:27:51 2012 +0100
@@ -268,7 +268,7 @@ long_mode_do_msr_write(unsigned int msr,
 
  uncanonical_address:
     HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", msr);
-    vmx_inject_hw_exception(TRAP_gp_fault, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return HNDL_exception_raised;
 }
 
@@ -1310,10 +1310,9 @@ void nvmx_enqueue_n2_exceptions(struct v
                  nvmx->intr.intr_info, nvmx->intr.error_code);
 }
 
-static int nvmx_vmexit_exceptions(struct vcpu *v, unsigned int trapnr,
-                      int errcode, unsigned long cr2)
+static int nvmx_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
 {
-    nvmx_enqueue_n2_exceptions(v, trapnr, errcode);
+    nvmx_enqueue_n2_exceptions(v, trap->vector, trap->error_code);
     return NESTEDHVM_VMEXIT_DONE;
 }
 
@@ -1344,78 +1343,6 @@ static void __vmx_inject_exception(int t
         curr->arch.hvm_vmx.vmx_emulate = 1;
 }
 
-void vmx_inject_hw_exception(int trap, int error_code)
-{
-    unsigned long intr_info;
-    struct vcpu *curr = current;
-
-    int type = X86_EVENTTYPE_HW_EXCEPTION;
-
-    if ( nestedhvm_vcpu_in_guestmode(curr) )
-        intr_info = vcpu_2_nvmx(curr).intr.intr_info;
-    else
-        intr_info = __vmread(VM_ENTRY_INTR_INFO);
-
-    switch ( trap )
-    {
-    case TRAP_debug:
-        type = X86_EVENTTYPE_SW_EXCEPTION;
-        if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
-        {
-            __restore_debug_registers(curr);
-            write_debugreg(6, read_debugreg(6) | 0x4000);
-        }
-        if ( cpu_has_monitor_trap_flag )
-            break;
-        /* fall through */
-
-    case TRAP_int3:
-        if ( curr->domain->debugger_attached )
-        {
-            /* Debug/Int3: Trap to debugger. */
-            domain_pause_for_debugger();
-            return;
-        }
-
-        type = X86_EVENTTYPE_SW_EXCEPTION;
-        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 1); /* int3 */
-        break;
-
-    default:
-        if ( trap > TRAP_last_reserved )
-        {
-            type = X86_EVENTTYPE_SW_EXCEPTION;
-            __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 2); /* int imm8 */
-        }
-        break;
-    }
-
-    if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
-         (((intr_info >> 8) & 7) == X86_EVENTTYPE_HW_EXCEPTION) )
-    {
-        trap = hvm_combine_hw_exceptions((uint8_t)intr_info, trap);
-        if ( trap == TRAP_double_fault )
-            error_code = 0;
-    }
-
-    if ( nestedhvm_vcpu_in_guestmode(curr) &&
-         nvmx_intercepts_exception(curr, trap, error_code) )
-    {
-        nvmx_enqueue_n2_exceptions (curr, 
-            INTR_INFO_VALID_MASK | (type<<8) | trap,
-            error_code); 
-        return;
-    }
-    else
-        __vmx_inject_exception(trap, type, error_code);
-
-    if ( trap == TRAP_page_fault )
-        HVMTRACE_LONG_2D(PF_INJECT, error_code,
-                         TRC_PAR_LONG(current->arch.hvm_vcpu.guest_cr[2]));
-    else
-        HVMTRACE_2D(INJ_EXC, trap, error_code);
-}
-
 void vmx_inject_extint(int trap)
 {
     struct vcpu *v = current;
@@ -1454,13 +1381,67 @@ void vmx_inject_nmi(void)
                            HVM_DELIVER_NO_ERROR_CODE);
 }
 
-static void vmx_inject_exception(
-    unsigned int trapnr, int errcode, unsigned long cr2)
+static void vmx_inject_trap(struct hvm_trap *trap)
 {
-    if ( trapnr == TRAP_page_fault )
-        current->arch.hvm_vcpu.guest_cr[2] = cr2;
-
-    vmx_inject_hw_exception(trapnr, errcode);
+    unsigned long intr_info;
+    struct vcpu *curr = current;
+    struct hvm_trap _trap = *trap;
+
+    if ( (_trap.vector == TRAP_page_fault) &&
+         (_trap.type == X86_EVENTTYPE_HW_EXCEPTION) )
+        current->arch.hvm_vcpu.guest_cr[2] = _trap.cr2;
+
+    if ( nestedhvm_vcpu_in_guestmode(curr) )
+        intr_info = vcpu_2_nvmx(curr).intr.intr_info;
+    else
+        intr_info = __vmread(VM_ENTRY_INTR_INFO);
+
+    switch ( _trap.vector )
+    {
+    case TRAP_debug:
+        if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
+        {
+            __restore_debug_registers(curr);
+            write_debugreg(6, read_debugreg(6) | 0x4000);
+        }
+        if ( cpu_has_monitor_trap_flag )
+            break;
+        /* fall through */
+    case TRAP_int3:
+        if ( curr->domain->debugger_attached )
+        {
+            /* Debug/Int3: Trap to debugger. */
+            domain_pause_for_debugger();
+            return;
+        }
+    }
+
+    if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
+         (((intr_info >> 8) & 7) == X86_EVENTTYPE_HW_EXCEPTION) )
+    {
+        _trap.vector = hvm_combine_hw_exceptions(
+            (uint8_t)intr_info, _trap.vector);
+        if ( _trap.vector == TRAP_double_fault )
+            _trap.error_code = 0;
+    }
+
+    if ( nestedhvm_vcpu_in_guestmode(curr) &&
+         nvmx_intercepts_exception(curr, _trap.vector, _trap.error_code) )
+    {
+        nvmx_enqueue_n2_exceptions (curr, 
+            INTR_INFO_VALID_MASK | (_trap.type<<8) | _trap.vector,
+            _trap.error_code); 
+        return;
+    }
+    else
+        __vmx_inject_exception(_trap.vector, _trap.type, _trap.error_code);
+
+    if ( (_trap.vector == TRAP_page_fault) &&
+         (_trap.type == X86_EVENTTYPE_HW_EXCEPTION) )
+        HVMTRACE_LONG_2D(PF_INJECT, _trap.error_code,
+                         TRC_PAR_LONG(current->arch.hvm_vcpu.guest_cr[2]));
+    else
+        HVMTRACE_2D(INJ_EXC, _trap.vector, _trap.error_code);
 }
 
 static int vmx_event_pending(struct vcpu *v)
@@ -1532,7 +1513,7 @@ static struct hvm_function_table __read_
     .set_guest_pat        = vmx_set_guest_pat,
     .get_guest_pat        = vmx_get_guest_pat,
     .set_tsc_offset       = vmx_set_tsc_offset,
-    .inject_exception     = vmx_inject_exception,
+    .inject_trap          = vmx_inject_trap,
     .init_hypercall_page  = vmx_init_hypercall_page,
     .event_pending        = vmx_event_pending,
     .do_pmu_interrupt     = vmx_do_pmu_interrupt,
@@ -1554,7 +1535,7 @@ static struct hvm_function_table __read_
     .nhvm_vcpu_hostcr3    = nvmx_vcpu_hostcr3,
     .nhvm_vcpu_asid       = nvmx_vcpu_asid,
     .nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
-    .nhvm_vcpu_vmexit_trap = nvmx_vmexit_exceptions,
+    .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
     .nhvm_intr_blocked    = nvmx_intr_blocked
 };
 
@@ -1618,7 +1599,7 @@ static void update_guest_eip(void)
     }
 
     if ( regs->eflags & X86_EFLAGS_TF )
-        vmx_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
+        hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
 }
 
 static void vmx_fpu_dirty_intercept(void)
@@ -1922,7 +1903,7 @@ done:
     return X86EMUL_OKAY;
 
 gp_fault:
-    vmx_inject_hw_exception(TRAP_gp_fault, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -2030,7 +2011,7 @@ static int vmx_msr_write_intercept(unsig
 
         if ( (rc < 0) ||
              (vmx_add_host_load_msr(msr) < 0) )
-            vmx_inject_hw_exception(TRAP_machine_check, 0);
+            hvm_inject_hw_exception(TRAP_machine_check, 0);
         else
         {
             __vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
@@ -2073,7 +2054,7 @@ static int vmx_msr_write_intercept(unsig
     return X86EMUL_OKAY;
 
 gp_fault:
-    vmx_inject_hw_exception(TRAP_gp_fault, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -2222,11 +2203,11 @@ static void vmx_vmexit_ud_intercept(stru
     switch ( rc )
     {
     case X86EMUL_UNHANDLEABLE:
-        vmx_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
-            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+            hvm_inject_hw_exception(ctxt.exn_vector, ctxt.exn_error_code);
         /* fall through */
     default:
         hvm_emulate_writeback(&ctxt);
@@ -2440,7 +2421,12 @@ void vmx_vmexit_handler(struct cpu_user_
                 
                 if ( handled < 0 ) 
                 {
-                    vmx_inject_exception(TRAP_int3, HVM_DELIVER_NO_ERROR_CODE, 0);
+                    struct hvm_trap trap = {
+                        .vector = TRAP_int3,
+                        .type = X86_EVENTTYPE_SW_EXCEPTION,
+                        .error_code = HVM_DELIVER_NO_ERROR_CODE
+                    };
+                    hvm_inject_trap(&trap);
                     break;
                 }
                 else if ( handled )
@@ -2476,8 +2462,7 @@ void vmx_vmexit_handler(struct cpu_user_
                 break;
             }
 
-            v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;
-            vmx_inject_hw_exception(TRAP_page_fault, regs->error_code);
+            hvm_inject_page_fault(regs->error_code, exit_qualification);
             break;
         case TRAP_nmi:
             if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) !=
@@ -2658,7 +2643,7 @@ void vmx_vmexit_handler(struct cpu_user_
          * as far as vmexit.
          */
         WARN_ON(exit_reason == EXIT_REASON_GETSEC);
-        vmx_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;
 
     case EXIT_REASON_TPR_BELOW_THRESHOLD:
@@ -2666,7 +2651,7 @@ void vmx_vmexit_handler(struct cpu_user_
 
     case EXIT_REASON_APIC_ACCESS:
         if ( !vmx_handle_eoi_write() && !handle_mmio() )
-            vmx_inject_hw_exception(TRAP_gp_fault, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;
 
     case EXIT_REASON_IO_INSTRUCTION:
@@ -2675,7 +2660,7 @@ void vmx_vmexit_handler(struct cpu_user_
         {
             /* INS, OUTS */
             if ( !handle_mmio() )
-                vmx_inject_hw_exception(TRAP_gp_fault, 0);
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
         }
         else
         {
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/hvm/vmx/vpmu_core2.c
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed May 30 09:27:51 2012 +0100
@@ -421,7 +421,7 @@ static int core2_vpmu_do_wrmsr(unsigned 
                 if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
                     return 1;
                gdprintk(XENLOG_WARNING, "Debug Store is not supported on this cpu\n");
-                vmx_inject_hw_exception(TRAP_gp_fault, 0);
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
                 return 0;
             }
         }
@@ -437,7 +437,7 @@ static int core2_vpmu_do_wrmsr(unsigned 
     case MSR_CORE_PERF_GLOBAL_STATUS:
         gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
                  "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
-        vmx_inject_hw_exception(TRAP_gp_fault, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return 1;
     case MSR_IA32_PEBS_ENABLE:
         if ( msr_content & 1 )
@@ -452,7 +452,7 @@ static int core2_vpmu_do_wrmsr(unsigned 
                 gdprintk(XENLOG_WARNING,
                          "Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
                          msr_content);
-                vmx_inject_hw_exception(TRAP_gp_fault, 0);
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
                 return 1;
             }
             core2_vpmu_cxt->pmu_enable->ds_area_enable = msr_content ? 1 : 0;
@@ -544,7 +544,7 @@ static int core2_vpmu_do_wrmsr(unsigned 
             break;
         }
         if (inject_gp)
-            vmx_inject_hw_exception(TRAP_gp_fault, 0);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
         else
             wrmsrl(msr, msr_content);
     }
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Wed May 30 09:27:51 2012 +0100
@@ -304,12 +304,12 @@ vmexit:
     
 invalid_op:
     gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: invalid_op\n");
-    hvm_inject_exception(TRAP_invalid_op, 0, 0);
+    hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
     return X86EMUL_EXCEPTION;
 
 gp_fault:
     gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: gp_fault\n");
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -386,7 +386,7 @@ static int decode_vmx_inst(struct cpu_us
     return X86EMUL_OKAY;
 
 gp_fault:
-    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Wed May 30 09:27:51 2012 +0100
@@ -135,7 +135,7 @@ static int hvm_translate_linear_addr(
 
     if ( !okay )
     {
-        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return X86EMUL_EXCEPTION;
     }
 
diff -r ff16fb989c14 -r a418c32885ab xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed May 30 09:26:02 2012 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed May 30 09:27:51 2012 +0100
@@ -4825,7 +4825,7 @@ static mfn_t emulate_gva_to_mfn(struct v
     if ( gfn == INVALID_GFN ) 
     {
         if ( is_hvm_vcpu(v) )
-            hvm_inject_exception(TRAP_page_fault, pfec, vaddr);
+            hvm_inject_page_fault(pfec, vaddr);
         else
             propagate_page_fault(vaddr, pfec);
         return _mfn(BAD_GVA_TO_GFN);
diff -r ff16fb989c14 -r a418c32885ab xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed May 30 09:26:02 2012 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed May 30 09:27:51 2012 +0100
@@ -71,6 +71,13 @@ enum hvm_intblk {
 #define HVM_HAP_SUPERPAGE_2MB   0x00000001
 #define HVM_HAP_SUPERPAGE_1GB   0x00000002
 
+struct hvm_trap {
+    int           vector;
+    unsigned int  type;         /* X86_EVENTTYPE_* */
+    int           error_code;   /* HVM_DELIVER_NO_ERROR_CODE if n/a */
+    unsigned long cr2;          /* Only for TRAP_page_fault h/w exception */
+};
+
 /*
  * The hardware virtual machine (HVM) interface abstracts away from the
  * x86/x86_64 CPU virtualization assist specifics. Currently this interface
@@ -124,8 +131,7 @@ struct hvm_function_table {
 
     void (*set_tsc_offset)(struct vcpu *v, u64 offset);
 
-    void (*inject_exception)(unsigned int trapnr, int errcode,
-                             unsigned long cr2);
+    void (*inject_trap)(struct hvm_trap *trap);
 
     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
 
@@ -162,10 +168,7 @@ struct hvm_function_table {
                                 struct cpu_user_regs *regs);
     int (*nhvm_vcpu_vmexit)(struct vcpu *v, struct cpu_user_regs *regs,
                                 uint64_t exitcode);
-    int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v,
-                                unsigned int trapnr,
-                                int errcode,
-                                unsigned long cr2);
+    int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v, struct hvm_trap *trap);
     uint64_t (*nhvm_vcpu_guestcr3)(struct vcpu *v);
     uint64_t (*nhvm_vcpu_hostcr3)(struct vcpu *v);
     uint32_t (*nhvm_vcpu_asid)(struct vcpu *v);
@@ -320,7 +323,9 @@ void hvm_migrate_timers(struct vcpu *v);
 void hvm_do_resume(struct vcpu *v);
 void hvm_migrate_pirqs(struct vcpu *v);
 
-void hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2);
+void hvm_inject_trap(struct hvm_trap *trap);
+void hvm_inject_hw_exception(unsigned int trapnr, int errcode);
+void hvm_inject_page_fault(int errcode, unsigned long cr2);
 
 static inline int hvm_event_pending(struct vcpu *v)
 {
@@ -479,8 +484,7 @@ int nhvm_vcpu_vmexit(struct vcpu *v, str
 /* inject vmexit into l1 guest. l1 guest will see a VMEXIT due to
  * 'trapnr' exception.
  */ 
-int nhvm_vcpu_vmexit_trap(struct vcpu *v,
-    unsigned int trapnr, int errcode, unsigned long cr2);
+int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap);
 
 /* returns l2 guest cr3 in l2 guest physical address space. */
 uint64_t nhvm_vcpu_guestcr3(struct vcpu *v);
diff -r ff16fb989c14 -r a418c32885ab xen/include/asm-x86/hvm/svm/nestedsvm.h
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h   Wed May 30 09:26:02 2012 +0100
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h   Wed May 30 09:27:51 2012 +0100
@@ -114,8 +114,7 @@ int nsvm_vcpu_hostrestore(struct vcpu *v
 int nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs);
 int nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
     uint64_t exitcode);
-int nsvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
-                      int errcode, unsigned long cr2);
+int nsvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap);
 uint64_t nsvm_vcpu_guestcr3(struct vcpu *v);
 uint64_t nsvm_vcpu_hostcr3(struct vcpu *v);
 uint32_t nsvm_vcpu_asid(struct vcpu *v);
diff -r ff16fb989c14 -r a418c32885ab xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Wed May 30 09:26:02 2012 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h    Wed May 30 09:27:51 2012 +0100
@@ -164,10 +164,9 @@ struct hvm_vcpu {
     /* Callback into x86_emulate when emulating FPU/MMX/XMM instructions. */
     void (*fpu_exception_callback)(void *, struct cpu_user_regs *);
     void *fpu_exception_callback_arg;
-    /* Pending hw/sw interrupt */
-    int           inject_trap;       /* -1 for nothing to inject */
-    int           inject_error_code;
-    unsigned long inject_cr2;
+
+    /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
+    struct hvm_trap     inject_trap;
 
     struct viridian_vcpu viridian;
 };
diff -r ff16fb989c14 -r a418c32885ab xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Wed May 30 09:26:02 2012 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Wed May 30 09:27:51 2012 +0100
@@ -387,7 +387,6 @@ static inline int __vmxon(u64 addr)
     return rc;
 }
 
-void vmx_inject_hw_exception(int trap, int error_code);
 void vmx_inject_extint(int trap);
 void vmx_inject_nmi(void);
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
