Re: [Xen-devel] [PATCH 03/15] x86/emul: Rename hvm_trap to x86_event and move it into the emulation infrastructure
> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 23 November 2016 15:39
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich <JBeulich@xxxxxxxx>;
>     Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Jun Nakajima <jun.nakajima@xxxxxxxxx>;
>     Kevin Tian <kevin.tian@xxxxxxxxx>; Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>;
>     Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> Subject: [PATCH 03/15] x86/emul: Rename hvm_trap to x86_event and move it
>     into the emulation infrastructure
>
> The x86 emulator needs to gain an understanding of interrupts and
> exceptions generated by its actions.  The naming choice is to match both
> the Intel and AMD terms, and to avoid 'trap' specifically as it has an
> architectural meaning different to its current usage.
>
> While making this change, make other changes for consistency
>
>  * Rename *_trap() infrastructure to *_event()
>  * Rename trapnr/trap parameters to vector
>  * Convert hvm_inject_hw_exception() and hvm_inject_page_fault() to being
>    static inlines, as they are only thin wrappers around hvm_inject_event()
>
> No functional change.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> CC: Kevin Tian <kevin.tian@xxxxxxxxx>
> CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> ---
>  xen/arch/x86/hvm/emulate.c              |  6 +--
>  xen/arch/x86/hvm/hvm.c                  | 33 ++++------------
>  xen/arch/x86/hvm/io.c                   |  2 +-
>  xen/arch/x86/hvm/svm/nestedsvm.c        |  7 ++--
>  xen/arch/x86/hvm/svm/svm.c              | 62 ++++++++++++++---------------
>  xen/arch/x86/hvm/vmx/vmx.c              | 66 +++++++++++++++----------------
>  xen/arch/x86/hvm/vmx/vvmx.c             | 11 +++---
>  xen/arch/x86/x86_emulate/x86_emulate.c  | 11 ++++++
>  xen/arch/x86/x86_emulate/x86_emulate.h  | 22 +++++++++++
>  xen/include/asm-x86/hvm/emulate.h       |  2 +-
>  xen/include/asm-x86/hvm/hvm.h           | 69 ++++++++++++++++-----------------
>  xen/include/asm-x86/hvm/svm/nestedsvm.h |  6 +--
>  xen/include/asm-x86/hvm/vcpu.h          |  2 +-
>  xen/include/asm-x86/hvm/vmx/vvmx.h      |  4 +-
>  14 files changed, 159 insertions(+), 144 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index f24e211..d0c3185 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1679,7 +1679,7 @@ static int hvmemul_invlpg(
>           * violations, so squash them.
>           */
>          hvmemul_ctxt->exn_pending = 0;
> -        hvmemul_ctxt->trap = (struct hvm_trap){};
> +        hvmemul_ctxt->trap = (struct x86_event){};

Can't say I like that way of initializing but...
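(For reference, the construct in question is a C99 compound literal; assigning an empty one zeroes every member of the structure. Note that the empty brace list is a GNU extension: standard C requires at least one initializer, i.e. "{ 0 }". A standalone sketch of the idiom, with a cut-down structure invented purely for illustration:)

    #include <stdint.h>

    /* Cut-down stand-in for struct x86_event, for illustration only. */
    struct demo_event {
        int16_t  vector;
        uint8_t  type;
        uint32_t error_code;
    };

    static void demo_reset(struct demo_event *e)
    {
        /*
         * Compound-literal assignment: every named member becomes zero,
         * much like memset(e, 0, sizeof(*e)) for the fields.  "{}" with
         * no initializers is a GNU extension; portable C spells it {0}.
         */
        *e = (struct demo_event){};
    }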

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

>          rc = X86EMUL_OKAY;
>      }
>
> @@ -1868,7 +1868,7 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
>          break;
>      case X86EMUL_EXCEPTION:
>          if ( ctxt.exn_pending )
> -            hvm_inject_trap(&ctxt.trap);
> +            hvm_inject_event(&ctxt.trap);
>          /* fallthrough */
>      default:
>          hvm_emulate_writeback(&ctxt);
> @@ -1928,7 +1928,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
>          break;
>      case X86EMUL_EXCEPTION:
>          if ( ctx.exn_pending )
> -            hvm_inject_trap(&ctx.trap);
> +            hvm_inject_event(&ctx.trap);
>          break;
>      }
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 25dc759..7b434aa 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -535,7 +535,7 @@ void hvm_do_resume(struct vcpu *v)
>      /* Inject pending hw/sw trap */
>      if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
>      {
> -        hvm_inject_trap(&v->arch.hvm_vcpu.inject_trap);
> +        hvm_inject_event(&v->arch.hvm_vcpu.inject_trap);
>          v->arch.hvm_vcpu.inject_trap.vector = -1;
>      }
>  }
>
> @@ -1676,19 +1676,19 @@ void hvm_triple_fault(void)
>      domain_shutdown(d, reason);
>  }
>
> -void hvm_inject_trap(const struct hvm_trap *trap)
> +void hvm_inject_event(const struct x86_event *event)
>  {
>      struct vcpu *curr = current;
>
>      if ( nestedhvm_enabled(curr->domain) &&
>           !nestedhvm_vmswitch_in_progress(curr) &&
>           nestedhvm_vcpu_in_guestmode(curr) &&
> -         nhvm_vmcx_guest_intercepts_trap(
> -             curr, trap->vector, trap->error_code) )
> +         nhvm_vmcx_guest_intercepts_event(
> +             curr, event->vector, event->error_code) )
>      {
>          enum nestedhvm_vmexits nsret;
>
> -        nsret = nhvm_vcpu_vmexit_trap(curr, trap);
> +        nsret = nhvm_vcpu_vmexit_event(curr, event);
>
>          switch ( nsret )
>          {
> @@ -1704,26 +1704,7 @@ void hvm_inject_trap(const struct hvm_trap *trap)
>          }
>      }
>
> -    hvm_funcs.inject_trap(trap);
> -}
> -
> -void hvm_inject_hw_exception(unsigned int trapnr, int errcode)
> -{
> -    struct hvm_trap trap = {
> -        .vector = trapnr,
> -        .type = X86_EVENTTYPE_HW_EXCEPTION,
> -        .error_code = errcode };
> -    hvm_inject_trap(&trap);
> -}
> -
> -void hvm_inject_page_fault(int errcode, unsigned long cr2)
> -{
> -    struct hvm_trap trap = {
> -        .vector = TRAP_page_fault,
> -        .type = X86_EVENTTYPE_HW_EXCEPTION,
> -        .error_code = errcode,
> -        .cr2 = cr2 };
> -    hvm_inject_trap(&trap);
> +    hvm_funcs.inject_event(event);
>  }
>
>  int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
> @@ -4096,7 +4077,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
>          break;
>      case X86EMUL_EXCEPTION:
>          if ( ctxt.exn_pending )
> -            hvm_inject_trap(&ctxt.trap);
> +            hvm_inject_event(&ctxt.trap);
>          /* fall through */
>      default:
>          hvm_emulate_writeback(&ctxt);
> diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> index 7305801..1279f68 100644
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -103,7 +103,7 @@ int handle_mmio(void)
>          return 0;
>      case X86EMUL_EXCEPTION:
>          if ( ctxt.exn_pending )
> -            hvm_inject_trap(&ctxt.trap);
> +            hvm_inject_event(&ctxt.trap);
>          break;
>      default:
>          break;
> diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
> index f9b38ab..b6b8526 100644
> --- a/xen/arch/x86/hvm/svm/nestedsvm.c
> +++ b/xen/arch/x86/hvm/svm/nestedsvm.c
> @@ -821,7 +821,7 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
>  }
>
>  int
> -nsvm_vcpu_vmexit_trap(struct vcpu *v, const struct hvm_trap *trap)
> +nsvm_vcpu_vmexit_event(struct vcpu *v, const struct x86_event *trap)
>  {
>      ASSERT(vcpu_nestedhvm(v).nv_vvmcx != NULL);
>
> @@ -994,10 +994,11 @@ nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v,
>  }
>
>  bool_t
> -nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr, int errcode)
> +nsvm_vmcb_guest_intercepts_event(
> +    struct vcpu *v, unsigned int vector, int errcode)
>  {
>      return nsvm_vmcb_guest_intercepts_exitcode(v,
> -        guest_cpu_user_regs(), VMEXIT_EXCEPTION_DE + trapnr);
> +        guest_cpu_user_regs(), VMEXIT_EXCEPTION_DE + vector);
>  }
>
>  static int
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index f737d8c..66eb30b 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1203,15 +1203,15 @@ static void svm_vcpu_destroy(struct vcpu *v)
>      passive_domain_destroy(v);
>  }
>
> -static void svm_inject_trap(const struct hvm_trap *trap)
> +static void svm_inject_event(const struct x86_event *event)
>  {
>      struct vcpu *curr = current;
>      struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
> -    eventinj_t event = vmcb->eventinj;
> -    struct hvm_trap _trap = *trap;
> +    eventinj_t eventinj = vmcb->eventinj;
> +    struct x86_event _event = *event;
>      const struct cpu_user_regs *regs = guest_cpu_user_regs();
>
> -    switch ( _trap.vector )
> +    switch ( _event.vector )
>      {
>      case TRAP_debug:
>          if ( regs->eflags & X86_EFLAGS_TF )
> @@ -1229,21 +1229,21 @@ static void svm_inject_trap(const struct hvm_trap *trap)
>          }
>      }
>
> -    if ( unlikely(event.fields.v) &&
> -         (event.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
> +    if ( unlikely(eventinj.fields.v) &&
> +         (eventinj.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
>      {
> -        _trap.vector = hvm_combine_hw_exceptions(
> -            event.fields.vector, _trap.vector);
> -        if ( _trap.vector == TRAP_double_fault )
> -            _trap.error_code = 0;
> +        _event.vector = hvm_combine_hw_exceptions(
> +            eventinj.fields.vector, _event.vector);
> +        if ( _event.vector == TRAP_double_fault )
> +            _event.error_code = 0;
>      }
>
> -    event.bytes = 0;
> -    event.fields.v = 1;
> -    event.fields.vector = _trap.vector;
> +    eventinj.bytes = 0;
> +    eventinj.fields.v = 1;
> +    eventinj.fields.vector = _event.vector;
>
>      /* Refer to AMD Vol 2: System Programming, 15.20 Event Injection. */
> -    switch ( _trap.type )
> +    switch ( _event.type )
>      {
>      case X86_EVENTTYPE_SW_INTERRUPT: /* int $n */
>          /*
> @@ -1253,8 +1253,8 @@ static void svm_inject_trap(const struct hvm_trap *trap)
>           * moved eip forward if appropriate.
>           */
>          if ( cpu_has_svm_nrips )
> -            vmcb->nextrip = regs->eip + _trap.insn_len;
> -        event.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
> +            vmcb->nextrip = regs->eip + _event.insn_len;
> +        eventinj.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
>          break;
>
>      case X86_EVENTTYPE_PRI_SW_EXCEPTION: /* icebp */
>          /*
> @@ -1265,7 +1265,7 @@ static void svm_inject_trap(const struct hvm_trap *trap)
>           */
>          if ( cpu_has_svm_nrips )
>              vmcb->nextrip = regs->eip;
> -        event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
> +        eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
>          break;
>
>      case X86_EVENTTYPE_SW_EXCEPTION: /* int3, into */
>          /*
> @@ -1279,28 +1279,28 @@ static void svm_inject_trap(const struct hvm_trap *trap)
>           * the correct faulting eip should a fault occur.
>           */
>          if ( cpu_has_svm_nrips )
> -            vmcb->nextrip = regs->eip + _trap.insn_len;
> -        event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
> +            vmcb->nextrip = regs->eip + _event.insn_len;
> +        eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
>          break;
>
>      default:
> -        event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
> -        event.fields.ev = (_trap.error_code != HVM_DELIVER_NO_ERROR_CODE);
> -        event.fields.errorcode = _trap.error_code;
> +        eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
> +        eventinj.fields.ev = (_event.error_code != HVM_DELIVER_NO_ERROR_CODE);
> +        eventinj.fields.errorcode = _event.error_code;
>          break;
>      }
>
> -    vmcb->eventinj = event;
> +    vmcb->eventinj = eventinj;
>
> -    if ( _trap.vector == TRAP_page_fault )
> +    if ( _event.vector == TRAP_page_fault )
>      {
> -        curr->arch.hvm_vcpu.guest_cr[2] = _trap.cr2;
> -        vmcb_set_cr2(vmcb, _trap.cr2);
> -        HVMTRACE_LONG_2D(PF_INJECT, _trap.error_code, TRC_PAR_LONG(_trap.cr2));
> +        curr->arch.hvm_vcpu.guest_cr[2] = _event.cr2;
> +        vmcb_set_cr2(vmcb, _event.cr2);
> +        HVMTRACE_LONG_2D(PF_INJECT, _event.error_code, TRC_PAR_LONG(_event.cr2));
>      }
>      else
>      {
> -        HVMTRACE_2D(INJ_EXC, _trap.vector, _trap.error_code);
> +        HVMTRACE_2D(INJ_EXC, _event.vector, _event.error_code);
>      }
>  }
>
> @@ -2250,7 +2250,7 @@ static struct hvm_function_table __initdata svm_function_table = {
>      .set_guest_pat = svm_set_guest_pat,
>      .get_guest_pat = svm_get_guest_pat,
>      .set_tsc_offset = svm_set_tsc_offset,
> -    .inject_trap = svm_inject_trap,
> +    .inject_event = svm_inject_event,
>      .init_hypercall_page = svm_init_hypercall_page,
>      .event_pending = svm_event_pending,
>      .invlpg = svm_invlpg,
> @@ -2265,9 +2265,9 @@ static struct hvm_function_table __initdata svm_function_table = {
>      .nhvm_vcpu_initialise = nsvm_vcpu_initialise,
>      .nhvm_vcpu_destroy = nsvm_vcpu_destroy,
>      .nhvm_vcpu_reset = nsvm_vcpu_reset,
> -    .nhvm_vcpu_vmexit_trap = nsvm_vcpu_vmexit_trap,
> +    .nhvm_vcpu_vmexit_event = nsvm_vcpu_vmexit_event,
>      .nhvm_vcpu_p2m_base = nsvm_vcpu_hostcr3,
> -    .nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap,
> +    .nhvm_vmcx_guest_intercepts_event = nsvm_vmcb_guest_intercepts_event,
>      .nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
>      .nhvm_intr_blocked = nsvm_intr_blocked,
>      .nhvm_hap_walk_L1_p2m = nsvm_hap_walk_L1_p2m,
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 0a52624..b1d8a0b 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1623,9 +1623,9 @@ void nvmx_enqueue_n2_exceptions(struct vcpu *v,
>                   nvmx->intr.intr_info, nvmx->intr.error_code);
>  }
>
> -static int nvmx_vmexit_trap(struct vcpu *v, const struct hvm_trap *trap)
> +static int nvmx_vmexit_event(struct vcpu *v, const struct x86_event *event)
>  {
> -    nvmx_enqueue_n2_exceptions(v, trap->vector, trap->error_code,
> +    nvmx_enqueue_n2_exceptions(v, event->vector, event->error_code,
>                                 hvm_intsrc_none);
>      return NESTEDHVM_VMEXIT_DONE;
>  }
> @@ -1707,13 +1707,13 @@ void vmx_inject_nmi(void)
>   *  - #DB is X86_EVENTTYPE_HW_EXCEPTION, except when generated by
>   *    opcode 0xf1 (which is X86_EVENTTYPE_PRI_SW_EXCEPTION)
>   */
> -static void vmx_inject_trap(const struct hvm_trap *trap)
> +static void vmx_inject_event(const struct x86_event *event)
>  {
>      unsigned long intr_info;
>      struct vcpu *curr = current;
> -    struct hvm_trap _trap = *trap;
> +    struct x86_event _event = *event;
>
> -    switch ( _trap.vector | -(_trap.type == X86_EVENTTYPE_SW_INTERRUPT) )
> +    switch ( _event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT) )
>      {
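(An aside for readers of this switch expression, since the idiom is dense: when the event is a software interrupt, the negated comparison evaluates to -1, so the OR produces a value no case label can match, and INT $n deliberately skips the TRAP_debug/TRAP_page_fault special-casing. A standalone sketch, with the constants reduced to the two needed here:)

    /* Standalone sketch of the "vector | -(cond)" dispatch trick. */
    #include <stdio.h>

    #define TRAP_debug                 1
    #define X86_EVENTTYPE_HW_EXCEPTION 3
    #define X86_EVENTTYPE_SW_INTERRUPT 4

    static const char *classify(int vector, int type)
    {
        /* For a software interrupt, -(1) == -1: all bits set, no match. */
        switch ( vector | -(type == X86_EVENTTYPE_SW_INTERRUPT) )
        {
        case TRAP_debug:
            return "hardware #DB special-casing";
        default:
            return "plain injection";
        }
    }

    int main(void)
    {
        /* A hardware #DB hits the case label... */
        printf("%s\n", classify(TRAP_debug, X86_EVENTTYPE_HW_EXCEPTION));
        /* ...but a software "int $1" with the same vector does not. */
        printf("%s\n", classify(TRAP_debug, X86_EVENTTYPE_SW_INTERRUPT));
        return 0;
    }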
>      case TRAP_debug:
>          if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
> @@ -1722,7 +1722,7 @@ static void vmx_inject_trap(const struct hvm_trap *trap)
>              write_debugreg(6, read_debugreg(6) | DR_STEP);
>          }
>          if ( !nestedhvm_vcpu_in_guestmode(curr) ||
> -             !nvmx_intercepts_exception(curr, TRAP_debug, _trap.error_code) )
> +             !nvmx_intercepts_exception(curr, TRAP_debug, _event.error_code) )
>          {
>              unsigned long val;
>
> @@ -1744,8 +1744,8 @@ static void vmx_inject_trap(const struct hvm_trap *trap)
>          break;
>
>      case TRAP_page_fault:
> -        ASSERT(_trap.type == X86_EVENTTYPE_HW_EXCEPTION);
> -        curr->arch.hvm_vcpu.guest_cr[2] = _trap.cr2;
> +        ASSERT(_event.type == X86_EVENTTYPE_HW_EXCEPTION);
> +        curr->arch.hvm_vcpu.guest_cr[2] = _event.cr2;
>          break;
>      }
>
> @@ -1758,34 +1758,34 @@ static void vmx_inject_trap(const struct hvm_trap *trap)
>           (MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK) ==
>            X86_EVENTTYPE_HW_EXCEPTION) )
>      {
> -        _trap.vector = hvm_combine_hw_exceptions(
> -            (uint8_t)intr_info, _trap.vector);
> -        if ( _trap.vector == TRAP_double_fault )
> -            _trap.error_code = 0;
> +        _event.vector = hvm_combine_hw_exceptions(
> +            (uint8_t)intr_info, _event.vector);
> +        if ( _event.vector == TRAP_double_fault )
> +            _event.error_code = 0;
>      }
>
> -    if ( _trap.type >= X86_EVENTTYPE_SW_INTERRUPT )
> -        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, _trap.insn_len);
> +    if ( _event.type >= X86_EVENTTYPE_SW_INTERRUPT )
> +        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, _event.insn_len);
>
>      if ( nestedhvm_vcpu_in_guestmode(curr) &&
> -         nvmx_intercepts_exception(curr, _trap.vector, _trap.error_code) )
> +         nvmx_intercepts_exception(curr, _event.vector, _event.error_code) )
>      {
>          nvmx_enqueue_n2_exceptions (curr,
>              INTR_INFO_VALID_MASK |
> -            MASK_INSR(_trap.type, INTR_INFO_INTR_TYPE_MASK) |
> -            MASK_INSR(_trap.vector, INTR_INFO_VECTOR_MASK),
> -            _trap.error_code, hvm_intsrc_none);
> +            MASK_INSR(_event.type, INTR_INFO_INTR_TYPE_MASK) |
> +            MASK_INSR(_event.vector, INTR_INFO_VECTOR_MASK),
> +            _event.error_code, hvm_intsrc_none);
>          return;
>      }
>      else
> -        __vmx_inject_exception(_trap.vector, _trap.type, _trap.error_code);
> +        __vmx_inject_exception(_event.vector, _event.type, _event.error_code);
>
> -    if ( (_trap.vector == TRAP_page_fault) &&
> -         (_trap.type == X86_EVENTTYPE_HW_EXCEPTION) )
> -        HVMTRACE_LONG_2D(PF_INJECT, _trap.error_code,
> +    if ( (_event.vector == TRAP_page_fault) &&
> +         (_event.type == X86_EVENTTYPE_HW_EXCEPTION) )
> +        HVMTRACE_LONG_2D(PF_INJECT, _event.error_code,
>                           TRC_PAR_LONG(curr->arch.hvm_vcpu.guest_cr[2]));
>      else
> -        HVMTRACE_2D(INJ_EXC, _trap.vector, _trap.error_code);
> +        HVMTRACE_2D(INJ_EXC, _event.vector, _event.error_code);
>  }
>
>  static int vmx_event_pending(struct vcpu *v)
> @@ -2162,7 +2162,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
>      .set_guest_pat = vmx_set_guest_pat,
>      .get_guest_pat = vmx_get_guest_pat,
>      .set_tsc_offset = vmx_set_tsc_offset,
> -    .inject_trap = vmx_inject_trap,
> +    .inject_event = vmx_inject_event,
>      .init_hypercall_page = vmx_init_hypercall_page,
>      .event_pending = vmx_event_pending,
>      .invlpg = vmx_invlpg,
> @@ -2182,8 +2182,8 @@ static struct hvm_function_table __initdata vmx_function_table = {
>      .nhvm_vcpu_reset = nvmx_vcpu_reset,
>      .nhvm_vcpu_p2m_base = nvmx_vcpu_eptp_base,
>      .nhvm_vmcx_hap_enabled = nvmx_ept_enabled,
> -    .nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
> -    .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
> +    .nhvm_vmcx_guest_intercepts_event = nvmx_intercepts_exception,
> +    .nhvm_vcpu_vmexit_event = nvmx_vmexit_event,
>      .nhvm_intr_blocked = nvmx_intr_blocked,
>      .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
>      .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
> @@ -3201,7 +3201,7 @@ static int vmx_handle_eoi_write(void)
>   */
>  static void vmx_propagate_intr(unsigned long intr)
>  {
> -    struct hvm_trap trap = {
> +    struct x86_event event = {
>          .vector = MASK_EXTR(intr, INTR_INFO_VECTOR_MASK),
>          .type = MASK_EXTR(intr, INTR_INFO_INTR_TYPE_MASK),
>      };
> @@ -3210,20 +3210,20 @@ static void vmx_propagate_intr(unsigned long intr)
>      if ( intr & INTR_INFO_DELIVER_CODE_MASK )
>      {
>          __vmread(VM_EXIT_INTR_ERROR_CODE, &tmp);
> -        trap.error_code = tmp;
> +        event.error_code = tmp;
>      }
>      else
> -        trap.error_code = HVM_DELIVER_NO_ERROR_CODE;
> +        event.error_code = HVM_DELIVER_NO_ERROR_CODE;
>
> -    if ( trap.type >= X86_EVENTTYPE_SW_INTERRUPT )
> +    if ( event.type >= X86_EVENTTYPE_SW_INTERRUPT )
>      {
>          __vmread(VM_EXIT_INSTRUCTION_LEN, &tmp);
> -        trap.insn_len = tmp;
> +        event.insn_len = tmp;
>      }
>      else
> -        trap.insn_len = 0;
> +        event.insn_len = 0;
>
> -    hvm_inject_trap(&trap);
> +    hvm_inject_event(&event);
>  }
>
>  static void vmx_idtv_reinject(unsigned long idtv_info)
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index bed2e0a..b5837d4 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -491,18 +491,19 @@ static void vmreturn(struct cpu_user_regs *regs, enum vmx_ops_result ops_res)
>      regs->eflags = eflags;
>  }
>
> -bool_t nvmx_intercepts_exception(struct vcpu *v, unsigned int trap,
> -                                 int error_code)
> +bool_t nvmx_intercepts_exception(
> +    struct vcpu *v, unsigned int vector, int error_code)
>  {
>      u32 exception_bitmap, pfec_match=0, pfec_mask=0;
>      int r;
>
> -    ASSERT ( trap < 32 );
> +    ASSERT(vector < 32);
>
>      exception_bitmap = get_vvmcs(v, EXCEPTION_BITMAP);
> -    r = exception_bitmap & (1 << trap) ? 1: 0;
> +    r = exception_bitmap & (1 << vector) ? 1: 0;
>
> -    if ( trap == TRAP_page_fault ) {
> +    if ( vector == TRAP_page_fault )
> +    {
>          pfec_match = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MATCH);
>          pfec_mask = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MASK);
>          if ( (error_code & pfec_mask) != pfec_match )
> diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
> index c5d9664..2e65322 100644
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -5453,6 +5453,17 @@ static void __init __maybe_unused build_assertions(void)
>      BUILD_BUG_ON(x86_seg_ds != 3);
>      BUILD_BUG_ON(x86_seg_fs != 4);
>      BUILD_BUG_ON(x86_seg_gs != 5);
> +
> +    /*
> +     * Check X86_EVENTTYPE_* against VMCB EVENTINJ and VMCS INTR_INFO type
> +     * fields.
> +     */
> +    BUILD_BUG_ON(X86_EVENTTYPE_EXT_INTR != 0);
> +    BUILD_BUG_ON(X86_EVENTTYPE_NMI != 2);
> +    BUILD_BUG_ON(X86_EVENTTYPE_HW_EXCEPTION != 3);
> +    BUILD_BUG_ON(X86_EVENTTYPE_SW_INTERRUPT != 4);
> +    BUILD_BUG_ON(X86_EVENTTYPE_PRI_SW_EXCEPTION != 5);
> +    BUILD_BUG_ON(X86_EVENTTYPE_SW_EXCEPTION != 6);
>  }
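(These build-time assertions pin the new enum to the hardware encodings named in its comment, so a drifted value breaks the compile rather than misinjecting events. A minimal stand-in for the macro, assuming the common negative-array-size implementation rather than Xen's exact definition:)

    /* Hypothetical stand-in; Xen's real BUILD_BUG_ON differs in detail. */
    #define DEMO_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    enum demo_event_type {
        DEMO_EXT_INTR,          /* 0 */
        DEMO_NMI = 2,
        DEMO_HW_EXCEPTION,      /* 3 */
    };

    static void demo_assertions(void)
    {
        DEMO_BUILD_BUG_ON(DEMO_NMI != 2);   /* compiles fine */
        /*
         * DEMO_BUILD_BUG_ON(DEMO_NMI != 3) would declare a negative-size
         * array and fail to compile, catching the mismatch at build time.
         */
    }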
>
>  #ifdef __XEN__
> diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
> index 93b268e..b0d8e6f 100644
> --- a/xen/arch/x86/x86_emulate/x86_emulate.h
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.h
> @@ -67,6 +67,28 @@ enum x86_swint_emulation {
>      x86_swint_emulate_all,  /* Help needed with all software events */
>  };
>
> +/*
> + * x86 event types.  This enumeration is valid for:
> + *  Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
> + *  AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
> + */
> +enum x86_event_type {
> +    X86_EVENTTYPE_EXT_INTR,            /* External interrupt */
> +    X86_EVENTTYPE_NMI = 2,             /* NMI */
> +    X86_EVENTTYPE_HW_EXCEPTION,        /* Hardware exception */
> +    X86_EVENTTYPE_SW_INTERRUPT,        /* Software interrupt (CD nn) */
> +    X86_EVENTTYPE_PRI_SW_EXCEPTION,    /* ICEBP (F1) */
> +    X86_EVENTTYPE_SW_EXCEPTION,        /* INT3 (CC), INTO (CE) */
> +};
> +
> +struct x86_event {
> +    int16_t       vector;
> +    uint8_t       type;         /* X86_EVENTTYPE_* */
> +    uint8_t       insn_len;     /* Instruction length */
> +    uint32_t      error_code;   /* HVM_DELIVER_NO_ERROR_CODE if n/a */
> +    unsigned long cr2;          /* Only for TRAP_page_fault h/w exception */
> +};
> +
>  /*
>   * Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
>   * segment descriptor. It happens to match the format of an AMD SVM VMCB.
> diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
> index d4186a2..3b7ec33 100644
> --- a/xen/include/asm-x86/hvm/emulate.h
> +++ b/xen/include/asm-x86/hvm/emulate.h
> @@ -30,7 +30,7 @@ struct hvm_emulate_ctxt {
>      unsigned long seg_reg_dirty;
>
>      bool_t exn_pending;
> -    struct hvm_trap trap;
> +    struct x86_event trap;
>
>      uint32_t intr_shadow;
>
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 7e7462e..51a64f7 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -77,14 +77,6 @@ enum hvm_intblk {
>  #define HVM_HAP_SUPERPAGE_2MB    0x00000001
>  #define HVM_HAP_SUPERPAGE_1GB    0x00000002
>
> -struct hvm_trap {
> -    int16_t       vector;
> -    uint8_t       type;         /* X86_EVENTTYPE_* */
> -    uint8_t       insn_len;     /* Instruction length */
> -    uint32_t      error_code;   /* HVM_DELIVER_NO_ERROR_CODE if n/a */
> -    unsigned long cr2;          /* Only for TRAP_page_fault h/w exception */
> -};
> -
>  /*
>   * The hardware virtual machine (HVM) interface abstracts away from the
>   * x86/x86_64 CPU virtualization assist specifics.  Currently this interface
> @@ -152,7 +144,7 @@ struct hvm_function_table {
>
>      void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);
>
> -    void (*inject_trap)(const struct hvm_trap *trap);
> +    void (*inject_event)(const struct x86_event *event);
>
>      void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
>
> @@ -185,11 +177,10 @@ struct hvm_function_table {
>      int (*nhvm_vcpu_initialise)(struct vcpu *v);
>      void (*nhvm_vcpu_destroy)(struct vcpu *v);
>      int (*nhvm_vcpu_reset)(struct vcpu *v);
> -    int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v, const struct hvm_trap *trap);
> +    int (*nhvm_vcpu_vmexit_event)(struct vcpu *v, const struct x86_event *event);
>      uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v);
> -    bool_t (*nhvm_vmcx_guest_intercepts_trap)(struct vcpu *v,
> -                                              unsigned int trapnr,
> -                                              int errcode);
> +    bool_t (*nhvm_vmcx_guest_intercepts_event)(
> +        struct vcpu *v, unsigned int vector, int errcode);
>
>      bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
>
> @@ -419,9 +410,30 @@ void hvm_migrate_timers(struct vcpu *v);
>  void hvm_do_resume(struct vcpu *v);
>  void hvm_migrate_pirqs(struct vcpu *v);
>
> -void hvm_inject_trap(const struct hvm_trap *trap);
> -void hvm_inject_hw_exception(unsigned int trapnr, int errcode);
> -void hvm_inject_page_fault(int errcode, unsigned long cr2);
> +void hvm_inject_event(const struct x86_event *event);
> +
> +static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
> +{
> +    struct x86_event event = {
> +        .vector = vector,
> +        .type = X86_EVENTTYPE_HW_EXCEPTION,
> +        .error_code = errcode,
> +    };
> +
> +    hvm_inject_event(&event);
> +}
> +
> +static inline void hvm_inject_page_fault(int errcode, unsigned long cr2)
> +{
> +    struct x86_event event = {
> +        .vector = TRAP_page_fault,
> +        .type = X86_EVENTTYPE_HW_EXCEPTION,
> +        .error_code = errcode,
> +        .cr2 = cr2,
> +    };
> +
> +    hvm_inject_event(&event);
> +}
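(Since the wrappers keep their existing signatures, call sites need no changes. A hypothetical caller, assuming the usual Xen definitions of TRAP_gp_fault and PFEC_user_mode; "va" is an invented local for illustration:)

    /* Hypothetical call sites, unchanged by the static-inline conversion. */
    static void demo_callers(unsigned long va)
    {
        hvm_inject_hw_exception(TRAP_gp_fault, 0); /* inject #GP(0) */
        hvm_inject_page_fault(PFEC_user_mode, va); /* inject #PF, %cr2 = va */
    }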
>
>  static inline int hvm_event_pending(struct vcpu *v)
>  {
> @@ -437,18 +449,6 @@ static inline int hvm_event_pending(struct vcpu *v)
>                                 (1U << TRAP_alignment_check) | \
>                                 (1U << TRAP_machine_check))
>
> -/*
> - * x86 event types. This enumeration is valid for:
> - *  Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
> - *  AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
> - */
> -#define X86_EVENTTYPE_EXT_INTR         0 /* external interrupt */
> -#define X86_EVENTTYPE_NMI              2 /* NMI */
> -#define X86_EVENTTYPE_HW_EXCEPTION     3 /* hardware exception */
> -#define X86_EVENTTYPE_SW_INTERRUPT     4 /* software interrupt (CD nn) */
> -#define X86_EVENTTYPE_PRI_SW_EXCEPTION 5 /* ICEBP (F1) */
> -#define X86_EVENTTYPE_SW_EXCEPTION     6 /* INT3 (CC), INTO (CE) */
> -
>  int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);
>
>  uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);
> @@ -542,10 +542,10 @@ int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content)
>  /* inject vmexit into l1 guest. l1 guest will see a VMEXIT due to
>   * 'trapnr' exception.
>   */
> -static inline int nhvm_vcpu_vmexit_trap(struct vcpu *v,
> -                                        const struct hvm_trap *trap)
> +static inline int nhvm_vcpu_vmexit_event(
> +    struct vcpu *v, const struct x86_event *event)
>  {
> -    return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trap);
> +    return hvm_funcs.nhvm_vcpu_vmexit_event(v, event);
>  }
>
>  /* returns l1 guest's cr3 that points to the page table used to
> @@ -557,11 +557,10 @@ static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
>  }
>
>  /* returns true, when l1 guest intercepts the specified trap */
> -static inline bool_t nhvm_vmcx_guest_intercepts_trap(struct vcpu *v,
> -                                                     unsigned int trap,
> -                                                     int errcode)
> +static inline bool_t nhvm_vmcx_guest_intercepts_event(
> +    struct vcpu *v, unsigned int vector, int errcode)
>  {
> -    return hvm_funcs.nhvm_vmcx_guest_intercepts_trap(v, trap, errcode);
> +    return hvm_funcs.nhvm_vmcx_guest_intercepts_event(v, vector, errcode);
>  }
>
>  /* returns true when l1 guest wants to use hap to run l2 guest */
> diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h
> index 0dbc5ec..4b36c25 100644
> --- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
> +++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
> @@ -110,10 +110,10 @@ void nsvm_vcpu_destroy(struct vcpu *v);
>  int nsvm_vcpu_initialise(struct vcpu *v);
>  int nsvm_vcpu_reset(struct vcpu *v);
>  int nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs);
> -int nsvm_vcpu_vmexit_trap(struct vcpu *v, const struct hvm_trap *trap);
> +int nsvm_vcpu_vmexit_event(struct vcpu *v, const struct x86_event *event);
>  uint64_t nsvm_vcpu_hostcr3(struct vcpu *v);
> -bool_t nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr,
> -                                       int errcode);
> +bool_t nsvm_vmcb_guest_intercepts_event(
> +    struct vcpu *v, unsigned int vector, int errcode);
>  bool_t nsvm_vmcb_hap_enabled(struct vcpu *v);
>  enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
>
> diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
> index 84d9406..d485536 100644
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -206,7 +206,7 @@ struct hvm_vcpu {
>      void *fpu_exception_callback_arg;
>
>      /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
> -    struct hvm_trap inject_trap;
> +    struct x86_event inject_trap;
>
>      struct viridian_vcpu viridian;
>  };
> diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
> index aca8b4b..ead586e 100644
> --- a/xen/include/asm-x86/hvm/vmx/vvmx.h
> +++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
> @@ -112,8 +112,8 @@ void nvmx_vcpu_destroy(struct vcpu *v);
>  int nvmx_vcpu_reset(struct vcpu *v);
>  uint64_t nvmx_vcpu_eptp_base(struct vcpu *v);
>  enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
> -bool_t nvmx_intercepts_exception(struct vcpu *v, unsigned int trap,
> -                                 int error_code);
> +bool_t nvmx_intercepts_exception(
> +    struct vcpu *v, unsigned int vector, int error_code);
>  void nvmx_domain_relinquish_resources(struct domain *d);
>
>  bool_t nvmx_ept_enabled(struct vcpu *v);
> --
> 2.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel