[Xen-changelog] [xen-unstable] hvm: Large-scale cleanups and fixes to event delivery logic.
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1185904740 -3600
# Node ID 0636f262ecd8a38f46e97e0ec74fb00a4ffe4ab8
# Parent  7c5c3aa858ccf47bbaa119d7f688854a762e662e
hvm: Large-scale cleanups and fixes to event delivery logic.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/intr.c        |   79 ++++++++++++++--------
 xen/arch/x86/hvm/svm/svm.c         |   70 ++++++++++++-------------
 xen/arch/x86/hvm/vmx/intr.c        |   79 ++++++++--------------
 xen/arch/x86/hvm/vmx/vmcs.c        |    2 
 xen/arch/x86/hvm/vmx/vmx.c         |  101 +++++++++++++++----------------
 xen/arch/x86/mm/shadow/multi.c     |    2 
 xen/include/asm-x86/hvm/hvm.h      |   35 +++++++++++-
 xen/include/asm-x86/hvm/vmx/vmcs.h |    7 --
 xen/include/asm-x86/hvm/vmx/vmx.h  |    3 -
 9 files changed, 172 insertions(+), 206 deletions(-)

diff -r 7c5c3aa858cc -r 0636f262ecd8 xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c	Tue Jul 31 15:09:45 2007 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c	Tue Jul 31 18:59:00 2007 +0100
@@ -79,27 +79,32 @@ static void svm_inject_extint(struct vcp
     vmcb->eventinj = event;
 }
 
+static void enable_intr_window(struct vcpu *v, enum hvm_intack intr_source)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    ASSERT(intr_source != hvm_intack_none);
+
+    /*
+     * Create a dummy virtual interrupt to intercept as soon as the
+     * guest can accept the real interrupt.
+     *
+     * TODO: Better NMI handling. We need a way to skip a MOV SS interrupt
+     * shadow. This is hard to do without hardware support. We should also
+     * track 'NMI blocking' from NMI injection until IRET. This can be done
+     * quite easily in software by intercepting the unblocking IRET.
+     */
+    vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
+    HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
+    svm_inject_dummy_vintr(v);
+}
+
 asmlinkage void svm_intr_assist(void)
 {
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     enum hvm_intack intr_source;
     int intr_vector;
-
-    /*
-     * Previous event delivery caused this intercept?
-     * This will happen if the injection is latched by the processor (hence
-     * clearing vintr.fields.irq or eventinj.v) but then subsequently a fault
-     * occurs (e.g., due to lack of shadow mapping of guest IDT or guest-kernel
-     * stack).
-     */
-    if ( vmcb->exitintinfo.fields.v )
-    {
-        vmcb->eventinj = vmcb->exitintinfo;
-        vmcb->exitintinfo.bytes = 0;
-        HVMTRACE_1D(REINJ_VIRQ, v, intr_vector);
-        return;
-    }
 
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
@@ -111,32 +116,23 @@ asmlinkage void svm_intr_assist(void)
             return;
 
         /*
-         * If the guest can't take an interrupt right now, create a 'fake'
-         * virtual interrupt on to intercept as soon as the guest _can_ take
-         * interrupts.  Do not obtain the next interrupt from the vlapic/pic
-         * if unable to inject.
-         *
-         * Also do this if there is an injection already pending. This is
-         * because the event delivery can arbitrarily delay the injection
-         * of the vintr (for example, if the exception is handled via an
-         * interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
-         * - the vTPR could be modified upwards, so we need to wait until the
-         *   exception is delivered before we can safely decide that an
-         *   interrupt is deliverable; and
-         * - the guest might look at the APIC/PIC state, so we ought not to
-         *   have cleared the interrupt out of the IRR.
-         *
-         * TODO: Better NMI handling. We need a way to skip a MOV SS interrupt
-         * shadow. This is hard to do without hardware support. We should also
-         * track 'NMI blocking' from NMI injection until IRET. This can be done
-         * quite easily in software by intercepting the unblocking IRET.
+         * Pending IRQs must be delayed if:
+         * 1. An event is already pending. This is despite the fact that SVM
+         *    provides a VINTR delivery method quite separate from the EVENTINJ
+         *    mechanism. The event delivery can arbitrarily delay the injection
+         *    of the vintr (for example, if the exception is handled via an
+         *    interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
+         *    - the vTPR could be modified upwards, so we need to wait until
+         *      the exception is delivered before we can safely decide that an
+         *      interrupt is deliverable; and
+         *    - the guest might look at the APIC/PIC state, so we ought not to
+         *      have cleared the interrupt out of the IRR.
+         * 2. The IRQ is masked.
         */
-        if ( !hvm_interrupts_enabled(v, intr_source) ||
-             vmcb->eventinj.fields.v )
+        if ( unlikely(vmcb->eventinj.fields.v) ||
+             !hvm_interrupts_enabled(v, intr_source) )
        {
-            vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
-            HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
-            svm_inject_dummy_vintr(v);
+            enable_intr_window(v, intr_source);
             return;
        }
    } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
@@ -151,6 +147,11 @@ asmlinkage void svm_intr_assist(void)
         svm_inject_extint(v, intr_vector);
         pt_intr_post(v, intr_vector, intr_source);
     }
+
+    /* Is there another IRQ to queue up behind this one? */
+    intr_source = hvm_vcpu_has_pending_irq(v);
+    if ( unlikely(intr_source != hvm_intack_none) )
+        enable_intr_window(v, intr_source);
 }
 
 /*
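To make the new control flow concrete outside the hypervisor sources, here is a small self-contained C model of the decision svm_intr_assist() now takes on each injection attempt. Everything in it (struct fake_vcpu, the helper names) is invented for illustration; it is a sketch of the logic above, not Xen code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-ins for the VMCB/vCPU state used by svm_intr_assist(). */
    struct fake_vcpu {
        bool event_pending;    /* models vmcb->eventinj.fields.v        */
        bool irqs_enabled;     /* models hvm_interrupts_enabled()       */
        int  pending_vector;   /* models the vlapic/pic IRR; <0 == none */
    };

    /* Models enable_intr_window(): request a VINTR intercept, inject nothing. */
    static void open_intr_window(void)
    {
        printf("window opened; retry when the guest can accept the IRQ\n");
    }

    static void intr_assist(struct fake_vcpu *v)
    {
        if ( v->pending_vector < 0 )
            return;

        /*
         * The key property: the IRQ is *not* acked out of the IRR until we
         * know it can be injected, so the guest still sees it in APIC/PIC
         * state while delivery is delayed.
         */
        if ( v->event_pending || !v->irqs_enabled )
        {
            open_intr_window();
            return;
        }

        printf("inject vector %#x\n", v->pending_vector);
    }

    int main(void)
    {
        struct fake_vcpu v = { true, true, 0x30 };
        intr_assist(&v);            /* delayed: EVENTINJ already in use */
        v.event_pending = false;
        intr_assist(&v);            /* injected */
        return 0;
    }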
diff -r 7c5c3aa858cc -r 0636f262ecd8 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Tue Jul 31 15:09:45 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Tue Jul 31 18:59:00 2007 +0100
@@ -89,8 +89,6 @@ static void svm_inject_exception(
     event.fields.ev = ev;
     event.fields.errorcode = error_code;
 
-    ASSERT(vmcb->eventinj.fields.v == 0);
-
     vmcb->eventinj = event;
 }
 
@@ -362,21 +360,14 @@ int svm_vmcb_save(struct vcpu *v, struc
     c->sysenter_esp = vmcb->sysenter_esp;
     c->sysenter_eip = vmcb->sysenter_eip;
 
-    /* Save any event/interrupt that was being injected when we last
-     * exited. */
-    if ( vmcb->exitintinfo.fields.v )
-    {
-        c->pending_event = vmcb->exitintinfo.bytes & 0xffffffff;
-        c->error_code = vmcb->exitintinfo.fields.errorcode;
-    }
-    else if ( vmcb->eventinj.fields.v )
-    {
-        c->pending_event = vmcb->eventinj.bytes & 0xffffffff;
+    c->pending_event = 0;
+    c->error_code = 0;
+    if ( vmcb->eventinj.fields.v &&
+         hvm_event_needs_reinjection(vmcb->eventinj.fields.type,
+                                     vmcb->eventinj.fields.vector) )
+    {
+        c->pending_event = (uint32_t)vmcb->eventinj.bytes;
         c->error_code = vmcb->eventinj.fields.errorcode;
-    }
-    else
-    {
-        c->pending_event = 0;
-        c->error_code = 0;
     }
 
     return 1;
@@ -495,11 +486,11 @@ int svm_vmcb_restore(struct vcpu *v, str
     vmcb->sysenter_esp = c->sysenter_esp;
     vmcb->sysenter_eip = c->sysenter_eip;
 
-    /* update VMCB for nested paging restore */
-    if ( paging_mode_hap(v->domain) ) {
+    if ( paging_mode_hap(v->domain) )
+    {
         vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 |
-                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
+        vmcb->cr4 = (v->arch.hvm_svm.cpu_shadow_cr4 |
+                     (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
         vmcb->cr3 = c->cr3;
         vmcb->np_enable = 1;
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
@@ -514,26 +505,23 @@ int svm_vmcb_restore(struct vcpu *v, str
         gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
                  c->pending_event, c->error_code);
 
-        /* VMX uses a different type for #OF and #BP; fold into "Exception" */
-        if ( c->pending_type == 6 )
-            c->pending_type = 3;
-        /* Sanity check */
-        if ( c->pending_type == 1 || c->pending_type > 4
-             || c->pending_reserved != 0 )
+        if ( (c->pending_type == 1) || (c->pending_type > 6) ||
+             (c->pending_reserved != 0) )
         {
             gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32"\n",
                      c->pending_event);
             return -EINVAL;
         }
-        /* Put this pending event in exitintinfo and svm_intr_assist()
-         * will reinject it when we return to the guest. */
-        vmcb->exitintinfo.bytes = c->pending_event;
-        vmcb->exitintinfo.fields.errorcode = c->error_code;
+
+        if ( hvm_event_needs_reinjection(c->pending_type, c->pending_vector) )
+        {
+            vmcb->eventinj.bytes = c->pending_event;
+            vmcb->eventinj.fields.errorcode = c->error_code;
+        }
     }
 
     paging_update_paging_modes(v);
-    /* signal paging update to ASID handler */
-    svm_asid_g_update_paging (v);
+    svm_asid_g_update_paging(v);
 
     return 0;
 
@@ -965,10 +953,10 @@ static void svm_hvm_inject_exception(
     svm_inject_exception(v, trapnr, (errcode != -1), errcode);
 }
 
-static int svm_event_injection_faulted(struct vcpu *v)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    return vmcb->exitintinfo.fields.v;
+static int svm_event_pending(struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    return vmcb->eventinj.fields.v;
 }
 
 static struct hvm_function_table svm_function_table = {
@@ -1000,7 +988,7 @@ static struct hvm_function_table svm_fun
     .inject_exception = svm_hvm_inject_exception,
     .init_ap_context = svm_init_ap_context,
     .init_hypercall_page = svm_init_hypercall_page,
-    .event_injection_faulted = svm_event_injection_faulted
+    .event_pending = svm_event_pending
 };
 
 static void svm_npt_detect(void)
@@ -2431,6 +2419,7 @@ asmlinkage void svm_vmexit_handler(struc
     unsigned long eip;
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    eventinj_t eventinj;
     int inst_len, rc;
 
     exit_reason = vmcb->exitcode;
@@ -2445,6 +2434,13 @@ asmlinkage void svm_vmexit_handler(struc
     perfc_incra(svmexits, exit_reason);
     eip = vmcb->rip;
+
+    /* Event delivery caused this intercept? Queue for redelivery. */
+    eventinj = vmcb->exitintinfo;
+    if ( unlikely(eventinj.fields.v) &&
+         hvm_event_needs_reinjection(eventinj.fields.type,
+                                     eventinj.fields.vector) )
+        vmcb->eventinj = eventinj;
 
     switch ( exit_reason )
     {
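Both SVM's EVENTINJ/EXITINTINFO fields and VMX's interrupt-information words keep the vector in bits 0-7, the event type in bits 8-10, an error-code-valid flag in bit 11, and a valid flag in bit 31, which is why the save record's 32-bit pending_event can be shared across vendors and why the patch can extract the type and vector with plain shifts. A minimal self-contained decoder (the macro names are mine; the bit positions follow the AMD and Intel manuals):

    #include <stdint.h>
    #include <stdio.h>

    /* Field layout shared by SVM EVENTINJ[31:0] and VMX intr-info words. */
    #define EVT_VECTOR(ev)   ((unsigned)((ev) & 0xff))
    #define EVT_TYPE(ev)     ((unsigned)(((ev) >> 8) & 7))
    #define EVT_HAS_EC(ev)   ((unsigned)(((ev) >> 11) & 1))
    #define EVT_VALID(ev)    ((unsigned)(((ev) >> 31) & 1))

    int main(void)
    {
        /* valid | error-code valid | type 3 (HW exception) | vector 14 (#PF) */
        uint32_t ev = 0x80000b0e;
        printf("valid=%u type=%u ec=%u vector=%u\n",
               EVT_VALID(ev), EVT_TYPE(ev), EVT_HAS_EC(ev), EVT_VECTOR(ev));
        return 0;   /* prints: valid=1 type=3 ec=1 vector=14 */
    }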
diff -r 7c5c3aa858cc -r 0636f262ecd8 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c	Tue Jul 31 15:09:45 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c	Tue Jul 31 18:59:00 2007 +0100
@@ -76,10 +76,9 @@ static void enable_intr_window(struct vc
     u32 *cpu_exec_control = &v->arch.hvm_vmx.exec_control;
     u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;
 
-    if ( unlikely(intr_source == hvm_intack_none) )
-        return;
+    ASSERT(intr_source != hvm_intack_none);
 
-    if ( unlikely(intr_source == hvm_intack_nmi) && cpu_has_vmx_vnmi )
+    if ( (intr_source == hvm_intack_nmi) && cpu_has_vmx_vnmi )
     {
         /*
          * We set MOV-SS blocking in lieu of STI blocking when delivering an
@@ -131,69 +130,27 @@ asmlinkage void vmx_intr_assist(void)
     int intr_vector;
     enum hvm_intack intr_source;
     struct vcpu *v = current;
-    unsigned int idtv_info_field;
-    unsigned long inst_len;
+    unsigned int intr_info;
+
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
-    hvm_set_callback_irq_level();
-
-    update_tpr_threshold(vcpu_vlapic(v));
 
     do {
         intr_source = hvm_vcpu_has_pending_irq(v);
+        if ( likely(intr_source == hvm_intack_none) )
+            goto out;
 
-        if ( unlikely(v->arch.hvm_vmx.vector_injected) )
-        {
-            v->arch.hvm_vmx.vector_injected = 0;
-            enable_intr_window(v, intr_source);
-            return;
-        }
-
-        /* This could be moved earlier in the VMX resume sequence. */
-        idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
-        if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
-        {
-            /* See SDM 3B 25.7.1.1 and .2 for info about masking resvd bits. */
-            __vmwrite(VM_ENTRY_INTR_INFO_FIELD,
-                      idtv_info_field & ~INTR_INFO_RESVD_BITS_MASK);
-
-            /*
-             * Safe: the length will only be interpreted for software
-             * exceptions and interrupts. If we get here then delivery of some
-             * event caused a fault, and this always results in defined
-             * VM_EXIT_INSTRUCTION_LEN.
-             */
-            inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
-            __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
-
-            if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
-                __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
-                          __vmread(IDT_VECTORING_ERROR_CODE));
-
-            /*
-             * Clear NMI-blocking interruptibility info if an NMI delivery
-             * faulted. Re-delivery will re-set it (see SDM 3B 25.7.1.2).
-             */
-            if ( (idtv_info_field&INTR_INFO_INTR_TYPE_MASK) ==
-                 (X86_EVENTTYPE_NMI << 8) )
-                __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
-                          __vmread(GUEST_INTERRUPTIBILITY_INFO) &
-                          ~VMX_INTR_SHADOW_NMI);
-
-            enable_intr_window(v, intr_source);
-
-            HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
-            return;
-        }
-
-        if ( likely(intr_source == hvm_intack_none) )
-            return;
-
-        if ( !hvm_interrupts_enabled(v, intr_source) )
+        /*
+         * An event is already pending or the pending interrupt is masked?
+         * Then the pending interrupt must be delayed.
+         */
+        intr_info = __vmread(VM_ENTRY_INTR_INFO);
+        if ( unlikely(intr_info & INTR_INFO_VALID_MASK) ||
+             !hvm_interrupts_enabled(v, intr_source) )
         {
             enable_intr_window(v, intr_source);
-            return;
+            goto out;
         }
     } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
@@ -207,6 +164,14 @@ asmlinkage void vmx_intr_assist(void)
         vmx_inject_extint(v, intr_vector);
         pt_intr_post(v, intr_vector, intr_source);
     }
+
+    /* Is there another IRQ to queue up behind this one? */
+    intr_source = hvm_vcpu_has_pending_irq(v);
+    if ( unlikely(intr_source != hvm_intack_none) )
+        enable_intr_window(v, intr_source);
+
+ out:
+    update_tpr_threshold(vcpu_vlapic(v));
 }
 
 /*
diff -r 7c5c3aa858cc -r 0636f262ecd8 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Tue Jul 31 15:09:45 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Tue Jul 31 18:59:00 2007 +0100
@@ -433,7 +433,7 @@ static void construct_vmcs(struct vcpu *
     __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
     __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
 
-    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
+    __vmwrite(VM_ENTRY_INTR_INFO, 0);
 
     __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
     __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
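On the VMX side, enable_intr_window() requests the window by flipping processor-based execution-control bits rather than injecting anything; with virtual NMIs available it swaps in an NMI window instead (plus the MOV-SS blocking trick discussed in the hunk above). A rough standalone model of that bit handling, using the interrupt-window (bit 2) and NMI-window (bit 22) positions from the Intel SDM; the helper's shape is my own simplification, not the exact Xen function:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Primary processor-based VM-execution control bits (Intel SDM, vol. 3). */
    #define CPU_BASED_VIRTUAL_INTR_PENDING  (1u << 2)   /* interrupt-window exiting */
    #define CPU_BASED_VIRTUAL_NMI_PENDING   (1u << 22)  /* NMI-window exiting */

    static uint32_t exec_control;   /* models v->arch.hvm_vmx.exec_control */

    static void enable_intr_window(bool nmi, bool cpu_has_vnmi)
    {
        uint32_t ctl = (nmi && cpu_has_vnmi) ? CPU_BASED_VIRTUAL_NMI_PENDING
                                             : CPU_BASED_VIRTUAL_INTR_PENDING;
        if ( !(exec_control & ctl) )
        {
            exec_control |= ctl;
            /* Real code would now __vmwrite() the updated exec_control. */
        }
        printf("exec_control = %#x\n", exec_control);
    }

    int main(void)
    {
        enable_intr_window(false, true);  /* ordinary interrupt window */
        enable_intr_window(true, true);   /* NMI window under vNMI     */
        return 0;
    }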
diff -r 7c5c3aa858cc -r 0636f262ecd8 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Jul 31 15:09:45 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Jul 31 18:59:00 2007 +0100
@@ -613,28 +613,13 @@ void vmx_vmcs_save(struct vcpu *v, struc
     c->sysenter_esp = __vmread(GUEST_SYSENTER_ESP);
     c->sysenter_eip = __vmread(GUEST_SYSENTER_EIP);
 
-    /*
-     * Save any event/interrupt that was being injected when we last
-     * exited. IDT_VECTORING_INFO_FIELD has priority, as anything in
-     * VM_ENTRY_INTR_INFO_FIELD is either a fault caused by the first
-     * event, which will happen the next time, or an interrupt, which we
-     * never inject when IDT_VECTORING_INFO_FIELD is valid.
-     */
-    if ( (ev = __vmread(IDT_VECTORING_INFO_FIELD)) & INTR_INFO_VALID_MASK )
-    {
-        c->pending_event = ev;
-        c->error_code = __vmread(IDT_VECTORING_ERROR_CODE);
-    }
-    else if ( (ev = __vmread(VM_ENTRY_INTR_INFO_FIELD)) &
-              INTR_INFO_VALID_MASK )
+    c->pending_event = 0;
+    c->error_code = 0;
+    if ( ((ev = __vmread(VM_ENTRY_INTR_INFO)) & INTR_INFO_VALID_MASK) &&
+         hvm_event_needs_reinjection((ev >> 8) & 7, ev & 0xff) )
     {
         c->pending_event = ev;
         c->error_code = __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE);
-    }
-    else
-    {
-        c->pending_event = 0;
-        c->error_code = 0;
     }
 
     vmx_vmcs_exit(v);
@@ -754,34 +739,9 @@ int vmx_vmcs_restore(struct vcpu *v, str
 
     if ( c->pending_valid )
     {
-        vmx_vmcs_enter(v);
-
         gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
                  c->pending_event, c->error_code);
 
-        /* SVM uses type 3 ("Exception") for #OF and #BP; VMX uses type 6 */
-        if ( (c->pending_type == 3) &&
-             ((c->pending_vector == 3) || (c->pending_vector == 4)) )
-            c->pending_type = 6;
-
-        /* For software exceptions, we need to tell the hardware the
-         * instruction length as well (hmmm). */
-        if ( c->pending_type > 4 )
-        {
-            int addrbytes, ilen;
-            if ( (c->cs_arbytes & X86_SEG_AR_CS_LM_ACTIVE) &&
-                 (c->msr_efer & EFER_LMA) )
-                addrbytes = 8;
-            else if ( c->cs_arbytes & X86_SEG_AR_DEF_OP_SIZE )
-                addrbytes = 4;
-            else
-                addrbytes = 2;
-
-            ilen = hvm_instruction_fetch(c->rip, addrbytes, NULL);
-            __vmwrite(VM_ENTRY_INSTRUCTION_LEN, ilen);
-        }
-
-        /* Sanity check */
         if ( (c->pending_type == 1) || (c->pending_type > 6) ||
              (c->pending_reserved != 0) )
         {
@@ -790,12 +750,13 @@ int vmx_vmcs_restore(struct vcpu *v, str
             return -EINVAL;
         }
 
-        /* Re-inject the exception */
-        __vmwrite(VM_ENTRY_INTR_INFO_FIELD, c->pending_event);
-        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, c->error_code);
-        v->arch.hvm_vmx.vector_injected = 1;
-
-        vmx_vmcs_exit(v);
+        if ( hvm_event_needs_reinjection(c->pending_type, c->pending_vector) )
+        {
+            vmx_vmcs_enter(v);
+            __vmwrite(VM_ENTRY_INTR_INFO, c->pending_event);
+            __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, c->error_code);
+            vmx_vmcs_exit(v);
+        }
     }
 
     return 0;
@@ -1203,14 +1164,10 @@ static void vmx_update_vtpr(struct vcpu
     /* VMX doesn't have a V_TPR field */
 }
 
-static int vmx_event_injection_faulted(struct vcpu *v)
-{
-    unsigned int idtv_info_field;
-
+static int vmx_event_pending(struct vcpu *v)
+{
     ASSERT(v == current);
-
-    idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
-    return (idtv_info_field & INTR_INFO_VALID_MASK);
+    return (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK);
 }
 
 static void disable_intercept_for_msr(u32 msr)
@@ -1261,7 +1218,7 @@ static struct hvm_function_table vmx_fun
     .inject_exception = vmx_inject_exception,
     .init_ap_context = vmx_init_ap_context,
     .init_hypercall_page = vmx_init_hypercall_page,
-    .event_injection_faulted = vmx_event_injection_faulted,
+    .event_pending = vmx_event_pending,
     .cpu_up = vmx_cpu_up,
     .cpu_down = vmx_cpu_down,
 };
@@ -2889,7 +2846,7 @@ static void vmx_failed_vmentry(unsigned
 
 asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
 {
-    unsigned int exit_reason;
+    unsigned int exit_reason, idtv_info;
     unsigned long exit_qualification, inst_len = 0;
     struct vcpu *v = current;
@@ -2904,6 +2861,30 @@ asmlinkage void vmx_vmexit_handler(struc
 
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
         return vmx_failed_vmentry(exit_reason, regs);
+
+    /* Event delivery caused this intercept? Queue for redelivery. */
+    idtv_info = __vmread(IDT_VECTORING_INFO);
+    if ( unlikely(idtv_info & INTR_INFO_VALID_MASK) )
+    {
+        if ( hvm_event_needs_reinjection((idtv_info>>8)&7, idtv_info&0xff) )
+        {
+            /* See SDM 3B 25.7.1.1 and .2 for info about masking resvd bits. */
+            __vmwrite(VM_ENTRY_INTR_INFO,
+                      idtv_info & ~INTR_INFO_RESVD_BITS_MASK);
+            if ( idtv_info & INTR_INFO_DELIVER_CODE_MASK )
+                __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
+                          __vmread(IDT_VECTORING_ERROR_CODE));
+        }
+
+        /*
+         * Clear NMI-blocking interruptibility info if an NMI delivery faulted.
+         * Re-delivery will re-set it (see SDM 3B 25.7.1.2).
+         */
+        if ( (idtv_info & INTR_INFO_INTR_TYPE_MASK) == (X86_EVENTTYPE_NMI<<8) )
+            __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
+                      __vmread(GUEST_INTERRUPTIBILITY_INFO) &
+                      ~VMX_INTR_SHADOW_NMI);
+    }
 
     switch ( exit_reason )
     {
@@ -2927,7 +2908,7 @@ asmlinkage void vmx_vmexit_handler(struc
          * (NB. If we emulate this IRET for any reason, we should re-clear!)
          */
         if ( unlikely(intr_info & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
-             !(__vmread(IDT_VECTORING_INFO_FIELD) & INTR_INFO_VALID_MASK) &&
+             !(__vmread(IDT_VECTORING_INFO) & INTR_INFO_VALID_MASK) &&
             (vector != TRAP_double_fault) )
             __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
                     __vmread(GUEST_INTERRUPTIBILITY_INFO)|VMX_INTR_SHADOW_NMI);
diff -r 7c5c3aa858cc -r 0636f262ecd8 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c	Tue Jul 31 15:09:45 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Tue Jul 31 18:59:00 2007 +0100
@@ -2905,7 +2905,7 @@ static int sh_page_fault(struct vcpu *v,
      * stack is currently considered to be a page table, so we should
      * unshadow the faulting page before exiting.
      */
-    if ( unlikely(hvm_event_injection_faulted(v)) )
+    if ( unlikely(hvm_event_pending(v)) )
     {
         gdprintk(XENLOG_DEBUG, "write to pagetable during event "
                  "injection: cr2=%#lx, mfn=%#lx\n",
diff -r 7c5c3aa858cc -r 0636f262ecd8 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h	Tue Jul 31 15:09:45 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h	Tue Jul 31 18:59:00 2007 +0100
@@ -154,7 +154,7 @@ struct hvm_function_table {
 
     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
 
-    int (*event_injection_faulted)(struct vcpu *v);
+    int (*event_pending)(struct vcpu *v);
 
     int (*cpu_up)(void);
     void (*cpu_down)(void);
@@ -296,9 +296,9 @@ hvm_inject_exception(unsigned int trapnr
 
 int hvm_bringup_ap(int vcpuid, int trampoline_vector);
 
-static inline int hvm_event_injection_faulted(struct vcpu *v)
-{
-    return hvm_funcs.event_injection_faulted(v);
+static inline int hvm_event_pending(struct vcpu *v)
+{
+    return hvm_funcs.event_pending(v);
 }
 
 /* These reserved bits in lower 32 remain 0 after any load of CR0 */
@@ -335,6 +335,33 @@ static inline int hvm_event_injection_fa
 #define X86_EVENTTYPE_SW_INTERRUPT    4    /* software interrupt */
 #define X86_EVENTTYPE_SW_EXCEPTION    6    /* software exception */
 
+/*
+ * Need to re-inject a given event? We avoid re-injecting software exceptions
+ * and interrupts because the faulting/trapping instruction can simply be
+ * re-executed (neither VMX nor SVM update RIP when they VMEXIT during
+ * INT3/INTO/INTn).
+ */
+static inline int hvm_event_needs_reinjection(uint8_t type, uint8_t vector)
+{
+    switch ( type )
+    {
+    case X86_EVENTTYPE_EXT_INTR:
+    case X86_EVENTTYPE_NMI:
+        return 1;
+    case X86_EVENTTYPE_HW_EXCEPTION:
+        /*
+         * SVM uses type 3 ("HW Exception") for #OF and #BP. We explicitly
+         * check for these vectors, as they are really SW Exceptions. SVM has
+         * not updated RIP to point after the trapping instruction (INT3/INTO).
+         */
+        return (vector != 3) && (vector != 4);
+    default:
+        /* Software exceptions/interrupts can be re-executed (e.g., INT n). */
+        break;
+    }
+    return 0;
+}
+
 static inline int hvm_cpu_up(void)
 {
     if ( hvm_funcs.cpu_up )
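The new hvm_event_needs_reinjection() predicate is the hinge of the whole cleanup: both vendors' VMEXIT handlers above consult it before queueing an interrupted event for redelivery. Because it is a pure function it can be exercised in isolation; the copy below is lifted from the hunk above, wrapped in a small test harness of my own:

    #include <stdint.h>
    #include <stdio.h>

    /* Event types, as defined in xen/include/asm-x86/hvm/hvm.h. */
    #define X86_EVENTTYPE_EXT_INTR      0
    #define X86_EVENTTYPE_NMI           2
    #define X86_EVENTTYPE_HW_EXCEPTION  3
    #define X86_EVENTTYPE_SW_INTERRUPT  4
    #define X86_EVENTTYPE_SW_EXCEPTION  6

    static int hvm_event_needs_reinjection(uint8_t type, uint8_t vector)
    {
        switch ( type )
        {
        case X86_EVENTTYPE_EXT_INTR:
        case X86_EVENTTYPE_NMI:
            return 1;
        case X86_EVENTTYPE_HW_EXCEPTION:
            /* #BP (3) and #OF (4) are really software exceptions on SVM. */
            return (vector != 3) && (vector != 4);
        default:
            /* Software exceptions/interrupts can simply be re-executed. */
            break;
        }
        return 0;
    }

    int main(void)
    {
        printf("#PF (hw exc, vec 14): %d\n",
               hvm_event_needs_reinjection(X86_EVENTTYPE_HW_EXCEPTION, 14));
        printf("#BP (hw exc, vec 3):  %d\n",
               hvm_event_needs_reinjection(X86_EVENTTYPE_HW_EXCEPTION, 3));
        printf("INT 0x80 (sw intr):   %d\n",
               hvm_event_needs_reinjection(X86_EVENTTYPE_SW_INTERRUPT, 0x80));
        return 0;   /* prints 1, 0, 0 */
    }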
diff -r 7c5c3aa858cc -r 0636f262ecd8 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Tue Jul 31 15:09:45 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Tue Jul 31 18:59:00 2007 +0100
@@ -66,9 +66,6 @@ struct arch_vmx_struct {
 
     /* Cache of cpu execution control. */
     u32                  exec_control;
-
-    /* If there is vector installed in the INTR_INFO_FIELD. */
-    u32                  vector_injected;
 
     unsigned long        cpu_cr0; /* copy of guest CR0 */
     unsigned long        cpu_shadow_cr0; /* copy of guest read shadow CR0 */
@@ -198,7 +195,7 @@ enum vmcs_field {
     VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
     VM_ENTRY_CONTROLS               = 0x00004012,
     VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
-    VM_ENTRY_INTR_INFO_FIELD        = 0x00004016,
+    VM_ENTRY_INTR_INFO              = 0x00004016,
     VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
     VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
     TPR_THRESHOLD                   = 0x0000401c,
@@ -207,7 +204,7 @@ enum vmcs_field {
     VM_EXIT_REASON                  = 0x00004402,
     VM_EXIT_INTR_INFO               = 0x00004404,
     VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
-    IDT_VECTORING_INFO_FIELD        = 0x00004408,
+    IDT_VECTORING_INFO              = 0x00004408,
     IDT_VECTORING_ERROR_CODE        = 0x0000440a,
     VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
     VMX_INSTRUCTION_INFO            = 0x0000440e,
diff -r 7c5c3aa858cc -r 0636f262ecd8 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Jul 31 15:09:45 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Jul 31 18:59:00 2007 +0100
@@ -277,7 +277,7 @@ static inline void __vmx_inject_exceptio
         intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
     }
 
-    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
+    __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
 
     if (trap == TRAP_page_fault)
         HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vmx.cpu_cr2, error_code);
@@ -288,7 +288,6 @@ static inline void vmx_inject_hw_excepti
 static inline void vmx_inject_hw_exception(
     struct vcpu *v, int trap, int error_code)
 {
-    v->arch.hvm_vmx.vector_injected = 1;
    __vmx_inject_exception(v, trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
 }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog