[Xen-changelog] [xen-unstable] hvm: Support injection of virtual NMIs and clean up ExtInt handling in general.
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1182336616 -3600
# Node ID 50358c4b37f4fcaac1061f1c84a865932401c1be
# Parent  739d698986e9acd6ca5cf0be831a515f93c9d5bc
hvm: Support injection of virtual NMIs and clean up ExtInt handling in
general.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/irq.c            |   81 +++++++++++----
 xen/arch/x86/hvm/svm/intr.c       |  146 +++++++++++++++++++++-----------------
 xen/arch/x86/hvm/svm/svm.c        |   42 ++++------
 xen/arch/x86/hvm/vioapic.c        |   34 ++++----
 xen/arch/x86/hvm/vlapic.c         |    7 -
 xen/arch/x86/hvm/vmx/intr.c       |  106 ++++++++++++++-------------
 xen/arch/x86/hvm/vmx/vmx.c        |   26 ++++--
 xen/arch/x86/hvm/vpic.c           |    3 
 xen/arch/x86/hvm/vpt.c            |    9 +-
 xen/include/asm-x86/event.h       |    1 
 xen/include/asm-x86/hvm/hvm.h     |   18 +++-
 xen/include/asm-x86/hvm/irq.h     |   12 +--
 xen/include/asm-x86/hvm/vcpu.h    |    4 -
 xen/include/asm-x86/hvm/vlapic.h  |    2 
 xen/include/asm-x86/hvm/vmx/vmx.h |   13 ++-
 xen/include/asm-x86/hvm/vpic.h    |    2 
 xen/include/asm-x86/hvm/vpt.h     |    3 
 17 files changed, 279 insertions(+), 230 deletions(-)
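For context before the per-file hunks: this patch retires the old
cpu_has_pending_irq()/cpu_get_interrupt() pair in favour of a two-step
check/ack protocol keyed on the new enum hvm_intack. A minimal standalone
sketch of that protocol follows, mirroring the do/while loops added to
svm_intr_assist() and vmx_intr_assist() below; the fake vcpu state and the
check/ack bodies are simplified placeholders for illustration, not Xen code.

    /*
     * Sketch of the check/ack protocol introduced by this patch. The enum
     * and the loop shape follow the patch; everything else is a stub.
     */
    #include <stdio.h>

    enum hvm_intack {
        hvm_intack_none,
        hvm_intack_pic,
        hvm_intack_lapic,
        hvm_intack_nmi      /* checked first: highest priority */
    };

    struct fake_vcpu { int nmi_pending; int lapic_vector; /* -1 if none */ };

    static enum hvm_intack check(struct fake_vcpu *v)
    {
        if ( v->nmi_pending )
            return hvm_intack_nmi;
        if ( v->lapic_vector != -1 )
            return hvm_intack_lapic;
        return hvm_intack_none;
    }

    /* Ack may fail (return 0) if the source retracted its interrupt
     * between the check and the ack; the caller then simply re-checks. */
    static int ack(struct fake_vcpu *v, enum hvm_intack type, int *vector)
    {
        switch ( type )
        {
        case hvm_intack_nmi:
            *vector = 2;                 /* NMI is always vector 2 */
            v->nmi_pending = 0;
            return 1;
        case hvm_intack_lapic:
            *vector = v->lapic_vector;
            v->lapic_vector = -1;
            return (*vector != -1);
        default:
            return 0;
        }
    }

    int main(void)
    {
        struct fake_vcpu v = { .nmi_pending = 1, .lapic_vector = 0x30 };
        enum hvm_intack src;
        int vector;

        do {
            src = check(&v);
            if ( src == hvm_intack_none )
                return 0;
        } while ( !ack(&v, src, &vector) );

        printf("deliver source %d vector 0x%x\n", src, vector);
        return 0;
    }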
diff -r 739d698986e9 -r 50358c4b37f4 xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c    Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/arch/x86/hvm/irq.c    Wed Jun 20 11:50:16 2007 +0100
@@ -285,43 +285,49 @@ void hvm_set_callback_via(struct domain
     }
 }
 
-int cpu_has_pending_irq(struct vcpu *v)
+enum hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
 {
     struct hvm_domain *plat = &v->domain->arch.hvm_domain;
 
-    /* APIC */
+    if ( unlikely(v->arch.hvm_vcpu.nmi_pending) )
+        return hvm_intack_nmi;
+
     if ( vlapic_has_interrupt(v) != -1 )
-        return 1;
-
-    /* PIC */
+        return hvm_intack_lapic;
+
     if ( !vlapic_accept_pic_intr(v) )
-        return 0;
-
-    return plat->vpic[0].int_output;
-}
-
-int cpu_get_interrupt(struct vcpu *v, int *type)
-{
-    int vector;
-
-    if ( (vector = cpu_get_apic_interrupt(v, type)) != -1 )
-        return vector;
-
-    if ( (v->vcpu_id == 0) &&
-         ((vector = cpu_get_pic_interrupt(v, type)) != -1) )
-        return vector;
-
-    return -1;
-}
-
-int get_isa_irq_vector(struct vcpu *v, int isa_irq, int type)
+        return hvm_intack_none;
+
+    return plat->vpic[0].int_output ? hvm_intack_pic : hvm_intack_none;
+}
+
+int hvm_vcpu_ack_pending_irq(struct vcpu *v, enum hvm_intack type, int *vector)
+{
+    switch ( type )
+    {
+    case hvm_intack_nmi:
+        return test_and_clear_bool(v->arch.hvm_vcpu.nmi_pending);
+    case hvm_intack_lapic:
+        return ((*vector = cpu_get_apic_interrupt(v)) != -1);
+    case hvm_intack_pic:
+        ASSERT(v->vcpu_id == 0);
+        return ((*vector = cpu_get_pic_interrupt(v)) != -1);
+    default:
+        break;
+    }
+
+    return 0;
+}
+
+int get_isa_irq_vector(struct vcpu *v, int isa_irq, enum hvm_intack src)
 {
     unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
 
-    if ( type == APIC_DM_EXTINT )
+    if ( src == hvm_intack_pic )
         return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                 + (isa_irq & 7));
 
+    ASSERT(src == hvm_intack_lapic);
     return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
 }
 
@@ -337,19 +343,20 @@ int is_isa_irq_masked(struct vcpu *v, in
             domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
 }
 
-/*
- * TODO: 1. Should not need special treatment of event-channel events.
- *       2. Should take notice of interrupt shadows (or clear them).
- */
 int hvm_local_events_need_delivery(struct vcpu *v)
 {
-    int pending;
-
-    pending = (vcpu_info(v, evtchn_upcall_pending) || cpu_has_pending_irq(v));
-    if ( unlikely(pending) )
-        pending = hvm_interrupts_enabled(v);
-
-    return pending;
+    enum hvm_intack type;
+
+    /* TODO: Get rid of event-channel special case. */
+    if ( vcpu_info(v, evtchn_upcall_pending) )
+        type = hvm_intack_pic;
+    else
+        type = hvm_vcpu_has_pending_irq(v);
+
+    if ( likely(type == hvm_intack_none) )
+        return 0;
+
+    return hvm_interrupts_enabled(v, type);
 }
 
 #if 0 /* Keep for debugging */
diff -r 739d698986e9 -r 50358c4b37f4 xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c       Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c       Wed Jun 20 11:50:16 2007 +0100
@@ -15,7 +15,6 @@
  * You should have received a copy of the GNU General Public License along with
  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
  */
 
 #include <xen/config.h>
@@ -39,100 +38,119 @@
 #include <xen/domain_page.h>
 #include <asm/hvm/trace.h>
 
-/*
- * Most of this code is copied from vmx_io.c and modified
- * to be suitable for SVM.
- */
-
-static inline int svm_inject_extint(struct vcpu *v, int trap)
+static void svm_inject_dummy_vintr(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     vintr_t intr = vmcb->vintr;
 
-    /* Update only relevant fields */
     intr.fields.irq = 1;
     intr.fields.intr_masking = 1;
-    intr.fields.vector = trap;
+    intr.fields.vector = 0;
     intr.fields.prio = 0xF;
     intr.fields.ign_tpr = 1;
     vmcb->vintr = intr;
+}
+
+static void svm_inject_nmi(struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    eventinj_t event;
 
-    return 0;
+    event.bytes = 0;
+    event.fields.v = 1;
+    event.fields.type = EVENTTYPE_NMI;
+    event.fields.vector = 2;
+
+    ASSERT(vmcb->eventinj.fields.v == 0);
+    vmcb->eventinj = event;
+}
+
+static void svm_inject_extint(struct vcpu *v, int vector)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    eventinj_t event;
+
+    event.bytes = 0;
+    event.fields.v = 1;
+    event.fields.type = EVENTTYPE_INTR;
+    event.fields.vector = vector;
+
+    ASSERT(vmcb->eventinj.fields.v == 0);
+    vmcb->eventinj = event;
 }
 
 asmlinkage void svm_intr_assist(void)
 {
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    int intr_type = APIC_DM_EXTINT;
-    int intr_vector = -1;
+    enum hvm_intack intr_source;
+    int intr_vector;
 
     /*
-     * Previous Interrupt delivery caused this intercept?
+     * Previous event delivery caused this intercept?
      * This will happen if the injection is latched by the processor (hence
-     * clearing vintr.fields.irq) but then subsequently a fault occurs (e.g.,
-     * due to lack of shadow mapping of guest IDT or guest-kernel stack).
-     *
-     * NB. Exceptions that fault during delivery are lost. This needs to be
-     * fixed but we'll usually get away with it since faults are usually
-     * idempotent. But this isn't the case for e.g. software interrupts!
+     * clearing vintr.fields.irq or eventinj.v) but then subsequently a fault
+     * occurs (e.g., due to lack of shadow mapping of guest IDT or guest-kernel
+     * stack).
      */
-    if ( vmcb->exitintinfo.fields.v && (vmcb->exitintinfo.fields.type == 0) )
+    if ( vmcb->exitintinfo.fields.v )
    {
-        intr_vector = vmcb->exitintinfo.fields.vector;
+        vmcb->eventinj = vmcb->exitintinfo;
         vmcb->exitintinfo.bytes = 0;
         HVMTRACE_1D(REINJ_VIRQ, v, intr_vector);
-        svm_inject_extint(v, intr_vector);
         return;
     }
 
-    /*
-     * Previous interrupt still pending? This occurs if we return from VMRUN
-     * very early in the entry-to-guest process. Usually this is because an
-     * external physical interrupt was pending when we executed VMRUN.
-     */
-    if ( vmcb->vintr.fields.irq )
-        return;
-
-    /* Crank the handle on interrupt state and check for new interrrupts. */
+    /* Crank the handle on interrupt state. */
     pt_update_irq(v);
     hvm_set_callback_irq_level();
-    if ( !cpu_has_pending_irq(v) )
-        return;
 
-    /*
-     * If the guest can't take an interrupt right now, create a 'fake'
-     * virtual interrupt on to intercept as soon as the guest _can_ take
-     * interrupts.  Do not obtain the next interrupt from the vlapic/pic
-     * if unable to inject.
-     *
-     * Also do this if there is an exception pending.  This is because
-     * the delivery of the exception can arbitrarily delay the injection
-     * of the vintr (for example, if the exception is handled via an
-     * interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
-     * - the vTPR could be modified upwards, so we need to wait until the
-     *   exception is delivered before we can safely decide that an
-     *   interrupt is deliverable; and
-     * - the guest might look at the APIC/PIC state, so we ought not to have
-     *   cleared the interrupt out of the IRR.
-     */
-    if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow
-         || vmcb->eventinj.fields.v )
+    do {
+        intr_source = hvm_vcpu_has_pending_irq(v);
+        if ( likely(intr_source == hvm_intack_none) )
+            return;
+
+        /*
+         * If the guest can't take an interrupt right now, create a 'fake'
+         * virtual interrupt on to intercept as soon as the guest _can_ take
+         * interrupts.  Do not obtain the next interrupt from the vlapic/pic
+         * if unable to inject.
+         *
+         * Also do this if there is an injection already pending. This is
+         * because the event delivery can arbitrarily delay the injection
+         * of the vintr (for example, if the exception is handled via an
+         * interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
+         * - the vTPR could be modified upwards, so we need to wait until the
+         *   exception is delivered before we can safely decide that an
+         *   interrupt is deliverable; and
+         * - the guest might look at the APIC/PIC state, so we ought not to
+         *   have cleared the interrupt out of the IRR.
+         *
+         * TODO: Better NMI handling. We need a way to skip a MOV SS interrupt
+         * shadow. This is hard to do without hardware support. We should also
+         * track 'NMI blocking' from NMI injection until IRET. This can be done
+         * quite easily in software by intercepting the unblocking IRET.
+         */
+        if ( !hvm_interrupts_enabled(v, intr_source) ||
+             vmcb->eventinj.fields.v )
+        {
+            vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
+            HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
+            svm_inject_dummy_vintr(v);
+            return;
+        }
+    } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
+
+    if ( intr_source == hvm_intack_nmi )
     {
-        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
-        HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
-        svm_inject_extint(v, 0x0); /* actual vector doesn't matter */
-        return;
+        svm_inject_nmi(v);
     }
-
-    /* Okay, we can deliver the interrupt: grab it and update PIC state. */
-    intr_vector = cpu_get_interrupt(v, &intr_type);
-    BUG_ON(intr_vector < 0);
-
-    HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
-    svm_inject_extint(v, intr_vector);
-
-    pt_intr_post(v, intr_vector, intr_type);
+    else
+    {
+        HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
+        svm_inject_extint(v, intr_vector);
+        pt_intr_post(v, intr_vector, intr_source);
    }
 }
 
 /*
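The new svm_inject_nmi() and svm_inject_extint() above program the VMCB
EVENTINJ field rather than the virtual-interrupt (vintr) mechanism. As a
hedged illustration of what those bitfield writes amount to, assuming the
EVENTINJ layout documented in the AMD APM (VECTOR in bits 7:0, TYPE in bits
10:8, V in bit 31; type 0 = external interrupt, type 2 = NMI) rather than
copying the eventinj_t definition from the Xen headers:

    /* Illustration only: packs a raw EVENTINJ value by the assumed
     * AMD APM layout; Xen itself uses the eventinj_t bitfield union. */
    #include <assert.h>
    #include <stdint.h>

    #define EVENTINJ_TYPE_INTR  0u   /* external interrupt */
    #define EVENTINJ_TYPE_NMI   2u
    #define EVENTINJ_VALID      (1u << 31)

    static uint32_t eventinj_pack(unsigned int vector, unsigned int type)
    {
        return (vector & 0xffu) | (type << 8) | EVENTINJ_VALID;
    }

    int main(void)
    {
        /* NMI is hardwired to vector 2, as in svm_inject_nmi(). */
        assert(eventinj_pack(2, EVENTINJ_TYPE_NMI) == 0x80000202u);
        /* An external interrupt, e.g. vector 0x30 from the vlapic. */
        assert(eventinj_pack(0x30, EVENTINJ_TYPE_INTR) == 0x80000030u);
        return 0;
    }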
diff -r 739d698986e9 -r 50358c4b37f4 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Jun 20 11:50:16 2007 +0100
@@ -312,26 +312,8 @@ int svm_vmcb_save(struct vcpu *v, struct
     c->sysenter_esp = vmcb->sysenter_esp;
     c->sysenter_eip = vmcb->sysenter_eip;
 
-    /* Save any event/interrupt that was being injected when we last
-     * exited.  Although there are three(!) VMCB fields that can contain
-     * active events, we only need to save at most one: because the
-     * intr_assist logic never delivers an IRQ when any other event is
-     * active, we know that the only possible collision is if we inject
-     * a fault while exitintinfo contains a valid event (the delivery of
-     * which caused the last exit).  In that case replaying just the
-     * first event should cause the same behaviour when we restore. */
-    if ( vmcb->vintr.fields.irq
-         && /* Check it's not a fake interrupt (see svm_intr_assist()) */
-         !(vmcb->general1_intercepts & GENERAL1_INTERCEPT_VINTR) )
-    {
-        c->pending_vector = vmcb->vintr.fields.vector;
-        c->pending_type = 0; /* External interrupt */
-        c->pending_error_valid = 0;
-        c->pending_reserved = 0;
-        c->pending_valid = 1;
-        c->error_code = 0;
-    }
-    else if ( vmcb->exitintinfo.fields.v )
+    /* Save any event/interrupt that was being injected when we last exited. */
+    if ( vmcb->exitintinfo.fields.v )
     {
         c->pending_event = vmcb->exitintinfo.bytes & 0xffffffff;
         c->error_code = vmcb->exitintinfo.fields.errorcode;
@@ -569,10 +551,15 @@ static inline void svm_restore_dr(struct
         __restore_debug_registers(v);
 }
 
-static int svm_interrupts_enabled(struct vcpu *v)
-{
-    unsigned long eflags = v->arch.hvm_svm.vmcb->rflags;
-    return !irq_masked(eflags);
+static int svm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    if ( type == hvm_intack_nmi )
+        return !vmcb->interrupt_shadow;
+
+    ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
+    return !irq_masked(vmcb->rflags) && !vmcb->interrupt_shadow;
 }
 
 static int svm_guest_x86_mode(struct vcpu *v)
@@ -2160,11 +2147,14 @@ static inline void svm_do_msr_access(
 
 static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
 {
+    enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
+
     __update_guest_eip(vmcb, 1);
 
     /* Check for interrupt not handled or new interrupt. */
-    if ( (vmcb->rflags & X86_EFLAGS_IF) &&
-         (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) ) {
+    if ( vmcb->eventinj.fields.v ||
+         ((type != hvm_intack_none) && hvm_interrupts_enabled(current, type)) )
+    {
         HVMTRACE_1D(HLT, current, /*int pending=*/ 1);
         return;
     }
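The reworked svm_interrupts_enabled() above gates each source differently:
an ExtInt needs RFLAGS.IF set and no interrupt shadow, while an NMI ignores
RFLAGS.IF but still honours the shadow. A compilable restatement of that
rule follows (a standalone sketch, not the Xen code; X86_EFLAGS_IF is bit 9
of RFLAGS):

    #include <assert.h>
    #include <stdbool.h>

    #define X86_EFLAGS_IF (1ul << 9)

    /* Mirror of the gating rule in svm_interrupts_enabled(). */
    static bool deliverable(unsigned long rflags, bool intr_shadow,
                            bool is_nmi)
    {
        if ( is_nmi )
            return !intr_shadow;               /* IF is irrelevant for NMI */
        return (rflags & X86_EFLAGS_IF) && !intr_shadow;
    }

    int main(void)
    {
        assert(deliverable(0, false, true));              /* NMI, IF clear */
        assert(!deliverable(X86_EFLAGS_IF, true, false));  /* shadowed IRQ */
        assert(deliverable(X86_EFLAGS_IF, false, false));  /* normal IRQ */
        return 0;
    }

The same rule also drives the rewritten HLT wake-up test: the vCPU goes
idle only if nothing is already being injected and no pending source is
currently deliverable.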
diff -r 739d698986e9 -r 50358c4b37f4 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c        Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/arch/x86/hvm/vioapic.c        Wed Jun 20 11:50:16 2007 +0100
@@ -254,17 +254,11 @@ static void ioapic_inj_irq(
     HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d",
                 vector, trig_mode, delivery_mode);
 
-    switch ( delivery_mode )
-    {
-    case dest_Fixed:
-    case dest_LowestPrio:
-        if ( vlapic_set_irq(target, vector, trig_mode) )
-            vcpu_kick(vlapic_vcpu(target));
-        break;
-    default:
-        gdprintk(XENLOG_WARNING, "error delivery mode %d\n", delivery_mode);
-        break;
-    }
+    ASSERT((delivery_mode == dest_Fixed) ||
+           (delivery_mode == dest_LowestPrio));
+
+    if ( vlapic_set_irq(target, vector, trig_mode) )
+        vcpu_kick(vlapic_vcpu(target));
 }
 
 static uint32_t ioapic_get_delivery_bitmask(
@@ -368,7 +362,6 @@ static void vioapic_deliver(struct hvm_h
     }
 
     case dest_Fixed:
-    case dest_ExtINT:
     {
         uint8_t bit;
         for ( bit = 0; deliver_bitmask != 0; bit++ )
@@ -393,10 +386,21 @@ static void vioapic_deliver(struct hvm_h
         break;
     }
 
-    case dest_SMI:
     case dest_NMI:
-    case dest_INIT:
-    case dest__reserved_2:
+    {
+        uint8_t bit;
+        for ( bit = 0; deliver_bitmask != 0; bit++ )
+        {
+            if ( !(deliver_bitmask & (1 << bit)) )
+                continue;
+            deliver_bitmask &= ~(1 << bit);
+            if ( ((v = vioapic_domain(vioapic)->vcpu[bit]) != NULL) &&
+                 !test_and_set_bool(v->arch.hvm_vcpu.nmi_pending) )
+                vcpu_kick(v);
+        }
+        break;
+    }
+
     default:
         gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
                  delivery_mode);
diff -r 739d698986e9 -r 50358c4b37f4 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Wed Jun 20 11:50:16 2007 +0100
@@ -294,7 +294,8 @@ static int vlapic_accept_irq(struct vcpu
         break;
 
     case APIC_DM_NMI:
-        gdprintk(XENLOG_WARNING, "Ignoring guest NMI\n");
+        if ( !test_and_set_bool(v->arch.hvm_vcpu.nmi_pending) )
+            vcpu_kick(v);
         break;
 
     case APIC_DM_INIT:
@@ -747,7 +748,7 @@ int vlapic_has_interrupt(struct vcpu *v)
     return highest_irr;
 }
 
-int cpu_get_apic_interrupt(struct vcpu *v, int *mode)
+int cpu_get_apic_interrupt(struct vcpu *v)
 {
     int vector = vlapic_has_interrupt(v);
     struct vlapic *vlapic = vcpu_vlapic(v);
@@ -757,8 +758,6 @@ int cpu_get_apic_interrupt(struct vcpu *
 
     vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
     vlapic_clear_irr(vector, vlapic);
-
-    *mode = APIC_DM_FIXED;
     return vector;
 }
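Both delivery paths just changed (IOAPIC dest_NMI above and the local APIC
APIC_DM_NMI case) share one idiom: latch at most one NMI per vCPU with
test_and_set_bool(nmi_pending), kicking only on the 0-to-1 transition, while
hvm_vcpu_ack_pending_irq() consumes the latch with test_and_clear_bool(). A
sketch of the latch using C11 atomics in place of Xen's helpers (an assumed
equivalent, for illustration only):

    /* Single-bit NMI latch: multiple senders collapse into one pending
     * NMI; only the sender that makes the 0->1 transition kicks. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool nmi_pending;

    static void vcpu_kick_stub(void) { puts("kick vcpu"); }

    static void send_nmi(void)
    {
        /* Mirrors: if ( !test_and_set_bool(...nmi_pending) ) vcpu_kick(v); */
        if ( !atomic_exchange(&nmi_pending, true) )
            vcpu_kick_stub();
    }

    static int ack_nmi(void)
    {
        /* Consumer side, as in hvm_vcpu_ack_pending_irq(). */
        return atomic_exchange(&nmi_pending, false);
    }

    int main(void)
    {
        send_nmi();      /* kicks */
        send_nmi();      /* already latched: no second kick */
        printf("acked=%d acked=%d\n", ack_nmi(), ack_nmi()); /* 1 then 0 */
        return 0;
    }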
diff -r 739d698986e9 -r 50358c4b37f4 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c       Wed Jun 20 11:50:16 2007 +0100
@@ -102,8 +102,8 @@ static void update_tpr_threshold(struct
 
 asmlinkage void vmx_intr_assist(void)
 {
-    int has_ext_irq, intr_vector, intr_type = 0;
-    unsigned long eflags, intr_shadow;
+    int intr_vector;
+    enum hvm_intack intr_source;
     struct vcpu *v = current;
     unsigned int idtv_info_field;
     unsigned long inst_len;
@@ -114,65 +114,67 @@ asmlinkage void vmx_intr_assist(void)
 
     update_tpr_threshold(vcpu_vlapic(v));
 
-    has_ext_irq = cpu_has_pending_irq(v);
+    do {
+        intr_source = hvm_vcpu_has_pending_irq(v);
 
-    if ( unlikely(v->arch.hvm_vmx.vector_injected) )
-    {
-        v->arch.hvm_vmx.vector_injected = 0;
-        if ( unlikely(has_ext_irq) )
-            enable_irq_window(v);
-        return;
-    }
+        if ( unlikely(v->arch.hvm_vmx.vector_injected) )
+        {
+            v->arch.hvm_vmx.vector_injected = 0;
+            if ( unlikely(intr_source != hvm_intack_none) )
+                enable_irq_window(v);
+            return;
+        }
 
-    /* This could be moved earlier in the VMX resume sequence. */
-    idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
-    if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
-    {
-        __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
+        /* This could be moved earlier in the VMX resume sequence. */
+        idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
+        if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
+        {
+            __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
+
+            /*
+             * Safe: the length will only be interpreted for software
+             * exceptions and interrupts. If we get here then delivery of some
+             * event caused a fault, and this always results in defined
+             * VM_EXIT_INSTRUCTION_LEN.
+             */
+            inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
+            __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
+
+            if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
+                __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
+                          __vmread(IDT_VECTORING_ERROR_CODE));
+            if ( unlikely(intr_source != hvm_intack_none) )
+                enable_irq_window(v);
+
+            HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
+            return;
+        }
+
+        if ( likely(intr_source == hvm_intack_none) )
+            return;
 
         /*
-         * Safe: the length will only be interpreted for software exceptions
-         * and interrupts. If we get here then delivery of some event caused a
-         * fault, and this always results in defined VM_EXIT_INSTRUCTION_LEN.
+         * TODO: Better NMI handling. Shouldn't wait for EFLAGS.IF==1, but
+         * should wait for exit from 'NMI blocking' window (NMI injection to
+         * next IRET). This requires us to use the new 'virtual NMI' support.
         */
-        inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
-        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
+        if ( !hvm_interrupts_enabled(v, intr_source) )
+        {
+            enable_irq_window(v);
+            return;
+        }
+    } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
 
-    if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
-        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
-                  __vmread(IDT_VECTORING_ERROR_CODE));
-    if ( unlikely(has_ext_irq) )
-        enable_irq_window(v);
-
-    HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
-    return;
+    if ( intr_source == hvm_intack_nmi )
+    {
+        vmx_inject_nmi(v);
     }
-
-    if ( likely(!has_ext_irq) )
-        return;
-
-    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
-    if ( unlikely(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)) )
+    else
     {
-        enable_irq_window(v);
-        HVM_DBG_LOG(DBG_LEVEL_1, "interruptibility");
-        return;
+        HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
+        vmx_inject_extint(v, intr_vector);
+        pt_intr_post(v, intr_vector, intr_source);
     }
-
-    eflags = __vmread(GUEST_RFLAGS);
-    if ( irq_masked(eflags) )
-    {
-        enable_irq_window(v);
-        return;
-    }
-
-    intr_vector = cpu_get_interrupt(v, &intr_type);
-    BUG_ON(intr_vector < 0);
-
-    HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
-    vmx_inject_extint(v, intr_vector, VMX_DELIVER_NO_ERROR_CODE);
-
-    pt_intr_post(v, intr_vector, intr_type);
 }
 
 /*
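The rewritten vmx_intr_assist() folds the old interruptibility and EFLAGS
checks into hvm_interrupts_enabled() (implemented in the vmx.c hunk that
follows) and, when the pending source is not deliverable, opens the
interrupt window instead of consuming the interrupt. A sketch of that
decision, assuming the Intel SDM encoding of the interruptibility-state
field (bit 0 = blocking by STI, bit 1 = blocking by MOV SS):

    #include <stdio.h>

    #define VMX_INTR_SHADOW_STI    (1u << 0)
    #define VMX_INTR_SHADOW_MOV_SS (1u << 1)
    #define X86_EFLAGS_IF          (1u << 9)

    enum action { DO_NOTHING, OPEN_IRQ_WINDOW, INJECT_NOW };

    /* has_pending: some source (PIC/LAPIC/NMI) wants service.
     * is_nmi: that source is the per-vCPU NMI latch. */
    static enum action intr_assist_step(int has_pending, int is_nmi,
                                        unsigned long interruptibility,
                                        unsigned long rflags)
    {
        unsigned long shadow =
            interruptibility & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS);

        if ( !has_pending )
            return DO_NOTHING;

        /* NMI ignores EFLAGS.IF; both respect the instruction shadow.
         * (The in-tree TODO notes NMI should really wait for the end of
         * the NMI-blocking window via hardware 'virtual NMI' support.) */
        if ( shadow || (!is_nmi && !(rflags & X86_EFLAGS_IF)) )
            return OPEN_IRQ_WINDOW;   /* retry after the guest unblocks */

        return INJECT_NOW;
    }

    int main(void)
    {
        printf("%d\n", intr_assist_step(1, 0, VMX_INTR_SHADOW_STI,
                                        X86_EFLAGS_IF)); /* 1: open window */
        printf("%d\n", intr_assist_step(1, 1, 0, 0));    /* 2: inject NMI */
        return 0;
    }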
diff -r 739d698986e9 -r 50358c4b37f4 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 20 11:50:16 2007 +0100
@@ -1115,16 +1115,26 @@ static int vmx_nx_enabled(struct vcpu *v
     return v->arch.hvm_vmx.efer & EFER_NX;
 }
 
-static int vmx_interrupts_enabled(struct vcpu *v)
-{
-    unsigned long eflags = __vmread(GUEST_RFLAGS);
-    return !irq_masked(eflags);
-}
-
+static int vmx_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
+{
+    unsigned long intr_shadow, eflags;
+
+    ASSERT(v == current);
+
+    intr_shadow  = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+    intr_shadow &= VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS;
+
+    if ( type == hvm_intack_nmi )
+        return !intr_shadow;
+
+    ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
+    eflags = __vmread(GUEST_RFLAGS);
+    return !irq_masked(eflags) && !intr_shadow;
+}
 
 static void vmx_update_host_cr3(struct vcpu *v)
 {
-    ASSERT( (v == current) || !vcpu_runnable(v) );
+    ASSERT((v == current) || !vcpu_runnable(v));
     vmx_vmcs_enter(v);
     __vmwrite(HOST_CR3, v->arch.cr3);
     vmx_vmcs_exit(v);
@@ -1132,7 +1142,7 @@ static void vmx_update_host_cr3(struct v
 
 static void vmx_update_guest_cr3(struct vcpu *v)
 {
-    ASSERT( (v == current) || !vcpu_runnable(v) );
+    ASSERT((v == current) || !vcpu_runnable(v));
     vmx_vmcs_enter(v);
     __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
     vmx_vmcs_exit(v);
diff -r 739d698986e9 -r 50358c4b37f4 xen/arch/x86/hvm/vpic.c
--- a/xen/arch/x86/hvm/vpic.c   Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/arch/x86/hvm/vpic.c   Wed Jun 20 11:50:16 2007 +0100
@@ -499,7 +499,7 @@ void vpic_irq_negative_edge(struct domai
     vpic_update_int_output(vpic);
 }
 
-int cpu_get_pic_interrupt(struct vcpu *v, int *type)
+int cpu_get_pic_interrupt(struct vcpu *v)
 {
     int irq, vector;
     struct hvm_hw_vpic *vpic = &v->domain->arch.hvm_domain.vpic[0];
@@ -512,6 +512,5 @@ int cpu_get_pic_interrupt(struct vcpu *v
         return -1;
 
     vector = vpic[irq >> 3].irq_base + (irq & 7);
-    *type = APIC_DM_EXTINT;
     return vector;
 }
diff -r 739d698986e9 -r 50358c4b37f4 xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c    Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/arch/x86/hvm/vpt.c    Wed Jun 20 11:50:16 2007 +0100
@@ -155,7 +155,8 @@ void pt_update_irq(struct vcpu *v)
     }
 }
 
-static struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
+static struct periodic_time *is_pt_irq(
+    struct vcpu *v, int vector, enum hvm_intack src)
 {
     struct list_head *head = &v->arch.hvm_vcpu.tm_list;
     struct periodic_time *pt;
@@ -174,7 +175,7 @@ static struct periodic_time *is_pt_irq(s
             return pt;
         }
 
-        vec = get_isa_irq_vector(v, pt->irq, type);
+        vec = get_isa_irq_vector(v, pt->irq, src);
 
         /* RTC irq need special care */
         if ( (vector != vec) || (pt->irq == 8 && !is_rtc_periodic_irq(rtc)) )
@@ -186,7 +187,7 @@ static struct periodic_time *is_pt_irq(s
     return NULL;
 }
 
-void pt_intr_post(struct vcpu *v, int vector, int type)
+void pt_intr_post(struct vcpu *v, int vector, enum hvm_intack src)
 {
     struct periodic_time *pt;
     time_cb *cb;
@@ -194,7 +195,7 @@ void pt_intr_post(struct vcpu *v, int ve
 
     spin_lock(&v->arch.hvm_vcpu.tm_lock);
 
-    pt = is_pt_irq(v, vector, type);
+    pt = is_pt_irq(v, vector, src);
     if ( pt == NULL )
     {
         spin_unlock(&v->arch.hvm_vcpu.tm_lock);
diff -r 739d698986e9 -r 50358c4b37f4 xen/include/asm-x86/event.h
--- a/xen/include/asm-x86/event.h       Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/include/asm-x86/event.h       Wed Jun 20 11:50:16 2007 +0100
@@ -10,7 +10,6 @@
 #define __ASM_EVENT_H__
 
 #include <xen/shared.h>
-#include <asm/hvm/irq.h> /* cpu_has_pending_irq() */
 
 static inline void vcpu_kick(struct vcpu *v)
 {
diff -r 739d698986e9 -r 50358c4b37f4 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Jun 20 11:50:16 2007 +0100
@@ -55,6 +55,14 @@ typedef struct segment_register {
     u64        base;
 } __attribute__ ((packed)) segment_register_t;
 
+/* Interrupt acknowledgement sources. */
+enum hvm_intack {
+    hvm_intack_none,
+    hvm_intack_pic,
+    hvm_intack_lapic,
+    hvm_intack_nmi
+};
+
 /*
  * The hardware virtual machine (HVM) interface abstracts away from the
  * x86/x86_64 CPU virtualization assist specifics. Currently this interface
@@ -106,7 +114,7 @@ struct hvm_function_table {
     int (*long_mode_enabled)(struct vcpu *v);
     int (*pae_enabled)(struct vcpu *v);
     int (*nx_enabled)(struct vcpu *v);
-    int (*interrupts_enabled)(struct vcpu *v);
+    int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
     int (*guest_x86_mode)(struct vcpu *v);
     unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
     unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
@@ -199,16 +207,16 @@ hvm_long_mode_enabled(struct vcpu *v)
 #define hvm_long_mode_enabled(v) (v,0)
 #endif
 
- static inline int
+static inline int
 hvm_pae_enabled(struct vcpu *v)
 {
     return hvm_funcs.pae_enabled(v);
 }
 
 static inline int
-hvm_interrupts_enabled(struct vcpu *v)
-{
-    return hvm_funcs.interrupts_enabled(v);
+hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
+{
+    return hvm_funcs.interrupts_enabled(v, type);
 }
 
 static inline int
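The hvm.h change above widens the interrupts_enabled hook so each vendor
layer (SVM or VMX) can gate per source. A minimal stand-in for that
function-table dispatch, with illustrative stub definitions rather than the
Xen ones:

    #include <stddef.h>
    #include <stdio.h>

    enum hvm_intack { hvm_intack_none, hvm_intack_pic,
                      hvm_intack_lapic, hvm_intack_nmi };

    struct vcpu;  /* opaque in this sketch */

    struct hvm_function_table {
        int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack type);
    };

    /* Stub backend: pretend only NMI is deliverable right now. */
    static int stub_interrupts_enabled(struct vcpu *v, enum hvm_intack t)
    {
        (void)v;
        return t == hvm_intack_nmi;
    }

    static struct hvm_function_table hvm_funcs = {
        .interrupts_enabled = stub_interrupts_enabled,
    };

    /* Shape of the wrapper from hvm.h: dispatch through the table. */
    static int hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
    {
        return hvm_funcs.interrupts_enabled(v, type);
    }

    int main(void)
    {
        printf("%d %d\n",
               hvm_interrupts_enabled(NULL, hvm_intack_nmi),    /* 1 */
               hvm_interrupts_enabled(NULL, hvm_intack_lapic)); /* 0 */
        return 0;
    }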
diff -r 739d698986e9 -r 50358c4b37f4 xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h     Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/include/asm-x86/hvm/irq.h     Wed Jun 20 11:50:16 2007 +0100
@@ -24,10 +24,10 @@
 
 #include <xen/types.h>
 #include <xen/spinlock.h>
+#include <asm/hvm/hvm.h>
 #include <asm/hvm/vpic.h>
 #include <asm/hvm/vioapic.h>
 #include <public/hvm/save.h>
-
 
 struct hvm_irq {
     /*
@@ -58,7 +58,6 @@ struct hvm_irq {
             HVMIRQ_callback_gsi,
             HVMIRQ_callback_pci_intx
         } callback_via_type;
-        uint32_t pad; /* So the next field will be aligned */
     };
     union {
         uint32_t gsi;
@@ -115,9 +114,12 @@ void hvm_set_callback_irq_level(void);
 void hvm_set_callback_irq_level(void);
 void hvm_set_callback_via(struct domain *d, uint64_t via);
 
-int cpu_get_interrupt(struct vcpu *v, int *type);
-int cpu_has_pending_irq(struct vcpu *v);
-int get_isa_irq_vector(struct vcpu *vcpu, int irq, int type);
+/* Check/Acknowledge next pending interrupt. */
+enum hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
+int hvm_vcpu_ack_pending_irq(
+    struct vcpu *v, enum hvm_intack type, int *vector);
+
+int get_isa_irq_vector(struct vcpu *vcpu, int irq, enum hvm_intack src);
 int is_isa_irq_masked(struct vcpu *v, int isa_irq);
 
 #endif /* __ASM_X86_HVM_IRQ_H__ */
diff -r 739d698986e9 -r 50358c4b37f4 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h    Wed Jun 20 11:50:16 2007 +0100
@@ -30,11 +30,13 @@ struct hvm_vcpu {
     unsigned long       hw_cr3;     /* value we give to HW to use */
-    unsigned long       ioflags;
     struct hvm_io_op    io_op;
     struct vlapic       vlapic;
     s64                 cache_tsc_offset;
     u64                 guest_time;
+
+    /* Is an NMI pending for delivery to this VCPU core? */
+    bool_t              nmi_pending; /* NB. integrate flag with save/restore */
 
     /* Lock and list for virtual platform timers. */
     spinlock_t          tm_lock;
diff -r 739d698986e9 -r 50358c4b37f4 xen/include/asm-x86/hvm/vlapic.h
--- a/xen/include/asm-x86/hvm/vlapic.h  Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/include/asm-x86/hvm/vlapic.h  Wed Jun 20 11:50:16 2007 +0100
@@ -76,7 +76,7 @@ int vlapic_find_highest_irr(struct vlapi
 int vlapic_find_highest_irr(struct vlapic *vlapic);
 
 int vlapic_has_interrupt(struct vcpu *v);
-int cpu_get_apic_interrupt(struct vcpu *v, int *mode);
+int cpu_get_apic_interrupt(struct vcpu *v);
 
 int vlapic_init(struct vcpu *v);
 void vlapic_destroy(struct vcpu *v);
diff -r 739d698986e9 -r 50358c4b37f4 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Wed Jun 20 11:50:16 2007 +0100
@@ -336,9 +336,16 @@ static inline void vmx_inject_sw_excepti
                            instruction_len);
 }
 
-static inline void vmx_inject_extint(struct vcpu *v, int trap, int error_code)
-{
-    __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code, 0);
+static inline void vmx_inject_extint(struct vcpu *v, int trap)
+{
+    __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR,
+                           VMX_DELIVER_NO_ERROR_CODE, 0);
+}
+
+static inline void vmx_inject_nmi(struct vcpu *v)
+{
+    __vmx_inject_exception(v, 2, INTR_TYPE_NMI,
+                           VMX_DELIVER_NO_ERROR_CODE, 0);
 }
 
 #endif /* __ASM_X86_HVM_VMX_VMX_H__ */
diff -r 739d698986e9 -r 50358c4b37f4 xen/include/asm-x86/hvm/vpic.h
--- a/xen/include/asm-x86/hvm/vpic.h    Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/include/asm-x86/hvm/vpic.h    Wed Jun 20 11:50:16 2007 +0100
@@ -32,7 +32,7 @@ void vpic_irq_positive_edge(struct domai
 void vpic_irq_positive_edge(struct domain *d, int irq);
 void vpic_irq_negative_edge(struct domain *d, int irq);
 void vpic_init(struct domain *d);
-int cpu_get_pic_interrupt(struct vcpu *v, int *type);
+int cpu_get_pic_interrupt(struct vcpu *v);
 int is_periodic_irq(struct vcpu *v, int irq, int type);
 
 #endif /* __ASM_X86_HVM_VPIC_H__ */
diff -r 739d698986e9 -r 50358c4b37f4 xen/include/asm-x86/hvm/vpt.h
--- a/xen/include/asm-x86/hvm/vpt.h     Wed Jun 20 10:55:37 2007 +0100
+++ b/xen/include/asm-x86/hvm/vpt.h     Wed Jun 20 11:50:16 2007 +0100
@@ -29,6 +29,7 @@
 #include <xen/timer.h>
 #include <xen/list.h>
 #include <asm/hvm/vpic.h>
+#include <asm/hvm/irq.h>
 #include <public/hvm/save.h>
 
 struct HPETState;
@@ -119,7 +120,7 @@ void pt_freeze_time(struct vcpu *v);
 void pt_freeze_time(struct vcpu *v);
 void pt_thaw_time(struct vcpu *v);
 void pt_update_irq(struct vcpu *v);
-void pt_intr_post(struct vcpu *v, int vector, int type);
+void pt_intr_post(struct vcpu *v, int vector, enum hvm_intack src);
 void pt_reset(struct vcpu *v);
 void pt_migrate(struct vcpu *v);
 void create_periodic_time(

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog