
[Xen-changelog] [xen-unstable] hvm: Re-jig event delivery logic to better integrate TPR management.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1191244275 -3600
# Node ID 9eff4c97053b866a1803ce1a85a231e2821ea598
# Parent  69a74ac976cbc900ec0b26ba99d1e621ffa46953
hvm: Re-jig event delivery logic to better integrate TPR management.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           |    2 
 xen/arch/x86/hvm/irq.c           |   84 ++++++++++++++++------------
 xen/arch/x86/hvm/svm/intr.c      |   91 +++++++++---------------------
 xen/arch/x86/hvm/svm/svm.c       |  115 +++++++++++++--------------------------
 xen/arch/x86/hvm/svm/vmcb.c      |    9 +--
 xen/arch/x86/hvm/vlapic.c        |   31 +++++-----
 xen/arch/x86/hvm/vmx/intr.c      |   88 ++++++++++-------------------
 xen/arch/x86/hvm/vmx/vmx.c       |   34 ++++++++---
 xen/arch/x86/hvm/vpic.c          |    2 
 xen/arch/x86/hvm/vpt.c           |   15 ++---
 xen/include/asm-x86/hvm/hvm.h    |   33 ++++++++---
 xen/include/asm-x86/hvm/irq.h    |    8 +-
 xen/include/asm-x86/hvm/vlapic.h |    4 -
 xen/include/asm-x86/hvm/vpic.h   |    2 
 xen/include/asm-x86/hvm/vpt.h    |    2 
 15 files changed, 233 insertions(+), 287 deletions(-)
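
For readers skimming the diff: the heart of the change is that the scalar
enum hvm_intack (an interrupt *source*) is replaced by a small struct
hvm_intack carrying both the source and the pending vector, and the boolean
interrupts_enabled() hook becomes interrupt_blocked(), which reports *why*
delivery is blocked (interrupt shadow, RFLAGS.IF, TPR, or NMI blocking).
Below is a minimal, self-contained sketch of the resulting delivery loop
used by svm_intr_assist() and vmx_intr_assist(); the stub functions merely
stand in for hvm_vcpu_has_pending_irq(), hvm_interrupt_blocked() and
hvm_vcpu_ack_pending_irq() and are not the real implementations:

    /* Illustrative sketch only -- stubs, not the Xen implementations. */
    #include <stdio.h>
    #include <stdint.h>

    enum hvm_intsrc { hvm_intsrc_none, hvm_intsrc_pic, hvm_intsrc_lapic, hvm_intsrc_nmi };
    enum hvm_intblk { hvm_intblk_none, hvm_intblk_shadow, hvm_intblk_rflags_ie,
                      hvm_intblk_tpr, hvm_intblk_nmi_iret };
    struct hvm_intack { uint8_t source; uint8_t vector; };

    #define hvm_intack_none     ( (struct hvm_intack) { hvm_intsrc_none,  0 } )
    #define hvm_intack_lapic(v) ( (struct hvm_intack) { hvm_intsrc_lapic, v } )

    /* Stubs: a single pending LAPIC vector 0x31 which is never blocked. */
    static int pending = 1;
    static struct hvm_intack has_pending(void)
    { return pending ? hvm_intack_lapic(0x31) : hvm_intack_none; }
    static enum hvm_intblk blocked(struct hvm_intack i) { (void)i; return hvm_intblk_none; }
    static struct hvm_intack ack(struct hvm_intack i) { pending = 0; return i; }

    int main(void)
    {
        struct hvm_intack intack;

        do {
            intack = has_pending();
            if ( intack.source == hvm_intsrc_none )
                return 0;             /* nothing to deliver */
            if ( blocked(intack) != hvm_intblk_none )
                return 0;             /* real code opens an interrupt window here */
            intack = ack(intack);     /* the ack may fail; loop and re-evaluate */
        } while ( intack.source == hvm_intsrc_none );

        printf("inject vector 0x%02x from source %u\n",
               (unsigned)intack.vector, (unsigned)intack.source);
        return 0;
    }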

diff -r 69a74ac976cb -r 9eff4c97053b xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Mon Oct 01 14:11:15 2007 +0100
@@ -737,7 +737,7 @@ int hvm_set_cr4(unsigned long value)
     old_cr = v->arch.hvm_vcpu.guest_cr[4];
     v->arch.hvm_vcpu.guest_cr[4] = value;
     hvm_update_guest_cr(v, 4);
-  
+
     /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
     if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
         paging_update_paging_modes(v);
diff -r 69a74ac976cb -r 9eff4c97053b xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c    Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/arch/x86/hvm/irq.c    Mon Oct 01 14:11:15 2007 +0100
@@ -285,49 +285,63 @@ void hvm_set_callback_via(struct domain 
     }
 }
 
-enum hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
+struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
 {
     struct hvm_domain *plat = &v->domain->arch.hvm_domain;
+    int vector;
 
     if ( unlikely(v->nmi_pending) )
         return hvm_intack_nmi;
 
-    if ( vlapic_has_interrupt(v) != -1 )
-        return hvm_intack_lapic;
-
-    if ( !vlapic_accept_pic_intr(v) )
-        return hvm_intack_none;
-
-    return plat->vpic[0].int_output ? hvm_intack_pic : hvm_intack_none;
-}
-
-int hvm_vcpu_ack_pending_irq(struct vcpu *v, enum hvm_intack type, int *vector)
-{
-    switch ( type )
-    {
-    case hvm_intack_nmi:
-        return test_and_clear_bool(v->nmi_pending);
-    case hvm_intack_lapic:
-        return ((*vector = cpu_get_apic_interrupt(v)) != -1);
-    case hvm_intack_pic:
+    if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
+        return hvm_intack_pic(0);
+
+    vector = vlapic_has_pending_irq(v);
+    if ( vector != -1 )
+        return hvm_intack_lapic(vector);
+
+    return hvm_intack_none;
+}
+
+struct hvm_intack hvm_vcpu_ack_pending_irq(
+    struct vcpu *v, struct hvm_intack intack)
+{
+    int vector;
+
+    switch ( intack.source )
+    {
+    case hvm_intsrc_nmi:
+        if ( !test_and_clear_bool(v->nmi_pending) )
+            intack = hvm_intack_none;
+        break;
+    case hvm_intsrc_pic:
         ASSERT(v->vcpu_id == 0);
-        return ((*vector = cpu_get_pic_interrupt(v)) != -1);
+        if ( (vector = vpic_ack_pending_irq(v)) == -1 )
+            intack = hvm_intack_none;
+        else
+            intack.vector = (uint8_t)vector;
+        break;
+    case hvm_intsrc_lapic:
+        if ( !vlapic_ack_pending_irq(v, intack.vector) )
+            intack = hvm_intack_none;
+        break;
     default:
-        break;
-    }
-
-    return 0;
-}
-
-int get_isa_irq_vector(struct vcpu *v, int isa_irq, enum hvm_intack src)
+        intack = hvm_intack_none;
+        break;
+    }
+
+    return intack;
+}
+
+int get_isa_irq_vector(struct vcpu *v, int isa_irq, enum hvm_intsrc src)
 {
     unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
 
-    if ( src == hvm_intack_pic )
+    if ( src == hvm_intsrc_pic )
         return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                 + (isa_irq & 7));
 
-    ASSERT(src == hvm_intack_lapic);
+    ASSERT(src == hvm_intsrc_lapic);
     return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
 }
 
@@ -345,18 +359,18 @@ int is_isa_irq_masked(struct vcpu *v, in
 
 int hvm_local_events_need_delivery(struct vcpu *v)
 {
-    enum hvm_intack type;
+    struct hvm_intack intack;
 
     /* TODO: Get rid of event-channel special case. */
     if ( vcpu_info(v, evtchn_upcall_pending) )
-        type = hvm_intack_pic;
+        intack = hvm_intack_pic(0);
     else
-        type = hvm_vcpu_has_pending_irq(v);
-
-    if ( likely(type == hvm_intack_none) )
+        intack = hvm_vcpu_has_pending_irq(v);
+
+    if ( likely(intack.source == hvm_intsrc_none) )
         return 0;
 
-    return hvm_interrupts_enabled(v, type);
+    return !hvm_interrupt_blocked(v, intack);
 }
 
 #if 0 /* Keep for debugging */
diff -r 69a74ac976cb -r 9eff4c97053b xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c       Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c       Mon Oct 01 14:11:15 2007 +0100
@@ -39,19 +39,6 @@
 #include <xen/domain_page.h>
 #include <asm/hvm/trace.h>
 
-static void svm_inject_dummy_vintr(struct vcpu *v)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    vintr_t intr = vmcb->vintr;
-
-    intr.fields.irq = 1;
-    intr.fields.intr_masking = 1;
-    intr.fields.vector = 0;
-    intr.fields.prio = 0xF;
-    intr.fields.ign_tpr = 1;
-    vmcb->vintr = intr;
-}
-    
 static void svm_inject_nmi(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -80,11 +67,14 @@ static void svm_inject_extint(struct vcp
     vmcb->eventinj = event;
 }
     
-static void enable_intr_window(struct vcpu *v, enum hvm_intack intr_source)
+static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    vintr_t intr;
 
-    ASSERT(intr_source != hvm_intack_none);
+    ASSERT(intack.source != hvm_intsrc_none);
+
+    HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
 
     /*
      * Create a dummy virtual interrupt to intercept as soon as the
@@ -95,53 +85,29 @@ static void enable_intr_window(struct vc
      * track 'NMI blocking' from NMI injection until IRET. This can be done
      * quite easily in software by intercepting the unblocking IRET.
      */
+    intr = vmcb->vintr;
+    intr.fields.irq     = 1;
+    intr.fields.vector  = 0;
+    intr.fields.prio    = intack.vector >> 4;
+    intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic);
+    vmcb->vintr = intr;
     vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
-    HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
-    svm_inject_dummy_vintr(v);
-}
-
-static void update_cr8_intercept(
-    struct vcpu *v, enum hvm_intack masked_intr_source)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    struct vlapic *vlapic = vcpu_vlapic(v);
-    int max_irr;
-
-    vmcb->cr_intercepts &= ~CR_INTERCEPT_CR8_WRITE;
-
-    /*
-     * If ExtInts are masked then that dominates the TPR --- the 'interrupt
-     * window' has already been enabled in this case.
-     */
-    if ( (masked_intr_source == hvm_intack_lapic) ||
-         (masked_intr_source == hvm_intack_pic) )
-        return;
-
-    /* Is there an interrupt pending at the LAPIC? Nothing to do if not. */
-    if ( !vlapic_enabled(vlapic) || 
-         ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
-        return;
-
-    /* Highest-priority pending interrupt is masked by the TPR? */
-    if ( (vmcb->vintr.fields.tpr & 0xf) >= (max_irr >> 4) )
-        vmcb->cr_intercepts |= CR_INTERCEPT_CR8_WRITE;
 }
 
 asmlinkage void svm_intr_assist(void) 
 {
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    enum hvm_intack intr_source;
-    int intr_vector;
+    struct hvm_intack intack;
 
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
     hvm_set_callback_irq_level();
 
     do {
-        intr_source = hvm_vcpu_has_pending_irq(v);
-        if ( likely(intr_source == hvm_intack_none) )
-            goto out;
+        intack = hvm_vcpu_has_pending_irq(v);
+        if ( likely(intack.source == hvm_intsrc_none) )
+            return;
 
         /*
          * Pending IRQs must be delayed if:
@@ -158,31 +124,30 @@ asmlinkage void svm_intr_assist(void)
          * 2. The IRQ is masked.
          */
         if ( unlikely(vmcb->eventinj.fields.v) ||
-             !hvm_interrupts_enabled(v, intr_source) )
+             hvm_interrupt_blocked(v, intack) )
         {
-            enable_intr_window(v, intr_source);
-            goto out;
+            enable_intr_window(v, intack);
+            return;
         }
-    } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
 
-    if ( intr_source == hvm_intack_nmi )
+        intack = hvm_vcpu_ack_pending_irq(v, intack);
+    } while ( intack.source == hvm_intsrc_none );
+
+    if ( intack.source == hvm_intsrc_nmi )
     {
         svm_inject_nmi(v);
     }
     else
     {
-        HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
-        svm_inject_extint(v, intr_vector);
-        pt_intr_post(v, intr_vector, intr_source);
+        HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
+        svm_inject_extint(v, intack.vector);
+        pt_intr_post(v, intack);
     }
 
     /* Is there another IRQ to queue up behind this one? */
-    intr_source = hvm_vcpu_has_pending_irq(v);
-    if ( unlikely(intr_source != hvm_intack_none) )
-        enable_intr_window(v, intr_source);
-
- out:
-    update_cr8_intercept(v, intr_source);
+    intack = hvm_vcpu_has_pending_irq(v);
+    if ( unlikely(intack.source != hvm_intsrc_none) )
+        enable_intr_window(v, intack);
 }
 
 /*
diff -r 69a74ac976cb -r 9eff4c97053b xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Mon Oct 01 14:11:15 2007 +0100
@@ -425,16 +425,28 @@ static void svm_restore_dr(struct vcpu *
         __restore_debug_registers(v);
 }
 
-static int svm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    if ( type == hvm_intack_nmi )
-        return !vmcb->interrupt_shadow;
-
-    ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
-    return (!irq_masked(guest_cpu_user_regs()->eflags) &&
-            !vmcb->interrupt_shadow);
+static enum hvm_intblk svm_interrupt_blocked(
+    struct vcpu *v, struct hvm_intack intack)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    if ( vmcb->interrupt_shadow )
+        return hvm_intblk_shadow;
+
+    if ( intack.source == hvm_intsrc_nmi )
+        return hvm_intblk_none;
+
+    ASSERT((intack.source == hvm_intsrc_pic) ||
+           (intack.source == hvm_intsrc_lapic));
+
+    if ( irq_masked(guest_cpu_user_regs()->eflags) )
+        return hvm_intblk_rflags_ie;
+
+    if ( (intack.source == hvm_intsrc_lapic) &&
+         ((vmcb->vintr.fields.tpr & 0xf) >= (intack.vector >> 4)) )
+        return hvm_intblk_tpr;
+
+    return hvm_intblk_none;
 }
 
 static int svm_guest_x86_mode(struct vcpu *v)
@@ -855,7 +867,7 @@ static struct hvm_function_table svm_fun
     .vcpu_destroy         = svm_vcpu_destroy,
     .save_cpu_ctxt        = svm_save_vmcb_ctxt,
     .load_cpu_ctxt        = svm_load_vmcb_ctxt,
-    .interrupts_enabled   = svm_interrupts_enabled,
+    .interrupt_blocked    = svm_interrupt_blocked,
     .guest_x86_mode       = svm_guest_x86_mode,
     .get_segment_base     = svm_get_segment_base,
     .get_segment_register = svm_get_segment_register,
@@ -1552,7 +1564,6 @@ static void mov_from_cr(int cr, int gp, 
 {
     unsigned long value = 0;
     struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     switch ( cr )
@@ -1560,21 +1571,14 @@ static void mov_from_cr(int cr, int gp, 
     case 0:
         value = v->arch.hvm_vcpu.guest_cr[0];
         break;
-    case 2:
-        value = vmcb->cr2;
-        break;
     case 3:
         value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
         break;
     case 4:
         value = (unsigned long)v->arch.hvm_vcpu.guest_cr[4];
         break;
-    case 8:
-        value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
-        value = (value & 0xF0) >> 4;
-        break;
-        
     default:
+        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
         domain_crash(v->domain);
         return;
     }
@@ -1590,7 +1594,6 @@ static int mov_to_cr(int gpreg, int cr, 
 {
     unsigned long value;
     struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     value = get_reg(gpreg, regs, vmcb);
@@ -1604,18 +1607,10 @@ static int mov_to_cr(int gpreg, int cr, 
     {
     case 0: 
         return svm_set_cr0(value);
-
     case 3:
         return hvm_set_cr3(value);
-
     case 4:
         return hvm_set_cr4(value);
-
-    case 8:
-        vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
-        vmcb->vintr.fields.tpr = value & 0x0F;
-        break;
-
     default:
         gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
         domain_crash(v->domain);
@@ -1894,13 +1889,14 @@ static void svm_vmexit_do_hlt(struct vmc
 static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
                               struct cpu_user_regs *regs)
 {
-    enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
+    struct hvm_intack intack = hvm_vcpu_has_pending_irq(current);
 
     __update_guest_eip(regs, 1);
 
     /* Check for interrupt not handled or new interrupt. */
     if ( vmcb->eventinj.fields.v ||
-         ((type != hvm_intack_none) && svm_interrupts_enabled(current, type)) )
+         ((intack.source != hvm_intsrc_none) &&
+          !svm_interrupt_blocked(current, intack)) )
     {
         HVMTRACE_1D(HLT, current, /*int pending=*/ 1);
         return;
@@ -2080,13 +2076,11 @@ asmlinkage void svm_vmexit_handler(struc
 
     /*
      * Before doing anything else, we need to sync up the VLAPIC's TPR with
-     * SVM's vTPR if CR8 writes are currently disabled.  It's OK if the 
-     * guest doesn't touch the CR8 (e.g. 32-bit Windows) because we update
-     * the vTPR on MMIO writes to the TPR
+     * SVM's vTPR. It's OK if the guest doesn't touch CR8 (e.g. 32-bit Windows)
+     * because we update the vTPR on MMIO writes to the TPR.
      */
-    if ( !(vmcb->cr_intercepts & CR_INTERCEPT_CR8_WRITE) )
-        vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI,
-                       (vmcb->vintr.fields.tpr & 0x0F) << 4);
+    vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI,
+                   (vmcb->vintr.fields.tpr & 0x0F) << 4);
 
     exit_reason = vmcb->exitcode;
 
@@ -2222,45 +2216,14 @@ asmlinkage void svm_vmexit_handler(struc
         }
         break;
 
-    case VMEXIT_CR0_READ:
-        svm_cr_access(v, 0, TYPE_MOV_FROM_CR, regs);
-        break;
-
-    case VMEXIT_CR2_READ:
-        svm_cr_access(v, 2, TYPE_MOV_FROM_CR, regs);
-        break;
-
-    case VMEXIT_CR3_READ:
-        svm_cr_access(v, 3, TYPE_MOV_FROM_CR, regs);
-        break;
-
-    case VMEXIT_CR4_READ:
-        svm_cr_access(v, 4, TYPE_MOV_FROM_CR, regs);
-        break;
-
-    case VMEXIT_CR8_READ:
-        svm_cr_access(v, 8, TYPE_MOV_FROM_CR, regs);
-        break;
-
-    case VMEXIT_CR0_WRITE:
-        svm_cr_access(v, 0, TYPE_MOV_TO_CR, regs);
-        break;
-
-    case VMEXIT_CR2_WRITE:
-        svm_cr_access(v, 2, TYPE_MOV_TO_CR, regs);
-        break;
-
-    case VMEXIT_CR3_WRITE:
-        svm_cr_access(v, 3, TYPE_MOV_TO_CR, regs);
-        local_flush_tlb();
-        break;
-
-    case VMEXIT_CR4_WRITE:
-        svm_cr_access(v, 4, TYPE_MOV_TO_CR, regs);
-        break;
-
-    case VMEXIT_CR8_WRITE:
-        svm_cr_access(v, 8, TYPE_MOV_TO_CR, regs);
+    case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
+        svm_cr_access(v, exit_reason - VMEXIT_CR0_READ,
+                      TYPE_MOV_FROM_CR, regs);
+        break;
+
+    case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
+        svm_cr_access(v, exit_reason - VMEXIT_CR0_WRITE,
+                      TYPE_MOV_TO_CR, regs);
         break;
 
     case VMEXIT_DR0_WRITE ... VMEXIT_DR7_WRITE:
diff -r 69a74ac976cb -r 9eff4c97053b xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Mon Oct 01 14:11:15 2007 +0100
@@ -130,14 +130,11 @@ static int construct_vmcb(struct vcpu *v
     /* Intercept all debug-register writes. */
     vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
 
-    /*
-     * Intercept all control-register accesses except for CR2 reads/writes
-     * and CR8 reads (and actually CR8 writes, but that's a special case
-     * that's handled in svm/intr.c). 
-     */
+    /* Intercept all control-register accesses except for CR2 and CR8. */
     vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
                             CR_INTERCEPT_CR2_WRITE |
-                            CR_INTERCEPT_CR8_READ);
+                            CR_INTERCEPT_CR8_READ |
+                            CR_INTERCEPT_CR8_WRITE);
 
     /* I/O and MSR permission bitmaps. */
     arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE));
diff -r 69a74ac976cb -r 9eff4c97053b xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Mon Oct 01 14:11:15 2007 +0100
@@ -732,33 +732,34 @@ int vlapic_accept_pic_intr(struct vcpu *
              vlapic_hw_disabled(vlapic)));
 }
 
-int vlapic_has_interrupt(struct vcpu *v)
+int vlapic_has_pending_irq(struct vcpu *v)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
-    int highest_irr;
+    int irr, isr;
 
     if ( !vlapic_enabled(vlapic) )
         return -1;
 
-    highest_irr = vlapic_find_highest_irr(vlapic);
-    if ( (highest_irr == -1) ||
-         ((highest_irr & 0xF0) <= vlapic_get_ppr(vlapic)) )
+    irr = vlapic_find_highest_irr(vlapic);
+    if ( irr == -1 )
         return -1;
 
-    return highest_irr;
-}
-
-int cpu_get_apic_interrupt(struct vcpu *v)
-{
-    int vector = vlapic_has_interrupt(v);
+    isr = vlapic_find_highest_isr(vlapic);
+    isr = (isr != -1) ? isr : 0;
+    if ( (isr & 0xf0) >= (irr & 0xf0) )
+        return -1;
+
+    return irr;
+}
+
+int vlapic_ack_pending_irq(struct vcpu *v, int vector)
+{
     struct vlapic *vlapic = vcpu_vlapic(v);
 
-    if ( vector == -1 )
-        return -1;
- 
     vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
     vlapic_clear_irr(vector, vlapic);
-    return vector;
+
+    return 1;
 }
 
 /* Reset the VLPAIC back to its power-on/reset state. */
diff -r 69a74ac976cb -r 9eff4c97053b xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c       Mon Oct 01 14:11:15 2007 +0100
@@ -71,14 +71,14 @@
  * the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking).
  */
 
-static void enable_intr_window(struct vcpu *v, enum hvm_intack intr_source)
+static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
 {
     u32 *cpu_exec_control = &v->arch.hvm_vmx.exec_control;
     u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;
 
-    ASSERT(intr_source != hvm_intack_none);
+    ASSERT(intack.source != hvm_intsrc_none);
 
-    if ( (intr_source == hvm_intack_nmi) && cpu_has_vmx_vnmi )
+    if ( (intack.source == hvm_intsrc_nmi) && cpu_has_vmx_vnmi )
     {
         /*
          * We set MOV-SS blocking in lieu of STI blocking when delivering an
@@ -107,37 +107,6 @@ static void enable_intr_window(struct vc
     }
 }
 
-static void update_tpr_threshold(
-    struct vcpu *v, enum hvm_intack masked_intr_source)
-{
-    struct vlapic *vlapic = vcpu_vlapic(v);
-    int max_irr, tpr, threshold = 0;
-
-    if ( !cpu_has_vmx_tpr_shadow )
-        return;
-
-    /*
-     * If ExtInts are masked then that dominates the TPR --- the 'interrupt
-     * window' has already been enabled in this case.
-     */
-    if ( (masked_intr_source == hvm_intack_lapic) ||
-         (masked_intr_source == hvm_intack_pic) )
-        goto out;
-
-    /* Is there an interrupt pending at the LAPIC? Nothing to do if not. */
-    if ( !vlapic_enabled(vlapic) || 
-         ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
-        goto out;
-
-    /* Highest-priority pending interrupt is masked by the TPR? */
-    tpr = vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xF0;
-    if ( (tpr >> 4) >= (max_irr >> 4) )
-        threshold = max_irr >> 4;
-
- out:
-    __vmwrite(TPR_THRESHOLD, threshold);
-}
-
 static void vmx_dirq_assist(struct domain *d)
 {
     unsigned int irq;
@@ -157,10 +126,10 @@ static void vmx_dirq_assist(struct domai
 
 asmlinkage void vmx_intr_assist(void)
 {
-    int intr_vector;
-    enum hvm_intack intr_source;
+    struct hvm_intack intack;
     struct vcpu *v = current;
-    unsigned int intr_info;
+    unsigned int tpr_threshold = 0;
+    enum hvm_intblk intblk;
 
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
@@ -171,41 +140,48 @@ asmlinkage void vmx_intr_assist(void)
     hvm_set_callback_irq_level();
 
     do {
-        intr_source = hvm_vcpu_has_pending_irq(v);
-        if ( likely(intr_source == hvm_intack_none) )
+        intack = hvm_vcpu_has_pending_irq(v);
+        if ( likely(intack.source == hvm_intsrc_none) )
             goto out;
 
-        /*
-         * An event is already pending or the pending interrupt is masked?
-         * Then the pending interrupt must be delayed.
-         */
-        intr_info = __vmread(VM_ENTRY_INTR_INFO);
-        if ( unlikely(intr_info & INTR_INFO_VALID_MASK) ||
-             !hvm_interrupts_enabled(v, intr_source) )
+        intblk = hvm_interrupt_blocked(v, intack);
+        if ( intblk == hvm_intblk_tpr )
         {
-            enable_intr_window(v, intr_source);
+            ASSERT(vlapic_enabled(vcpu_vlapic(v)));
+            ASSERT(intack.source == hvm_intsrc_lapic);
+            tpr_threshold = intack.vector >> 4;
             goto out;
         }
-    } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
 
-    if ( intr_source == hvm_intack_nmi )
+        if ( (intblk != hvm_intblk_none) ||
+             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
+        {
+            enable_intr_window(v, intack);
+            goto out;
+        }
+
+        intack = hvm_vcpu_ack_pending_irq(v, intack);
+    } while ( intack.source == hvm_intsrc_none );
+
+    if ( intack.source == hvm_intsrc_nmi )
     {
         vmx_inject_nmi(v);
     }
     else
     {
-        HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
-        vmx_inject_extint(v, intr_vector);
-        pt_intr_post(v, intr_vector, intr_source);
+        HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
+        vmx_inject_extint(v, intack.vector);
+        pt_intr_post(v, intack);
     }
 
     /* Is there another IRQ to queue up behind this one? */
-    intr_source = hvm_vcpu_has_pending_irq(v);
-    if ( unlikely(intr_source != hvm_intack_none) )
-        enable_intr_window(v, intr_source);
+    intack = hvm_vcpu_has_pending_irq(v);
+    if ( unlikely(intack.source != hvm_intsrc_none) )
+        enable_intr_window(v, intack);
 
  out:
-    update_tpr_threshold(v, intr_source);
+    if ( cpu_has_vmx_tpr_shadow )
+        __vmwrite(TPR_THRESHOLD, tpr_threshold);
 }
 
 /*
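
A worked example of the new TPR_THRESHOLD handling above (illustration only,
assuming the usual APIC rule that only the upper nibble, bits 7:4, of the
vector and of the TPR takes part in priority comparisons; the variable names
are made up for the example):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t pending_vector = 0x61;   /* highest pending LAPIC vector         */
        uint8_t guest_tpr      = 0x70;   /* APIC_TASKPRI as read from the vlapic */

        unsigned int vec_class = pending_vector >> 4;       /* 6 */
        unsigned int tpr_class = (guest_tpr & 0xF0) >> 4;   /* 7 */

        if ( tpr_class >= vec_class )
            /* hvm_intblk_tpr: vmx_intr_assist() writes the vector's class
             * into TPR_THRESHOLD, so the CPU exits as soon as the guest
             * lowers its TPR below 0x60 and the vector becomes deliverable. */
            printf("blocked by TPR: TPR_THRESHOLD <- %u\n", vec_class);
        else
            printf("vector 0x%02x deliverable now\n", (unsigned)pending_vector);
        return 0;
    }

This replaces the old update_tpr_threshold() scan of the IRR: the threshold
is now derived directly from the vector that hvm_vcpu_has_pending_irq()
reported as blocked by the TPR.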
diff -r 69a74ac976cb -r 9eff4c97053b xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon Oct 01 14:11:15 2007 +0100
@@ -975,20 +975,34 @@ static void vmx_init_hypercall_page(stru
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
 
-static int vmx_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
+static enum hvm_intblk vmx_interrupt_blocked(
+    struct vcpu *v, struct hvm_intack intack)
 {
     unsigned long intr_shadow;
 
     intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
 
-    if ( type == hvm_intack_nmi )
-        return !(intr_shadow & (VMX_INTR_SHADOW_STI|
-                                VMX_INTR_SHADOW_MOV_SS|
-                                VMX_INTR_SHADOW_NMI));
-
-    ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
-    return (!irq_masked(guest_cpu_user_regs()->eflags) &&
-            !(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)));
+    if ( intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
+        return hvm_intblk_shadow;
+
+    if ( intack.source == hvm_intsrc_nmi )
+        return ((intr_shadow & VMX_INTR_SHADOW_NMI) ?
+                hvm_intblk_nmi_iret : hvm_intblk_none);
+
+    ASSERT((intack.source == hvm_intsrc_pic) ||
+           (intack.source == hvm_intsrc_lapic));
+
+    if ( irq_masked(guest_cpu_user_regs()->eflags) )
+        return hvm_intblk_rflags_ie;
+
+    if ( intack.source == hvm_intsrc_lapic )
+    {
+        uint32_t tpr = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xF0;
+        if ( (tpr >> 4) >= (intack.vector >> 4) )
+            return hvm_intblk_tpr;
+    }
+
+    return hvm_intblk_none;
 }
 
 static void vmx_update_host_cr3(struct vcpu *v)
@@ -1112,7 +1126,7 @@ static struct hvm_function_table vmx_fun
     .vcpu_destroy         = vmx_vcpu_destroy,
     .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
     .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
-    .interrupts_enabled   = vmx_interrupts_enabled,
+    .interrupt_blocked    = vmx_interrupt_blocked,
     .guest_x86_mode       = vmx_guest_x86_mode,
     .get_segment_base     = vmx_get_segment_base,
     .get_segment_register = vmx_get_segment_register,
diff -r 69a74ac976cb -r 9eff4c97053b xen/arch/x86/hvm/vpic.c
--- a/xen/arch/x86/hvm/vpic.c   Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/arch/x86/hvm/vpic.c   Mon Oct 01 14:11:15 2007 +0100
@@ -503,7 +503,7 @@ void vpic_irq_negative_edge(struct domai
         vpic_update_int_output(vpic);
 }
 
-int cpu_get_pic_interrupt(struct vcpu *v)
+int vpic_ack_pending_irq(struct vcpu *v)
 {
     int irq, vector;
     struct hvm_hw_vpic *vpic = &v->domain->arch.hvm_domain.vpic[0];
diff -r 69a74ac976cb -r 9eff4c97053b xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c    Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/arch/x86/hvm/vpt.c    Mon Oct 01 14:11:15 2007 +0100
@@ -165,12 +165,12 @@ void pt_update_irq(struct vcpu *v)
 }
 
 static struct periodic_time *is_pt_irq(
-    struct vcpu *v, int vector, enum hvm_intack src)
+    struct vcpu *v, struct hvm_intack intack)
 {
     struct list_head *head = &v->arch.hvm_vcpu.tm_list;
     struct periodic_time *pt;
     struct RTCState *rtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
-    int vec;
+    int vector;
 
     list_for_each_entry ( pt, head, list )
     {
@@ -179,15 +179,16 @@ static struct periodic_time *is_pt_irq(
 
         if ( is_lvtt(v, pt->irq) )
         {
-            if ( pt->irq != vector )
+            if ( pt->irq != intack.vector )
                 continue;
             return pt;
         }
 
-        vec = get_isa_irq_vector(v, pt->irq, src);
+        vector = get_isa_irq_vector(v, pt->irq, intack.source);
 
         /* RTC irq need special care */
-        if ( (vector != vec) || (pt->irq == 8 && !is_rtc_periodic_irq(rtc)) )
+        if ( (intack.vector != vector) ||
+             ((pt->irq == 8) && !is_rtc_periodic_irq(rtc)) )
             continue;
 
         return pt;
@@ -196,7 +197,7 @@ static struct periodic_time *is_pt_irq(
     return NULL;
 }
 
-void pt_intr_post(struct vcpu *v, int vector, enum hvm_intack src)
+void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
 {
     struct periodic_time *pt;
     time_cb *cb;
@@ -204,7 +205,7 @@ void pt_intr_post(struct vcpu *v, int ve
 
     spin_lock(&v->arch.hvm_vcpu.tm_lock);
 
-    pt = is_pt_irq(v, vector, src);
+    pt = is_pt_irq(v, intack);
     if ( pt == NULL )
     {
         spin_unlock(&v->arch.hvm_vcpu.tm_lock);
diff -r 69a74ac976cb -r 9eff4c97053b xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Mon Oct 01 14:11:15 2007 +0100
@@ -57,11 +57,26 @@ typedef struct segment_register {
 } __attribute__ ((packed)) segment_register_t;
 
 /* Interrupt acknowledgement sources. */
-enum hvm_intack {
-    hvm_intack_none,
-    hvm_intack_pic,
-    hvm_intack_lapic,
-    hvm_intack_nmi
+enum hvm_intsrc {
+    hvm_intsrc_none,
+    hvm_intsrc_pic,
+    hvm_intsrc_lapic,
+    hvm_intsrc_nmi
+};
+struct hvm_intack {
+    uint8_t source; /* enum hvm_intsrc */
+    uint8_t vector;
+};
+#define hvm_intack_none       ( (struct hvm_intack) { hvm_intsrc_none,  0 } )
+#define hvm_intack_pic(vec)   ( (struct hvm_intack) { hvm_intsrc_pic,   vec } )
+#define hvm_intack_lapic(vec) ( (struct hvm_intack) { hvm_intsrc_lapic, vec } )
+#define hvm_intack_nmi        ( (struct hvm_intack) { hvm_intsrc_nmi,   2 } )
+enum hvm_intblk {
+    hvm_intblk_none,      /* not blocked (deliverable) */
+    hvm_intblk_shadow,    /* MOV-SS or STI shadow */
+    hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */
+    hvm_intblk_tpr,       /* LAPIC TPR too high */
+    hvm_intblk_nmi_iret   /* NMI blocked until IRET */
 };
 
 /*
@@ -94,7 +109,7 @@ struct hvm_function_table {
      * 3) return the current guest segment descriptor base
      * 4) return the current guest segment descriptor
      */
-    int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
+    enum hvm_intblk (*interrupt_blocked)(struct vcpu *v, struct hvm_intack);
     int (*guest_x86_mode)(struct vcpu *v);
     unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
     void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
@@ -177,11 +192,11 @@ u64 hvm_get_guest_time(struct vcpu *v);
 #define hvm_long_mode_enabled(v) (v,0)
 #endif
 
-static inline int
-hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
+static inline enum hvm_intblk
+hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
 {
     ASSERT(v == current);
-    return hvm_funcs.interrupts_enabled(v, type);
+    return hvm_funcs.interrupt_blocked(v, intack);
 }
 
 static inline int
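
The hvm_intack_*() constructors added above are C99 compound literals.  A
tiny standalone demo of how they behave (describe() is a made-up helper for
the example, not a Xen function):

    #include <stdio.h>
    #include <stdint.h>

    enum hvm_intsrc { hvm_intsrc_none, hvm_intsrc_pic, hvm_intsrc_lapic, hvm_intsrc_nmi };
    struct hvm_intack { uint8_t source; uint8_t vector; };

    #define hvm_intack_none       ( (struct hvm_intack) { hvm_intsrc_none,  0 } )
    #define hvm_intack_lapic(vec) ( (struct hvm_intack) { hvm_intsrc_lapic, vec } )
    #define hvm_intack_nmi        ( (struct hvm_intack) { hvm_intsrc_nmi,   2 } )

    static void describe(struct hvm_intack i)
    {
        static const char *src[] = { "none", "pic", "lapic", "nmi" };
        printf("source=%-5s vector=0x%02x\n", src[i.source], (unsigned)i.vector);
    }

    int main(void)
    {
        describe(hvm_intack_none);         /* source=none  vector=0x00 */
        describe(hvm_intack_lapic(0x31));  /* source=lapic vector=0x31 */
        describe(hvm_intack_nmi);          /* source=nmi   vector=0x02 (NMI is vector 2) */
        return 0;
    }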
diff -r 69a74ac976cb -r 9eff4c97053b xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h     Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/include/asm-x86/hvm/irq.h     Mon Oct 01 14:11:15 2007 +0100
@@ -135,11 +135,11 @@ void hvm_set_callback_via(struct domain 
 void hvm_set_callback_via(struct domain *d, uint64_t via);
 
 /* Check/Acknowledge next pending interrupt. */
-enum hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
-int hvm_vcpu_ack_pending_irq(
-    struct vcpu *v, enum hvm_intack type, int *vector);
+struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
+struct hvm_intack hvm_vcpu_ack_pending_irq(struct vcpu *v,
+                                           struct hvm_intack intack);
 
-int get_isa_irq_vector(struct vcpu *vcpu, int irq, enum hvm_intack src);
+int get_isa_irq_vector(struct vcpu *vcpu, int irq, enum hvm_intsrc src);
 int is_isa_irq_masked(struct vcpu *v, int isa_irq);
 
 #endif /* __ASM_X86_HVM_IRQ_H__ */
diff -r 69a74ac976cb -r 9eff4c97053b xen/include/asm-x86/hvm/vlapic.h
--- a/xen/include/asm-x86/hvm/vlapic.h  Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/include/asm-x86/hvm/vlapic.h  Mon Oct 01 14:11:15 2007 +0100
@@ -75,8 +75,8 @@ int vlapic_set_irq(struct vlapic *vlapic
 
 int vlapic_find_highest_irr(struct vlapic *vlapic);
 
-int vlapic_has_interrupt(struct vcpu *v);
-int cpu_get_apic_interrupt(struct vcpu *v);
+int vlapic_has_pending_irq(struct vcpu *v);
+int vlapic_ack_pending_irq(struct vcpu *v, int vector);
 
 int  vlapic_init(struct vcpu *v);
 void vlapic_destroy(struct vcpu *v);
diff -r 69a74ac976cb -r 9eff4c97053b xen/include/asm-x86/hvm/vpic.h
--- a/xen/include/asm-x86/hvm/vpic.h    Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/include/asm-x86/hvm/vpic.h    Mon Oct 01 14:11:15 2007 +0100
@@ -32,7 +32,7 @@ void vpic_irq_positive_edge(struct domai
 void vpic_irq_positive_edge(struct domain *d, int irq);
 void vpic_irq_negative_edge(struct domain *d, int irq);
 void vpic_init(struct domain *d);
-int cpu_get_pic_interrupt(struct vcpu *v);
+int vpic_ack_pending_irq(struct vcpu *v);
 int is_periodic_irq(struct vcpu *v, int irq, int type);
 
 #endif  /* __ASM_X86_HVM_VPIC_H__ */  
diff -r 69a74ac976cb -r 9eff4c97053b xen/include/asm-x86/hvm/vpt.h
--- a/xen/include/asm-x86/hvm/vpt.h     Mon Oct 01 13:18:19 2007 +0100
+++ b/xen/include/asm-x86/hvm/vpt.h     Mon Oct 01 14:11:15 2007 +0100
@@ -120,7 +120,7 @@ void pt_freeze_time(struct vcpu *v);
 void pt_freeze_time(struct vcpu *v);
 void pt_thaw_time(struct vcpu *v);
 void pt_update_irq(struct vcpu *v);
-void pt_intr_post(struct vcpu *v, int vector, enum hvm_intack src);
+void pt_intr_post(struct vcpu *v, struct hvm_intack intack);
 void pt_reset(struct vcpu *v);
 void pt_migrate(struct vcpu *v);
 void create_periodic_time(

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog