[Xen-devel] [PATCH v3 4/4] Nested VMX: Update APIC-v (RVI/SVI) when vmexit to L1



From: Yang Zhang <yang.z.zhang@xxxxxxxxx>

If APIC-v is enabled, all interrupts to L1 are delivered through APIC-v.
But when L2 is running, an external interrupt causes an L1 vmexit with
reason "external interrupt", and L1 then picks the interrupt up through
vmcs12. When L1 acks the interrupt, the APIC-v hardware still performs
vEOI updating, since APIC-v is enabled while L1 is running. The problem
is that the interrupt was not delivered through the APIC-v hardware, so
SVI/RVI/vPPR are not set, yet the hardware requires them when doing the
vEOI update. The solution is that, when L1 picks the interrupt up from
vmcs12, the hypervisor updates SVI/RVI/vPPR so that the subsequent vEOI
and vPPR updates are performed correctly.

Also, since the interrupt is delivered through vmcs12, the APIC-v
hardware does not clear vIRR, so the hypervisor needs to clear it before
L1 runs.

Signed-off-by: Yang Zhang <yang.z.zhang@xxxxxxxxx>
---
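Note (not part of the commit message): the register bookkeeping described
above can be modelled outside Xen. The standalone C sketch below is only
an illustration under simplified assumptions -- a flat 256-bit vIRR/vISR,
TPR taken as zero, and invented helper names (set_vec, clear_vec,
highest_vec); it is not Xen's vlapic implementation. It mimics what
nvmx_update_apicv() in this patch does: move the acked vector from vIRR
to vISR, recompute vPPR, and rebuild the SVI/RVI pair that is written to
GUEST_INTR_STATUS.

#include <stdint.h>
#include <stdio.h>

static uint32_t virr[8], visr[8];   /* 256-bit vIRR/vISR bitmaps */

static void set_vec(uint32_t *reg, int vec)
{
    reg[vec >> 5] |= 1u << (vec & 31);
}

static void clear_vec(uint32_t *reg, int vec)
{
    reg[vec >> 5] &= ~(1u << (vec & 31));
}

/* Highest vector set in a bitmap, or -1 if none is pending. */
static int highest_vec(const uint32_t *reg)
{
    int i;

    for ( i = 7; i >= 0; i-- )
        if ( reg[i] )
            return (i << 5) | (31 - __builtin_clz(reg[i]));
    return -1;
}

int main(void)
{
    int vector = 0x51;          /* the vector L1 just acked via vmcs12 */
    uint16_t status;
    uint32_t ppr;
    int isrv, rvi;

    set_vec(virr, vector);      /* pending, but never delivered by APIC-v */
    set_vec(virr, 0x33);        /* a second, lower-priority pending IRQ */

    /* 1. Ack on the hardware's behalf: move the vector from vIRR to vISR. */
    set_vec(visr, vector);
    clear_vec(virr, vector);

    /* 2. Recompute vPPR from the highest bit in vISR (TPR assumed 0). */
    isrv = highest_vec(visr);
    ppr = (isrv < 0) ? 0 : (uint32_t)(isrv & 0xf0);

    /* 3. SVI = vector just moved into vISR; RVI = highest remaining vIRR. */
    status = (uint16_t)vector << 8;
    rvi = highest_vec(virr);
    if ( rvi != -1 )
        status |= rvi & 0xff;

    /* Xen would now do: __vmwrite(GUEST_INTR_STATUS, status); */
    printf("vPPR=%#x GUEST_INTR_STATUS=%#06x\n", ppr, status);
    return 0;
}

In the patch proper, these three steps are vlapic_ack_pending_irq(v,
vector, 1), vlapic_set_ppr(), and the __vmwrite(GUEST_INTR_STATUS,
status) in nvmx_update_apicv().
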
 xen/arch/x86/hvm/irq.c             |    2 +-
 xen/arch/x86/hvm/vlapic.c          |   20 ++++++++++++++------
 xen/arch/x86/hvm/vmx/intr.c        |    4 ++--
 xen/arch/x86/hvm/vmx/vmx.c         |   14 ++++++++------
 xen/arch/x86/hvm/vmx/vvmx.c        |   33 +++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/vlapic.h   |    3 ++-
 xen/include/asm-x86/hvm/vmx/vmx.h  |    2 +-
 xen/include/asm-x86/hvm/vmx/vvmx.h |    1 +
 8 files changed, 62 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 9eae5de..6a6fb68 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -437,7 +437,7 @@ struct hvm_intack hvm_vcpu_ack_pending_irq(
             intack.vector = (uint8_t)vector;
         break;
     case hvm_intsrc_lapic:
-        if ( !vlapic_ack_pending_irq(v, intack.vector) )
+        if ( !vlapic_ack_pending_irq(v, intack.vector, 0) )
             intack = hvm_intack_none;
         break;
     case hvm_intsrc_vector:
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index f1530fd..d21a6c9 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -168,6 +168,14 @@ static uint32_t vlapic_get_ppr(struct vlapic *vlapic)
     return ppr;
 }
 
+uint32_t vlapic_set_ppr(struct vlapic *vlapic)
+{
+    uint32_t ppr = vlapic_get_ppr(vlapic);
+
+    vlapic_set_reg(vlapic, APIC_PROCPRI, ppr);
+    return ppr;
+}
+
 static int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda)
 {
     int result = 0;
@@ -1050,15 +1058,15 @@ int vlapic_has_pending_irq(struct vcpu *v)
     return irr;
 }
 
-int vlapic_ack_pending_irq(struct vcpu *v, int vector)
+int vlapic_ack_pending_irq(struct vcpu *v, int vector, int force_ack)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
 
-    if ( vlapic_virtual_intr_delivery_enabled() )
-        return 1;
-
-    vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
-    vlapic_clear_irr(vector, vlapic);
+    if ( force_ack || !vlapic_virtual_intr_delivery_enabled() )
+    {
+        vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
+        vlapic_clear_irr(vector, vlapic);
+    }
 
     return 1;
 }
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index cab9109..45942ab 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -185,7 +185,7 @@ static int nvmx_intr_intercept(struct vcpu *v, struct hvm_intack intack)
             if ( !(ctrl & PIN_BASED_EXT_INTR_MASK) )
                 return 0;
 
-            vmx_inject_extint(intack.vector);
+            vmx_inject_extint(intack.vector, intack.source);
 
             ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, VM_EXIT_CONTROLS);
             if ( ctrl & VM_EXIT_ACK_INTR_ON_EXIT )
@@ -314,7 +314,7 @@ void vmx_intr_assist(void)
     else
     {
         HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
-        vmx_inject_extint(intack.vector);
+        vmx_inject_extint(intack.vector, intack.source);
         pt_intr_post(v, intack);
     }
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 8ed7026..011a817 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1208,7 +1208,7 @@ static void vmx_update_guest_efer(struct vcpu *v)
 }
 
 void nvmx_enqueue_n2_exceptions(struct vcpu *v, 
-            unsigned long intr_fields, int error_code)
+            unsigned long intr_fields, int error_code, uint8_t source)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
@@ -1216,6 +1216,7 @@ void nvmx_enqueue_n2_exceptions(struct vcpu *v,
         /* enqueue the exception till the VMCS switch back to L1 */
         nvmx->intr.intr_info = intr_fields;
         nvmx->intr.error_code = error_code;
+        nvmx->intr.source = source;
         vcpu_nestedhvm(v).nv_vmexit_pending = 1;
         return;
     }
@@ -1227,7 +1228,8 @@ void nvmx_enqueue_n2_exceptions(struct vcpu *v,
 
 static int nvmx_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
 {
-    nvmx_enqueue_n2_exceptions(v, trap->vector, trap->error_code);
+    nvmx_enqueue_n2_exceptions(v, trap->vector, trap->error_code,
+                               hvm_intsrc_none);
     return NESTEDHVM_VMEXIT_DONE;
 }
 
@@ -1258,7 +1260,7 @@ static void __vmx_inject_exception(int trap, int type, int error_code)
         curr->arch.hvm_vmx.vmx_emulate = 1;
 }
 
-void vmx_inject_extint(int trap)
+void vmx_inject_extint(int trap, uint8_t source)
 {
     struct vcpu *v = current;
     u32    pin_based_cntrl;
@@ -1269,7 +1271,7 @@ void vmx_inject_extint(int trap)
         if ( pin_based_cntrl & PIN_BASED_EXT_INTR_MASK ) {
             nvmx_enqueue_n2_exceptions (v, 
                INTR_INFO_VALID_MASK | (X86_EVENTTYPE_EXT_INTR<<8) | trap,
-               HVM_DELIVER_NO_ERROR_CODE);
+               HVM_DELIVER_NO_ERROR_CODE, source);
             return;
         }
     }
@@ -1288,7 +1290,7 @@ void vmx_inject_nmi(void)
         if ( pin_based_cntrl & PIN_BASED_NMI_EXITING ) {
             nvmx_enqueue_n2_exceptions (v, 
                INTR_INFO_VALID_MASK | (X86_EVENTTYPE_NMI<<8) | TRAP_nmi,
-               HVM_DELIVER_NO_ERROR_CODE);
+               HVM_DELIVER_NO_ERROR_CODE, hvm_intsrc_nmi);
             return;
         }
     }
@@ -1356,7 +1358,7 @@ static void vmx_inject_trap(struct hvm_trap *trap)
     {
         nvmx_enqueue_n2_exceptions (curr, 
             INTR_INFO_VALID_MASK | (_trap.type<<8) | _trap.vector,
-            _trap.error_code); 
+            _trap.error_code, hvm_intsrc_none);
         return;
     }
     else
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 4792019..4f6f614 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1295,6 +1295,36 @@ static void sync_exception_state(struct vcpu *v)
     }
 }
 
+static void nvmx_update_apicv(struct vcpu *v)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    unsigned long reason = __get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_REASON);
+    uint32_t intr_info = __get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_INTR_INFO);
+
+    if ( reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
+         nvmx->intr.source == hvm_intsrc_lapic &&
+         (intr_info & INTR_INFO_VALID_MASK) )
+    {
+        uint16_t status;
+        uint32_t rvi, ppr;
+        uint32_t vector = intr_info & 0xff;
+        struct vlapic *vlapic = vcpu_vlapic(v);
+
+        vlapic_ack_pending_irq(v, vector, 1);
+
+        ppr = vlapic_set_ppr(vlapic);
+        WARN_ON( (ppr & 0xf0) != (vector & 0xf0) );
+
+        status = vector << 8;
+        rvi = vlapic_has_pending_irq(v);
+        if ( rvi != -1 )
+            status |= rvi & 0xff;
+
+        __vmwrite(GUEST_INTR_STATUS, status);
+    }
+}
+
 static void virtual_vmexit(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -1340,6 +1370,9 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
     /* updating host cr0 to sync TS bit */
     __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
 
+    if ( cpu_has_vmx_virtual_intr_delivery )
+        nvmx_update_apicv(v);
+
     vmreturn(regs, VMSUCCEED);
 }
 
diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
index 021a5f2..6109137 100644
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -96,7 +96,7 @@ bool_t is_vlapic_lvtpc_enabled(struct vlapic *vlapic);
 void vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig);
 
 int vlapic_has_pending_irq(struct vcpu *v);
-int vlapic_ack_pending_irq(struct vcpu *v, int vector);
+int vlapic_ack_pending_irq(struct vcpu *v, int vector, int force_ack);
 
 int  vlapic_init(struct vcpu *v);
 void vlapic_destroy(struct vcpu *v);
@@ -108,6 +108,7 @@ void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value);
 uint64_t vlapic_tdt_msr_get(struct vlapic *vlapic);
 
 int vlapic_accept_pic_intr(struct vcpu *v);
+uint32_t vlapic_set_ppr(struct vlapic *vlapic);
 
 void vlapic_adjust_i8259_target(struct domain *d);
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index c33b9f9..f4d759b 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -448,7 +448,7 @@ static inline int __vmxon(u64 addr)
 
 void vmx_get_segment_register(struct vcpu *, enum x86_segment,
                               struct segment_register *);
-void vmx_inject_extint(int trap);
+void vmx_inject_extint(int trap, uint8_t source);
 void vmx_inject_nmi(void);
 
 int ept_p2m_init(struct p2m_domain *p2m);
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 3874525..be2b5c6 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -36,6 +36,7 @@ struct nestedvmx {
     struct {
         unsigned long intr_info;
         u32           error_code;
+        uint8_t       source;
     } intr;
     struct {
         bool_t   enabled;
-- 
1.7.1

