[Xen-devel] [PATCH v5 1/2] Xen: Fix live migration while enabling APICV



SVI should be restored in case the guest is servicing a virtual interrupt
while a domain's state is being saved. Otherwise SVI would be lost across
the migration when virtual interrupt delivery is enabled.

Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
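
For reference, GUEST_INTR_STATUS is a 16-bit VMCS field: RVI (Requesting
Virtual Interrupt) occupies bits 7:0 and SVI (Servicing Virtual Interrupt)
bits 15:8. Below is a minimal stand-alone sketch of the restore logic the
patch adds in vmx_process_isr(); rebuild_intr_status() and the mask/shift
names are illustrative stand-ins, not Xen definitions:

#include <stdint.h>

#define INTR_STATUS_RVI_MASK   0x00ffU  /* bits 7:0  - RVI */
#define INTR_STATUS_SVI_SHIFT  8        /* bits 15:8 - SVI */

static uint16_t rebuild_intr_status(uint16_t status, int highest_isr)
{
    if ( highest_isr < 0 )              /* no vector in service */
        highest_isr = 0;

    status &= INTR_STATUS_RVI_MASK;     /* keep RVI, clear SVI */
    status |= (uint16_t)(highest_isr << INTR_STATUS_SVI_SHIFT);
    return status;
}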

diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index ee2294c..38ff216 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1198,6 +1198,9 @@ static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 ) 
         return -EINVAL;
 
+    if ( hvm_funcs.process_isr )
+        hvm_funcs.process_isr(vlapic_find_highest_isr(s), v);
+
     vlapic_adjust_i8259_target(d);
     lapic_rearm(s);
     return 0;
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index c5c503e..c961782 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -290,8 +290,8 @@ void vmx_intr_assist(void)
             vmx_set_eoi_exit_bitmap(v, pt_vector);
 
         /* we need update the RVI field */
-        status &= ~(unsigned long)0x0FF;
-        status |= (unsigned long)0x0FF & 
+        status &= ~VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
+        status |= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK & 
                     intack.vector;
         __vmwrite(GUEST_INTR_STATUS, status);
         if (v->arch.hvm_vmx.eoi_exitmap_changed) {
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 4d7c93f..362273b 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1419,6 +1419,29 @@ static int vmx_virtual_intr_delivery_enabled(void)
     return cpu_has_vmx_virtual_intr_delivery;
 }
 
+static void vmx_process_isr(int isr, struct vcpu *v)
+{
+    unsigned long status;
+    u8 old;
+
+    if ( !cpu_has_vmx_virtual_intr_delivery )
+        return;
+
+    if ( isr < 0 )
+        isr = 0;
+
+    vmx_vmcs_enter(v);
+    status = __vmread(GUEST_INTR_STATUS);
+    old = status >> VMX_GUEST_INTR_STATUS_SVI_OFFSET;
+    if ( isr != old )
+    {
+        status &= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
+        status |= isr << VMX_GUEST_INTR_STATUS_SVI_OFFSET;
+        __vmwrite(GUEST_INTR_STATUS, status);
+    }
+    vmx_vmcs_exit(v);
+}
+
 static struct hvm_function_table __read_mostly vmx_function_table = {
     .name                 = "VMX",
     .cpu_up_prepare       = vmx_cpu_up_prepare,
@@ -1468,6 +1491,7 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
     .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
     .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
+    .process_isr          = vmx_process_isr,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
 };
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 76e9cc8..011a6a3 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -183,6 +183,7 @@ struct hvm_function_table {
     /* Virtual interrupt delivery */
     void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
     int (*virtual_intr_delivery_enabled)(void);
+    void (*process_isr)(int isr, struct vcpu *v);
 
     /*Walk nested p2m  */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 51df81e..d4958c3 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -257,6 +257,10 @@ extern bool_t cpu_has_vmx_ins_outs_instr_info;
  */
 #define VMX_BASIC_DEFAULT1_ZERO                (1ULL << 55)
 
+/* Guest interrupt status */
+#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK  0x0FF
+#define VMX_GUEST_INTR_STATUS_SVI_OFFSET        8
+
 /* VMCS field encodings. */
 enum vmcs_field {
     VIRTUAL_PROCESSOR_ID            = 0x00000000,
-- 
1.7.1
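
For reference, vlapic_find_highest_isr() is assumed here to return the
vector of the highest-priority interrupt currently in service, or a
negative value if none (hence the `isr < 0` check in vmx_process_isr()):
the local APIC's ISR is a 256-bit bitmap held in eight 32-bit registers
at a 16-byte stride starting at offset 0x100 of the APIC register page.
A minimal stand-alone sketch of those assumed semantics follows;
find_highest_isr() and APIC_ISR_BASE are illustrative stand-ins:

#include <stdint.h>

#define APIC_ISR_BASE  0x100    /* byte offset of ISR in the APIC page */

static int find_highest_isr(const uint32_t *apic_page)
{
    int word;

    /* Eight 32-bit ISR words at a 16-byte stride, highest word first. */
    for ( word = 7; word >= 0; word-- )
    {
        uint32_t bits = apic_page[(APIC_ISR_BASE + word * 0x10) / 4];

        if ( bits )
            return word * 32 + 31 - __builtin_clz(bits);
    }
    return -1;                  /* no interrupt in service */
}

On restore, lapic_load_regs() feeds this result to hvm_funcs.process_isr(),
which on VMX writes it into the SVI byte as done by vmx_process_isr() above.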

