[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] Fix pirq conflict issue when guest adopts per-cpu vector.



Xen: iommu: Fix pirq conflict issue when guest adopts per-cpu vector.
 
Recent Linux and Windows guests may adopt per-cpu vectors instead of a global
vector space, so the same vector in different vcpus may correspond to different
interrupt sources. That is to say, vector and pirq should have a 1:n mapping,
and the array msi_gvec_pirq can't meet this mapping requirement, so the related
logic needs to be improved; otherwise it may introduce strange issues.
 
Signed-off-by: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
 
diff -r 72d130772f36 xen/arch/x86/hvm/vmsi.c
--- a/xen/arch/x86/hvm/vmsi.c Wed Sep 16 09:30:41 2009 +0100
+++ b/xen/arch/x86/hvm/vmsi.c Thu Sep 17 21:04:22 2009 +0800
@@ -64,15 +64,6 @@ static void vmsi_inj_irq(
     }
 }
 
-#define VMSI_RH_MASK      0x100
-#define VMSI_DM_MASK      0x200
-#define VMSI_DELIV_MASK   0x7000
-#define VMSI_TRIG_MODE    0x8000
-
-#define GFLAGS_SHIFT_RH             8
-#define GLFAGS_SHIFT_DELIV_MODE     12
-#define GLFAGS_SHIFT_TRG_MODE       15
-
 int vmsi_deliver(struct domain *d, int pirq)
 {
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
diff -r 72d130772f36 xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c Wed Sep 16 09:30:41 2009 +0100
+++ b/xen/drivers/passthrough/io.c Thu Sep 17 21:12:29 2009 +0800
@@ -161,7 +161,6 @@ int pt_irq_create_bind_vtd(
                                              HVM_IRQ_DPCI_GUEST_MSI;
             hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
             hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
-            hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
             /* bind after hvm_irq_dpci is setup to avoid race with irq 
handler*/
             rc = pirq_guest_bind(d->vcpu[0], pirq, 0);
             if ( rc == 0 && pt_irq_bind->u.msi.gtable )
@@ -172,7 +171,6 @@ int pt_irq_create_bind_vtd(
             }
             if ( unlikely(rc) )
             {
-                hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = 0;
                 hvm_irq_dpci->mirq[pirq].gmsi.gflags = 0;
                 hvm_irq_dpci->mirq[pirq].gmsi.gvec = 0;
                 hvm_irq_dpci->mirq[pirq].flags = 0;
@@ -194,10 +192,8 @@ int pt_irq_create_bind_vtd(
  
             /* if pirq is already mapped as vmsi, update the guest data/addr */
             old_gvec = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
-            hvm_irq_dpci->msi_gvec_pirq[old_gvec] = 0;
             hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
             hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
-            hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
         }
     }
     else
@@ -405,17 +401,27 @@ static void __msi_pirq_eoi(struct domain
 
 void hvm_dpci_msi_eoi(struct domain *d, int vector)
 {
+    int pirq, dest, dest_mode;
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
-    int pirq;
 
     if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
        return;
-
+    
     spin_lock(&d->event_lock);
-
-    pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
-    __msi_pirq_eoi(d, pirq);
-
+    for ( pirq = find_first_bit(hvm_irq_dpci->mapping, d->nr_pirqs);
+          pirq < d->nr_pirqs;
+          pirq = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, pirq + 1) ) 
{
+        if ( (!(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI)) ||
+                (hvm_irq_dpci->mirq[pirq].gmsi.gvec != vector) )
+            continue;
+
+        dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
+        dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
+        if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest, dest_mode) 
)
+            break;
+    }
+    if ( pirq < d->nr_pirqs )
+        __msi_pirq_eoi(d, pirq);
     spin_unlock(&d->event_lock);
 }
 
diff -r 72d130772f36 xen/include/asm-x86/hvm/vlapic.h
--- a/xen/include/asm-x86/hvm/vlapic.h Wed Sep 16 09:30:41 2009 +0100
+++ b/xen/include/asm-x86/hvm/vlapic.h Thu Sep 17 21:04:22 2009 +0800
@@ -51,6 +51,16 @@
 
 #define vlapic_base_address(vlapic)                             \
     ((vlapic)->hw.apic_base_msr & MSR_IA32_APICBASE_BASE)
+
+#define VMSI_DEST_ID_MASK 0xff
+#define VMSI_RH_MASK      0x100
+#define VMSI_DM_MASK      0x200
+#define VMSI_DELIV_MASK   0x7000
+#define VMSI_TRIG_MODE    0x8000
+
+#define GFLAGS_SHIFT_RH             8
+#define GLFAGS_SHIFT_DELIV_MODE     12
+#define GLFAGS_SHIFT_TRG_MODE       15
 
 struct vlapic {
     struct hvm_hw_lapic      hw;
diff -r 72d130772f36 xen/include/xen/hvm/irq.h
--- a/xen/include/xen/hvm/irq.h Wed Sep 16 09:30:41 2009 +0100
+++ b/xen/include/xen/hvm/irq.h Thu Sep 17 21:04:22 2009 +0800
@@ -83,7 +83,6 @@ struct hvm_irq_dpci {
     unsigned long *dirq_mask;
     /* Guest IRQ to guest device/intx mapping. */
     struct list_head girq[NR_HVM_IRQS];
-    uint8_t msi_gvec_pirq[0x100];
     /* Record of mapped ISA IRQs */
     DECLARE_BITMAP(isairq_map, NR_ISAIRQS);
     /* Record of mapped Links */

Attachment: guest_per_cpu_vector_fix.patch
Description: guest_per_cpu_vector_fix.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.