
[Xen-devel] [PATCH] x86/pt: skip setup of posted format IRTE when gvec is 0



When testing an UP guest with a pass-through device and VT-d posted
interrupts (PI) enabled in the host, we observed that the guest couldn't
receive interrupts from that pass-through device. Dumping the IRTEs, we
found the corresponding IRTE set to posted format with its "vector"
field as 0.

We fall into this issue when the guest uses the pirq format of MSI
(see the comment in xen_msi_compose_msg() in the Linux kernel). As
'dest_id' is repurposed in that format, skip the migration, which is
based on 'dest_id'.
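
For context, below is a minimal, self-contained C sketch of the guest-side
behaviour referred to above: when a guest programs a pirq-format MSI (as
xen_msi_compose_msg() in Linux does), the vector field stays 0 and the
dest_id bits carry the pirq number instead. The struct layout is
simplified and compose_pirq_msi() is a hypothetical illustration, not
the kernel's actual code.

  #include <stdint.h>
  #include <stdio.h>

  struct msi_msg {
      uint32_t address_lo;   /* low address word: bits 19:12 are dest_id */
      uint32_t address_hi;
      uint32_t data;         /* data word: bits 7:0 are the vector       */
  };

  /*
   * Compose a pirq-format MSI: the vector field is left as 0 and the
   * dest_id bits are repurposed to carry the pirq number, so the
   * hypervisor must not interpret them as a real vector/dest_id pair.
   */
  static void compose_pirq_msi(struct msi_msg *msg, unsigned int pirq)
  {
      msg->address_hi = 0;
      msg->address_lo = 0xfee00000u | ((pirq & 0xff) << 12); /* dest_id := pirq */
      msg->data = 0;                                          /* vector  := 0   */
  }

  int main(void)
  {
      struct msi_msg msg;

      compose_pirq_msi(&msg, 0x55);
      printf("address_lo=%#x data=%#x\n", msg.address_lo, msg.data);
      return 0;
  }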

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
---
 xen/drivers/passthrough/io.c | 68 ++++++++++++++++++++++++++++----------------
 1 file changed, 43 insertions(+), 25 deletions(-)

diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 4290c7c..362d4bd 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -413,34 +413,52 @@ int pt_irq_create_bind(
                 pirq_dpci->gmsi.gflags = gflags;
             }
         }
-        /* Calculate dest_vcpu_id for MSI-type pirq migration. */
-        dest = MASK_EXTR(pirq_dpci->gmsi.gflags,
-                         XEN_DOMCTL_VMSI_X86_DEST_ID_MASK);
-        dest_mode = pirq_dpci->gmsi.gflags & XEN_DOMCTL_VMSI_X86_DM_MASK;
-        delivery_mode = MASK_EXTR(pirq_dpci->gmsi.gflags,
-                                  XEN_DOMCTL_VMSI_X86_DELIV_MASK);
-
-        dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
-        pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
-        spin_unlock(&d->event_lock);
-
-        pirq_dpci->gmsi.posted = false;
-        vcpu = (dest_vcpu_id >= 0) ? d->vcpu[dest_vcpu_id] : NULL;
-        if ( iommu_intpost )
+        /*
+         * Migrate pirq and create posted format IRTE only if we know the gmsi's
+         * dest_id and vector.
+         */
+        if ( pirq_dpci->gmsi.gvec )
         {
-            if ( delivery_mode == dest_LowestPrio )
-                vcpu = vector_hashing_dest(d, dest, dest_mode,
-                                           pirq_dpci->gmsi.gvec);
-            if ( vcpu )
-                pirq_dpci->gmsi.posted = true;
+            /* Calculate dest_vcpu_id for MSI-type pirq migration. */
+            dest = MASK_EXTR(pirq_dpci->gmsi.gflags,
+                             XEN_DOMCTL_VMSI_X86_DEST_ID_MASK);
+            dest_mode = pirq_dpci->gmsi.gflags & XEN_DOMCTL_VMSI_X86_DM_MASK;
+            delivery_mode = MASK_EXTR(pirq_dpci->gmsi.gflags,
+                                      XEN_DOMCTL_VMSI_X86_DELIV_MASK);
+
+            dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
+            pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
+            spin_unlock(&d->event_lock);
+
+            pirq_dpci->gmsi.posted = false;
+            vcpu = (dest_vcpu_id >= 0) ? d->vcpu[dest_vcpu_id] : NULL;
+            if ( iommu_intpost )
+            {
+                if ( delivery_mode == dest_LowestPrio )
+                    vcpu = vector_hashing_dest(d, dest, dest_mode,
+                                               pirq_dpci->gmsi.gvec);
+                if ( vcpu )
+                    pirq_dpci->gmsi.posted = true;
+            }
+            if ( vcpu && iommu_enabled )
+                hvm_migrate_pirq(pirq_dpci, vcpu);
+
+            /* Use interrupt posting if it is supported. */
+            if ( iommu_intpost )
+                pi_update_irte(vcpu ? &vcpu->arch.hvm.vmx.pi_desc : NULL,
+                               info, pirq_dpci->gmsi.gvec);
         }
-        if ( vcpu && iommu_enabled )
-            hvm_migrate_pirq(pirq_dpci, vcpu);
+        else /* pirq_dpci->gmsi.gvec == 0 */
+        {
+            pirq_dpci->gmsi.dest_vcpu_id = -1;
+            spin_unlock(&d->event_lock);
 
-        /* Use interrupt posting if it is supported. */
-        if ( iommu_intpost )
-            pi_update_irte(vcpu ? &vcpu->arch.hvm.vmx.pi_desc : NULL,
-                           info, pirq_dpci->gmsi.gvec);
+            if ( unlikely(pirq_dpci->gmsi.posted) )
+            {
+                pi_update_irte(NULL, info, 0);
+                pirq_dpci->gmsi.posted = false;
+            }
+        }
 
         if ( pt_irq_bind->u.msi.gflags & XEN_DOMCTL_VMSI_X86_UNMASKED )
         {
-- 
1.9.1



 

