
[Xen-devel] [PATCH v9 2/3] VT-d: wrap a _sync version for all VT-d flush interfaces



dev_invalidate_iotlb() scans the ats_devices list to flush ATS devices,
and invalidate_sync() is called after dev_invalidate_iotlb() to
synchronize with hardware on the flush status. If multiple ATS devices
are assigned to a domain, that flush status covers all of them, so once
the flush timeout expires we cannot tell which ATS device is the buggy
one.

Therefore an invalidate_sync() variant (which is passed the device's
SBDF, so that the offending ATS device can be hidden) is called from
within dev_invalidate_iotlb() to synchronize on the flush status device
by device. If the flush timeout expires, we can then identify the buggy
ATS device and hide it. However, the other VT-d flush interfaces
currently still call invalidate_sync() afterwards, which is inconsistent.

So wrap a _sync version around each VT-d flush interface. This
simplifies the callers' logic and makes the code more readable as well.
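
Each _sync wrapper follows the same pattern: queue the invalidation
descriptor, then synchronize with hardware before returning the
completion status. A minimal sketch, mirroring the qinval.c changes
below:

    static int queue_invalidate_iotlb_sync(struct iommu *iommu,
        u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
    {
        /* Queue the IOTLB invalidation descriptor ... */
        queue_invalidate_iotlb(iommu, granu, dr, dw, did, am, ih, addr);

        /* ... then wait for hardware to consume it and report status. */
        return invalidate_sync(iommu);
    }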

Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
---
 xen/drivers/passthrough/vtd/extern.h  |  2 ++
 xen/drivers/passthrough/vtd/qinval.c  | 60 +++++++++++++++++++++++++----------
 xen/drivers/passthrough/vtd/x86/ats.c | 12 +++----
 3 files changed, 50 insertions(+), 24 deletions(-)

diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
index d4d37c3..6d3187d 100644
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -61,6 +61,8 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
 
 int qinval_device_iotlb(struct iommu *iommu,
                         u32 max_invs_pend, u16 sid, u16 size, u64 addr);
+int qinval_device_iotlb_sync(struct iommu *iommu, u32 max_invs_pend,
+                             u16 sid, u16 size, u64 addr);
 
 unsigned int get_cache_line_size(void);
 void cacheline_flush(char *);
diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
index 52ba2c2..d12661b 100644
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -33,6 +33,10 @@ integer_param("vtd_qi_timeout", vtd_qi_timeout);
 
 #define IOMMU_QI_TIMEOUT (vtd_qi_timeout * MILLISECS(1))
 
+static int __must_check queue_invalidate_wait(struct iommu *iommu,
+    u8 iflag, u8 sw, u8 fn);
+static int invalidate_sync(struct iommu *iommu);
+
 static void print_qi_regs(struct iommu *iommu)
 {
     u64 val;
@@ -102,6 +106,15 @@ static void queue_invalidate_context(struct iommu *iommu,
     unmap_vtd_domain_page(qinval_entries);
 }
 
+static int queue_invalidate_context_sync(struct iommu *iommu,
+    u16 did, u16 source_id, u8 function_mask, u8 granu)
+{
+    queue_invalidate_context(iommu, did, source_id,
+                             function_mask, granu);
+
+    return invalidate_sync(iommu);
+}
+
 static void queue_invalidate_iotlb(struct iommu *iommu,
     u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
 {
@@ -135,6 +148,14 @@ static void queue_invalidate_iotlb(struct iommu *iommu,
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
+static int queue_invalidate_iotlb_sync(struct iommu *iommu,
+    u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
+{
+    queue_invalidate_iotlb(iommu, granu, dr, dw, did, am, ih, addr);
+
+    return invalidate_sync(iommu);
+}
+
 static int __must_check queue_invalidate_wait(struct iommu *iommu,
     u8 iflag, u8 sw, u8 fn)
 {
@@ -229,6 +250,14 @@ int qinval_device_iotlb(struct iommu *iommu,
     return 0;
 }
 
+int qinval_device_iotlb_sync(struct iommu *iommu,
+    u32 max_invs_pend, u16 sid, u16 size, u64 addr)
+{
+    qinval_device_iotlb(iommu, max_invs_pend, sid, size, addr);
+
+    return invalidate_sync(iommu);
+}
+
 static void queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
 {
     unsigned long flags;
@@ -256,7 +285,7 @@ static void queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
-static int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
+static int queue_invalidate_iec_sync(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
 {
     int ret;
 
@@ -273,12 +302,12 @@ static int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
 
 int iommu_flush_iec_global(struct iommu *iommu)
 {
-    return __iommu_flush_iec(iommu, IEC_GLOBAL_INVL, 0, 0);
+    return queue_invalidate_iec_sync(iommu, IEC_GLOBAL_INVL, 0, 0);
 }
 
 int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx)
 {
-   return __iommu_flush_iec(iommu, IEC_INDEX_INVL, im, iidx);
+   return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx);
 }
 
 static int flush_context_qi(
@@ -304,11 +333,9 @@ static int flush_context_qi(
     }
 
     if ( qi_ctrl->qinval_maddr != 0 )
-    {
-        queue_invalidate_context(iommu, did, sid, fm,
-                                 type >> DMA_CCMD_INVL_GRANU_OFFSET);
-        ret = invalidate_sync(iommu);
-    }
+        ret = queue_invalidate_context_sync(iommu, did, sid, fm,
+                  type >> DMA_CCMD_INVL_GRANU_OFFSET);
+
     return ret;
 }
 
@@ -338,23 +365,24 @@ static int flush_iotlb_qi(
 
     if ( qi_ctrl->qinval_maddr != 0 )
     {
-        int rc;
-
         /* use queued invalidation */
         if (cap_write_drain(iommu->cap))
             dw = 1;
         if (cap_read_drain(iommu->cap))
             dr = 1;
         /* Need to consider the ih bit later */
-        queue_invalidate_iotlb(iommu,
-                               type >> DMA_TLB_FLUSH_GRANU_OFFSET, dr,
-                               dw, did, size_order, 0, addr);
+        ret = queue_invalidate_iotlb_sync(iommu,
+                  type >> DMA_TLB_FLUSH_GRANU_OFFSET, dr, dw, did,
+                  size_order, 0, addr);
+
+        /* TODO: Timeout error handling to be added later */
+        if ( ret )
+            return ret;
+
         if ( flush_dev_iotlb )
             ret = dev_invalidate_iotlb(iommu, did, addr, size_order, type);
-        rc = invalidate_sync(iommu);
-        if ( !ret )
-            ret = rc;
     }
+
     return ret;
 }
 
diff --git a/xen/drivers/passthrough/vtd/x86/ats.c b/xen/drivers/passthrough/vtd/x86/ats.c
index 334b9c1..7b1c07b 100644
--- a/xen/drivers/passthrough/vtd/x86/ats.c
+++ b/xen/drivers/passthrough/vtd/x86/ats.c
@@ -118,7 +118,6 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
     {
         u16 sid = PCI_BDF2(pdev->bus, pdev->devfn);
         bool_t sbit;
-        int rc = 0;
 
         /* Only invalidate devices that belong to this IOMMU */
         if ( pdev->iommu != iommu )
@@ -134,8 +133,8 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
             /* invalidate all translations: sbit=1,bit_63=0,bit[62:12]=1 */
             sbit = 1;
             addr = (~0UL << PAGE_SHIFT_4K) & 0x7FFFFFFFFFFFFFFF;
-            rc = qinval_device_iotlb(iommu, pdev->ats_queue_depth,
-                                     sid, sbit, addr);
+            ret = qinval_device_iotlb_sync(iommu, pdev->ats_queue_depth,
+                                           sid, sbit, addr);
             break;
         case DMA_TLB_PSI_FLUSH:
             if ( !device_in_domain(iommu, pdev, did) )
@@ -154,16 +153,13 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
                 addr |= (((u64)1 << (size_order - 1)) - 1) << PAGE_SHIFT_4K;
             }
 
-            rc = qinval_device_iotlb(iommu, pdev->ats_queue_depth,
-                                     sid, sbit, addr);
+            ret = qinval_device_iotlb_sync(iommu, pdev->ats_queue_depth,
+                                           sid, sbit, addr);
             break;
         default:
             dprintk(XENLOG_WARNING VTDPREFIX, "invalid vt-d flush type\n");
             return -EOPNOTSUPP;
         }
-
-        if ( !ret )
-            ret = rc;
     }
 
     return ret;
-- 
1.9.1

