
[Xen-changelog] [xen-3.4-testing] Vt-d: queued invalidation cleanup



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1254409914 -3600
# Node ID ee6d57c2d7ca162f98c55e13e714230fee4bf10f
# Parent  601886e960f7f0ddcfb4f4ee707b85991db6da50
Vt-d: queued invalidation cleanup

This patch cleans up queued invalidation: it adds a ring wrap check
(the queue is full when advancing the tail would catch up with the
head), supports multiple concurrent status polls, and makes other
minor changes. This version uses a local variable as the polling
address, which is cleaner.
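
As a rough sketch of that wrap check (illustration only, not code
from this changeset: ring_next_index() and its arguments are
hypothetical stand-ins for qinval_next_index() and the
DMAR_IQH_REG/DMAR_IQT_REG register reads), the invalidation queue is
treated as a ring of QINVAL_ENTRY_NR slots, with one slot
deliberately left unused so that a full ring can be told apart from
an empty one:

    /* Hypothetical sketch of the ring-full test.  An empty ring has
     * head == tail, so the ring counts as "full" once advancing the
     * tail would land it on the head; one slot always stays unused. */
    static int ring_next_index(unsigned int head, unsigned int tail,
                               unsigned int nr_slots)
    {
        if ( ((tail + 1) % nr_slots) == head )
            return -1;      /* full; callers report -EBUSY */
        return tail;        /* claim the current tail slot */
    }

A caller writes its descriptor into the claimed slot and then
advances the tail register to (index + 1) % nr_slots, as
qinval_update_qtail() does in the patch below.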

Signed-off-by: Zhai Edwin <edwin.zhai@xxxxxxxxx>
xen-unstable changeset:   20203:5eb189704690
xen-unstable date:        Tue Sep 15 09:24:59 2009 +0100
---
 xen/drivers/passthrough/vtd/iommu.c  |    1 
 xen/drivers/passthrough/vtd/iommu.h  |   24 +++++++-----
 xen/drivers/passthrough/vtd/qinval.c |   67 +++++++++++++++++++++--------------
 3 files changed, 55 insertions(+), 37 deletions(-)

diff -r 601886e960f7 -r ee6d57c2d7ca xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Thu Oct 01 16:11:20 2009 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Thu Oct 01 16:11:54 2009 +0100
@@ -87,7 +87,6 @@ static struct intel_iommu *alloc_intel_i
     memset(intel, 0, sizeof(struct intel_iommu));
 
     spin_lock_init(&intel->qi_ctrl.qinval_lock);
-    spin_lock_init(&intel->qi_ctrl.qinval_poll_lock);
     spin_lock_init(&intel->ir_ctrl.iremap_lock);
 
     return intel;
diff -r 601886e960f7 -r ee6d57c2d7ca xen/drivers/passthrough/vtd/iommu.h
--- a/xen/drivers/passthrough/vtd/iommu.h       Thu Oct 01 16:11:20 2009 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.h       Thu Oct 01 16:11:54 2009 +0100
@@ -392,14 +392,20 @@ struct qinval_entry {
     }q;
 };
 
-struct poll_info {
-    u64 saddr;
-    u32 udata;
-};
-
-#define NUM_QINVAL_PAGES 1
-#define IQA_REG_QS       0    // derived from NUM_QINVAL_PAGES per VT-d spec.
-#define QINVAL_ENTRY_NR (PAGE_SIZE_4K*NUM_QINVAL_PAGES/sizeof(struct qinval_entry))
+/* Order of queue invalidation pages */
+#define IQA_REG_QS       0
+#define NUM_QINVAL_PAGES (1 << IQA_REG_QS)
+
+/* Each entry is 16 bytes */
+#define QINVAL_ENTRY_NR  (1 << (IQA_REG_QS + 8))
+
+/* Status data flag */
+#define QINVAL_STAT_INIT  0
+#define QINVAL_STAT_DONE  1
+
+/* Queue invalidation head/tail shift */
+#define QINVAL_INDEX_SHIFT 4
+
 #define qinval_present(v) ((v).lo & 1)
 #define qinval_fault_disable(v) (((v).lo >> 1) & 1)
 
@@ -441,8 +447,6 @@ struct qi_ctrl {
     u64 qinval_maddr;  /* queue invalidation page machine address */
     int qinval_index;                    /* queue invalidation index */
     spinlock_t qinval_lock;      /* lock for queue invalidation page */
-    spinlock_t qinval_poll_lock; /* lock for queue invalidation poll addr */
-    volatile u32 qinval_poll_status;     /* used by poll methord to sync */
 };
 
 struct ir_ctrl {
diff -r 601886e960f7 -r ee6d57c2d7ca xen/drivers/passthrough/vtd/qinval.c
--- a/xen/drivers/passthrough/vtd/qinval.c      Thu Oct 01 16:11:20 2009 +0100
+++ b/xen/drivers/passthrough/vtd/qinval.c      Thu Oct 01 16:11:54 2009 +0100
@@ -45,18 +45,29 @@ static void print_qi_regs(struct iommu *
 
 static int qinval_next_index(struct iommu *iommu)
 {
+    u64 tail, head;
+
+    tail = dmar_readq(iommu->reg, DMAR_IQT_REG);
+    tail >>= QINVAL_INDEX_SHIFT;
+
+    head = dmar_readq(iommu->reg, DMAR_IQH_REG);
+    head >>= QINVAL_INDEX_SHIFT;
+
+    /* Ring wrap check: full if advancing the tail would hit the head. */
+    if ( ((tail + 1) % QINVAL_ENTRY_NR) == head )
+        return -1;
+
+    return tail;
+}
+
+static int qinval_update_qtail(struct iommu *iommu, int index)
+{
     u64 val;
-    val = dmar_readq(iommu->reg, DMAR_IQT_REG);
-    return (val >> 4);
-}
-
-static int qinval_update_qtail(struct iommu *iommu, int index)
-{
-    u64 val;
-
-    /* Need an ASSERT to insure that we have got register lock */
-    val = (index < (QINVAL_ENTRY_NR-1)) ? (index + 1) : 0;
-    dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << 4));
+
+    /* Must hold the register lock when updating the tail. */
+    ASSERT( spin_is_locked(&iommu->register_lock) );
+    val = (index + 1) % QINVAL_ENTRY_NR;
+    dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << QINVAL_INDEX_SHIFT));
     return 0;
 }
 
@@ -146,6 +157,8 @@ int queue_invalidate_iotlb(struct iommu 
     spin_lock_irqsave(&iommu->register_lock, flags);
 
     index = qinval_next_index(iommu);
+    if ( index == -1 )
+        return -EBUSY;
     ret = gen_iotlb_inv_dsc(iommu, index, granu, dr, dw, did,
                             am, ih, addr);
     ret |= qinval_update_qtail(iommu, index);
@@ -180,29 +193,29 @@ static int gen_wait_dsc(struct iommu *io
 }
 
 static int queue_invalidate_wait(struct iommu *iommu,
-    u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
-{
-    unsigned long flags;
+    u8 iflag, u8 sw, u8 fn)
+{
     s_time_t start_time;
+    u32 poll_slot = QINVAL_STAT_INIT;
     int index = -1;
     int ret = -1;
-    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
-    spin_lock_irqsave(&qi_ctrl->qinval_poll_lock, flags);
-    spin_lock(&iommu->register_lock);
+    unsigned long flags;
+
+    spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
-    if ( *saddr == 1 )
-        *saddr = 0;
-    ret = gen_wait_dsc(iommu, index, iflag, sw, fn, sdata, saddr);
+    if ( index == -1 )
+        return -EBUSY;
+
+    ret = gen_wait_dsc(iommu, index, iflag, sw, fn, QINVAL_STAT_DONE, &poll_slot);
     ret |= qinval_update_qtail(iommu, index);
-    spin_unlock(&iommu->register_lock);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
 
     /* Now we don't support interrupt method */
     if ( sw )
     {
         /* In case all wait descriptor writes to same addr with same data */
         start_time = NOW();
-        while ( *saddr != 1 )
+        while ( poll_slot != QINVAL_STAT_DONE )
         {
             if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
             {
@@ -212,7 +225,6 @@ static int queue_invalidate_wait(struct 
             cpu_relax();
         }
     }
-    spin_unlock_irqrestore(&qi_ctrl->qinval_poll_lock, flags);
     return ret;
 }
 
@@ -223,8 +235,7 @@ int invalidate_sync(struct iommu *iommu)
 
     if ( qi_ctrl->qinval_maddr != 0 )
     {
-        ret = queue_invalidate_wait(iommu,
-            0, 1, 1, 1, &qi_ctrl->qinval_poll_status);
+        ret = queue_invalidate_wait(iommu, 0, 1, 1);
         return ret;
     }
     return 0;
@@ -269,6 +280,8 @@ int qinval_device_iotlb(struct iommu *io
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
+    if ( index == -1 )
+        return -EBUSY;
     ret = gen_dev_iotlb_inv_dsc(iommu, index, max_invs_pend,
                                 sid, size, addr);
     ret |= qinval_update_qtail(iommu, index);
@@ -311,6 +324,8 @@ int queue_invalidate_iec(struct iommu *i
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
+    if ( index == -1 )
+        return -EBUSY;
     ret = gen_iec_inv_dsc(iommu, index, granu, im, iidx);
     ret |= qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
