
[xen staging] AMD/IOMMU: re-work locking around sending of commands



commit f95b7b37cfc6d4613721df9357090d14712013c0
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Jun 29 12:35:12 2021 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Jun 29 12:35:12 2021 +0200

    AMD/IOMMU: re-work locking around sending of commands
    
    It appears unhelpful to me for flush_command_buffer() to block all
    progress elsewhere for the given IOMMU by holding its lock while waiting
    for command completion. There's no real need for callers of that
    function or of send_iommu_command() to hold the lock. Contain all
    command sending related locking to the latter function.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Paul Durrant <paul@xxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_cmd.c     | 23 +++++------------------
 xen/drivers/passthrough/amd/iommu_guest.c   |  3 ++-
 xen/drivers/passthrough/amd/iommu_init.c    | 15 +++++++--------
 xen/drivers/passthrough/amd/iommu_intr.c    |  6 ------
 xen/drivers/passthrough/amd/pci_amd_iommu.c | 15 ++++++++++-----
 5 files changed, 24 insertions(+), 38 deletions(-)
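
For illustration, a minimal sketch of the locking pattern this change establishes
(simplified, not the actual Xen code; ring_write(), build_completion_wait() and
completion_done() are hypothetical helpers, and timeout handling is omitted):
only the command-ring tail update is serialised by the IOMMU lock, while waiting
for command completion happens with the lock dropped.

    static void send_iommu_command(struct amd_iommu *iommu, const uint32_t cmd[4])
    {
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);      /* serialise tail updates only */
        ring_write(&iommu->cmd_buffer, cmd);         /* copy entry, advance tail */
        spin_unlock_irqrestore(&iommu->lock, flags);
    }

    static void flush_command_buffer(struct amd_iommu *iommu, unsigned int timeout)
    {
        uint32_t cmd[4];

        build_completion_wait(cmd);                  /* COMPLETION_WAIT entry */
        send_iommu_command(iommu, cmd);              /* takes/drops the lock itself */

        while ( !completion_done(iommu) )            /* poll with the lock dropped, so */
            cpu_relax();                             /* other CPUs can queue commands  */
    }

With this split, callers such as amd_iommu_flush_device() no longer need to hold
iommu->lock around the flush, which is what the hunks below remove.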

diff --git a/xen/drivers/passthrough/amd/iommu_cmd.c b/xen/drivers/passthrough/amd/iommu_cmd.c
index e8541276ca..4db8e1651f 100644
--- a/xen/drivers/passthrough/amd/iommu_cmd.c
+++ b/xen/drivers/passthrough/amd/iommu_cmd.c
@@ -27,6 +27,9 @@ static void send_iommu_command(struct amd_iommu *iommu,
                                const uint32_t cmd[4])
 {
     uint32_t tail;
+    unsigned long flags;
+
+    spin_lock_irqsave(&iommu->lock, flags);
 
     tail = iommu->cmd_buffer.tail + sizeof(cmd_entry_t);
     if ( tail == iommu->cmd_buffer.size )
@@ -47,6 +50,8 @@ static void send_iommu_command(struct amd_iommu *iommu,
     iommu->cmd_buffer.tail = tail;
 
     writel(tail, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
+
+    spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void flush_command_buffer(struct amd_iommu *iommu,
@@ -273,7 +278,6 @@ static void invalidate_iommu_all(struct amd_iommu *iommu)
 void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
                            daddr_t daddr, unsigned int order)
 {
-    unsigned long flags;
     struct amd_iommu *iommu;
     unsigned int req_id, queueid, maxpend;
 
@@ -300,10 +304,8 @@ void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
     maxpend = pdev->ats.queue_depth & 0xff;
 
     /* send INVALIDATE_IOTLB_PAGES command */
-    spin_lock_irqsave(&iommu->lock, flags);
     invalidate_iotlb_pages(iommu, maxpend, 0, queueid, daddr, req_id, order);
     flush_command_buffer(iommu, iommu_dev_iotlb_timeout);
-    spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void amd_iommu_flush_all_iotlbs(struct domain *d, daddr_t daddr,
@@ -330,17 +332,14 @@ static void amd_iommu_flush_all_iotlbs(struct domain *d, daddr_t daddr,
 static void _amd_iommu_flush_pages(struct domain *d,
                                    daddr_t daddr, unsigned int order)
 {
-    unsigned long flags;
     struct amd_iommu *iommu;
     unsigned int dom_id = d->domain_id;
 
     /* send INVALIDATE_IOMMU_PAGES command */
     for_each_amd_iommu ( iommu )
     {
-        spin_lock_irqsave(&iommu->lock, flags);
         invalidate_iommu_pages(iommu, daddr, dom_id, order);
         flush_command_buffer(iommu, 0);
-        spin_unlock_irqrestore(&iommu->lock, flags);
     }
 
     if ( ats_enabled )
@@ -360,37 +359,25 @@ void amd_iommu_flush_pages(struct domain *d,
 
 void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf)
 {
-    ASSERT( spin_is_locked(&iommu->lock) );
-
     invalidate_dev_table_entry(iommu, bdf);
     flush_command_buffer(iommu, 0);
 }
 
 void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf)
 {
-    ASSERT( spin_is_locked(&iommu->lock) );
-
     invalidate_interrupt_table(iommu, bdf);
     flush_command_buffer(iommu, 0);
 }
 
 void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
 {
-    ASSERT( spin_is_locked(&iommu->lock) );
-
     invalidate_iommu_all(iommu);
     flush_command_buffer(iommu, 0);
 }
 
 void amd_iommu_send_guest_cmd(struct amd_iommu *iommu, u32 cmd[])
 {
-    unsigned long flags;
-
-    spin_lock_irqsave(&iommu->lock, flags);
-
     send_iommu_command(iommu, cmd);
     /* TBD: Timeout selection may require peeking into cmd[]. */
     flush_command_buffer(iommu, 0);
-
-    spin_unlock_irqrestore(&iommu->lock, flags);
 }
diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
index 00c5ccd7b5..85828490ff 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -449,9 +449,10 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
     spin_lock_irqsave(&iommu->lock, flags);
     dte_set_gcr3_table(mdte, hdom_id, gcr3_mfn << PAGE_SHIFT, gv, glx);
 
-    amd_iommu_flush_device(iommu, req_id);
     spin_unlock_irqrestore(&iommu->lock, flags);
 
+    amd_iommu_flush_device(iommu, req_id);
+
     return 0;
 }
 
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index bb52c181f8..312f335c1d 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -871,7 +871,10 @@ static void enable_iommu(struct amd_iommu *iommu)
     spin_lock_irqsave(&iommu->lock, flags);
 
     if ( unlikely(iommu->enabled) )
-        goto out;
+    {
+        spin_unlock_irqrestore(&iommu->lock, flags);
+        return;
+    }
 
     amd_iommu_erratum_746_workaround(iommu);
 
@@ -921,13 +924,12 @@ static void enable_iommu(struct amd_iommu *iommu)
 
     set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
 
-    if ( iommu->features.flds.ia_sup )
-        amd_iommu_flush_all_caches(iommu);
-
     iommu->enabled = 1;
 
- out:
     spin_unlock_irqrestore(&iommu->lock, flags);
+
+    if ( iommu->features.flds.ia_sup )
+        amd_iommu_flush_all_caches(iommu);
 }
 
 static void disable_iommu(struct amd_iommu *iommu)
@@ -1544,7 +1546,6 @@ static int _invalidate_all_devices(
 {
     unsigned int bdf; 
     u16 req_id;
-    unsigned long flags;
     struct amd_iommu *iommu;
 
     for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
@@ -1553,10 +1554,8 @@ static int _invalidate_all_devices(
         req_id = ivrs_mappings[bdf].dte_requestor_id;
         if ( iommu )
         {
-            spin_lock_irqsave(&iommu->lock, flags);
             amd_iommu_flush_device(iommu, req_id);
             amd_iommu_flush_intremap(iommu, req_id);
-            spin_unlock_irqrestore(&iommu->lock, flags);
         }
     }
 
diff --git a/xen/drivers/passthrough/amd/iommu_intr.c b/xen/drivers/passthrough/amd/iommu_intr.c
index d78bb209bc..bf00370574 100644
--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -310,9 +310,7 @@ static int update_intremap_entry_from_ioapic(
         entry.ptr32->flds.remap_en = false;
         spin_unlock(lock);
 
-        spin_lock(&iommu->lock);
         amd_iommu_flush_intremap(iommu, req_id);
-        spin_unlock(&iommu->lock);
 
         spin_lock(lock);
     }
@@ -527,11 +525,9 @@ static int update_intremap_entry_from_msi_msg(
 
         if ( iommu->enabled )
         {
-            spin_lock_irqsave(&iommu->lock, flags);
             amd_iommu_flush_intremap(iommu, req_id);
             if ( alias_id != req_id )
                 amd_iommu_flush_intremap(iommu, alias_id);
-            spin_unlock_irqrestore(&iommu->lock, flags);
         }
 
         return 0;
@@ -567,11 +563,9 @@ static int update_intremap_entry_from_msi_msg(
         entry.ptr32->flds.remap_en = false;
         spin_unlock(lock);
 
-        spin_lock(&iommu->lock);
         amd_iommu_flush_intremap(iommu, req_id);
         if ( alias_id != req_id )
             amd_iommu_flush_intremap(iommu, alias_id);
-        spin_unlock(&iommu->lock);
 
         spin_lock(lock);
     }
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 085fe2f577..2dce505b97 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -129,6 +129,8 @@ static void amd_iommu_setup_domain_device(
              iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
             dte->i = ats_enabled;
 
+        spin_unlock_irqrestore(&iommu->lock, flags);
+
         amd_iommu_flush_device(iommu, req_id);
 
         AMD_IOMMU_DEBUG("Setup I/O page table: device id = %#x, type = %#x, "
@@ -138,8 +140,8 @@ static void amd_iommu_setup_domain_device(
                         page_to_maddr(hd->arch.amd.root_table),
                         domain->domain_id, hd->arch.amd.paging_mode);
     }
-
-    spin_unlock_irqrestore(&iommu->lock, flags);
+    else
+        spin_unlock_irqrestore(&iommu->lock, flags);
 
     ASSERT(pcidevs_locked());
 
@@ -307,6 +309,8 @@ static void amd_iommu_disable_domain_device(const struct domain *domain,
         smp_wmb();
         dte->v = true;
 
+        spin_unlock_irqrestore(&iommu->lock, flags);
+
         amd_iommu_flush_device(iommu, req_id);
 
         AMD_IOMMU_DEBUG("Disable: device id = %#x, "
@@ -314,7 +318,8 @@ static void amd_iommu_disable_domain_device(const struct domain *domain,
                         req_id,  domain->domain_id,
                         dom_iommu(domain)->arch.amd.paging_mode);
     }
-    spin_unlock_irqrestore(&iommu->lock, flags);
+    else
+        spin_unlock_irqrestore(&iommu->lock, flags);
 
     ASSERT(pcidevs_locked());
 
@@ -455,9 +460,9 @@ static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
             iommu->dev_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE),
             ivrs_mappings[bdf].intremap_table, iommu, iommu_intremap);
 
-        amd_iommu_flush_device(iommu, bdf);
-
         spin_unlock_irqrestore(&iommu->lock, flags);
+
+        amd_iommu_flush_device(iommu, bdf);
     }
 
     amd_iommu_setup_domain_device(pdev->domain, iommu, devfn, pdev);
--
generated by git-patchbot for /home/xen/git/xen.git#staging



 

