
[Xen-devel] [PATCH] x86/AMD-Vi: Fold exit paths of {enable, disable}_iommu()



... to avoid having multiple spin_unlock_irqrestore() calls.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
CC: Brian Woods <brian.woods@xxxxxxx>

Interestingly, GCC 6.3 managed to fold disable_iommu() automatically.  There is
some partial folding for enable_iommu() (insofar as only a single call to
_spin_unlock_irqrestore is emitted), but this delta yields

  add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-20 (-20)
  Function                                     old     new   delta
  enable_iommu                                1844    1824     -20
  Total: Before=3340299, After=3340279, chg -0.00%

which shows that the compiler hadn't managed the full fold on its own.

Noticed while investigating the S3 regression.
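
For reference, the folded-exit shape being applied, as a minimal standalone
sketch (hypothetical structure/field names, not part of the patch itself):

    static void frobnicate(struct widget *w)
    {
        unsigned long flags;

        spin_lock_irqsave(&w->lock, flags);

        if ( unlikely(w->busy) )
            goto out;                  /* Early-exit path shares the unlock. */

        /* ... work which previously duplicated the unlock before returning ... */
        w->busy = 1;

     out:
        spin_unlock_irqrestore(&w->lock, flags); /* Single unlock for all paths. */
    }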
---
 xen/drivers/passthrough/amd/iommu_init.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index bb9f33e264..bb5a3e57c9 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -899,11 +899,8 @@ static void enable_iommu(struct amd_iommu *iommu)
 
     spin_lock_irqsave(&iommu->lock, flags);
 
-    if ( iommu->enabled )
-    {
-        spin_unlock_irqrestore(&iommu->lock, flags); 
-        return;
-    }
+    if ( unlikely(iommu->enabled) )
+        goto out;
 
     amd_iommu_erratum_746_workaround(iommu);
 
@@ -957,6 +954,8 @@ static void enable_iommu(struct amd_iommu *iommu)
         amd_iommu_flush_all_caches(iommu);
 
     iommu->enabled = 1;
+
+ out:
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -966,11 +965,8 @@ static void disable_iommu(struct amd_iommu *iommu)
 
     spin_lock_irqsave(&iommu->lock, flags);
 
-    if ( !iommu->enabled )
-    {
-        spin_unlock_irqrestore(&iommu->lock, flags);
-        return;
-    }
+    if ( unlikely(!iommu->enabled) )
+        goto out;
 
     if ( !iommu->ctrl.int_cap_xt_en )
         amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
@@ -988,6 +984,7 @@ static void disable_iommu(struct amd_iommu *iommu)
 
     iommu->enabled = 0;
 
+ out:
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-- 
2.11.0

