
[Xen-changelog] [xen-unstable] vtd: Do not update-and-flush an IOMMU PTE if it does not change.

# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1279298513 -3600
# Node ID 07b718833c7416b06a3aee61a0bf27c2e1df1160
# Parent  e382656e4dccab583006352ca131af9da623673a
vtd: Do not update-and-flush an IOMMU PTE if it does not change.

There are cases of identical repeated calls to
iommu_map_page(). Flushing is slow, so it's worth detecting these and
bailing early.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/drivers/passthrough/vtd/iommu.c |   20 +++++++++++++-------
 1 files changed, 13 insertions(+), 7 deletions(-)

diff -r e382656e4dcc -r 07b718833c74 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Fri Jul 16 16:19:51 2010 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Fri Jul 16 17:41:53 2010 +0100
@@ -1591,9 +1591,8 @@ static int intel_iommu_map_page(
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
-    struct dma_pte *page = NULL, *pte = NULL;
+    struct dma_pte *page = NULL, *pte = NULL, old, new = { 0 };
     u64 pg_maddr;
-    int pte_present;
     int flush_dev_iotlb;
     int iommu_domid;
 
@@ -1611,15 +1610,22 @@ static int intel_iommu_map_page(
     }
     page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
     pte = page + (gfn & LEVEL_MASK);
-    pte_present = dma_pte_present(*pte);
-    dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
-    dma_set_pte_prot(*pte,
+    old = *pte;
+    dma_set_pte_addr(new, (paddr_t)mfn << PAGE_SHIFT_4K);
+    dma_set_pte_prot(new,
                      ((flags & IOMMUF_readable) ? DMA_PTE_READ  : 0) |
                      ((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
 
     /* Set the SNP on leaf page table if Snoop Control available */
     if ( iommu_snoop )
-        dma_set_pte_snp(*pte);
+        dma_set_pte_snp(new);
+
+    if ( old.val == new.val )
+    {
+        spin_unlock(&hd->mapping_lock);
+        return 0;
+    }
+    *pte = new;
 
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
     spin_unlock(&hd->mapping_lock);
@@ -1642,7 +1648,7 @@ static int intel_iommu_map_page(
             continue;
         if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
                                    (paddr_t)gfn << PAGE_SHIFT_4K, 1,
-                                   !pte_present, flush_dev_iotlb) )
+                                   !dma_pte_present(old), flush_dev_iotlb) )
             iommu_flush_write_buffer(iommu);
     }
 

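For readers outside the Xen tree, here is a minimal standalone sketch of
the idiom the patch introduces: compose the new PTE in a local variable,
compare it against the entry already installed, and return before the
expensive flush when nothing changed. The types and helpers below
(pte_t, map_page, flush_pte) are simplified stand-ins for illustration,
not the real Xen/VT-d definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct dma_pte: one raw 64-bit entry. */
typedef struct { uint64_t val; } pte_t;

#define PTE_READ      (1ULL << 0)
#define PTE_WRITE     (1ULL << 1)
#define PAGE_SHIFT_4K 12

static unsigned long flushes;

/* Stand-in for iommu_flush_cache_entry() plus the IOTLB flush:
 * the slow part we want to skip on identical repeat calls. */
static void flush_pte(pte_t *pte)
{
    (void)pte;
    flushes++;
}

static void map_page(pte_t *pte, uint64_t mfn, int readable, int writable)
{
    pte_t old = *pte, new = { 0 };

    new.val = (mfn << PAGE_SHIFT_4K)
            | (readable ? PTE_READ : 0)
            | (writable ? PTE_WRITE : 0);

    /* Identical repeat call: nothing to write, nothing to flush. */
    if ( old.val == new.val )
        return;

    *pte = new;
    flush_pte(pte);
}

int main(void)
{
    pte_t pte = { 0 };

    map_page(&pte, 0x1234, 1, 1);  /* first mapping: flushes      */
    map_page(&pte, 0x1234, 1, 1);  /* identical repeat: no flush  */
    map_page(&pte, 0x1234, 1, 0);  /* permissions change: flushes */

    printf("flushes: %lu\n", flushes);  /* prints 2 */
    return 0;
}

Note that the ordering mirrors the patch: the new entry is fully
composed (address, permission bits, and in the real code the snoop bit)
before the comparison, so a change to any field defeats the early-out.
The comparison is on the raw 64-bit value, which is exactly what
old.val == new.val does in the changeset.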