
[Xen-changelog] [xen-unstable] iommu: Introduce per cpu flag (iommu_dont_flush_iotlb) to avoid unnecessary iotlb flush



# HG changeset patch
# User Jean Guyader <jean.guyader@xxxxxxxxxxxxx>
# Date 1321623806 0
# Node ID fe3e9d0c123cc3184b6e29531e326927a0e4938d
# Parent  89a4d97731c58db2790f63dd687edc537e7ce2b8
iommu: Introduce per cpu flag (iommu_dont_flush_iotlb) to avoid unnecessary iotlb flush

Add a per-cpu flag that will be checked by the low-level IOMMU code to
skip iotlb flushes. iommu_iotlb_flush must then be called explicitly by
the caller.

Signed-off-by: Jean Guyader <jean.guyader@xxxxxxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---
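
In outline, the batching pattern this patch establishes is the
following. This is only a hedged sketch: map_gmfn_range() and its
arguments are illustrative stand-ins, not code from the patch; the
real caller is xenmem_add_to_physmap() in the mm.c hunks below.

/* Hypothetical illustration of the intended usage pattern. */
static int map_gmfn_range(struct domain *d, unsigned long gfn,
                          unsigned long mfn, unsigned int count)
{
    unsigned int i;
    int rc = 0;

    /* 1. Ask the low-level IOMMU code to skip its per-page flushes. */
    if ( need_iommu(d) )
        this_cpu(iommu_dont_flush_iotlb) = 1;

    /* 2. Perform the whole batch of mapping updates. */
    for ( i = 0; i < count && rc == 0; i++ )
        rc = iommu_map_page(d, gfn + i, mfn + i,
                            IOMMUF_readable | IOMMUF_writable);

    /* 3. Re-enable flushing and issue one flush covering the batch. */
    if ( need_iommu(d) )
    {
        this_cpu(iommu_dont_flush_iotlb) = 0;
        iommu_iotlb_flush(d, gfn, i);
    }

    return rc;
}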


diff -r 89a4d97731c5 -r fe3e9d0c123c xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Nov 18 13:42:46 2011 +0000
+++ b/xen/arch/x86/mm.c Fri Nov 18 13:43:26 2011 +0000
@@ -4794,10 +4794,15 @@
 static int xenmem_add_to_physmap(struct domain *d,
                                  struct xen_add_to_physmap *xatp)
 {
+    struct xen_add_to_physmap start_xatp;
     int rc = 0;
 
     if ( xatp->space == XENMAPSPACE_gmfn_range )
     {
+        if ( need_iommu(d) )
+            this_cpu(iommu_dont_flush_iotlb) = 1;
+
+        start_xatp = *xatp;
         while ( xatp->size > 0 )
         {
             rc = xenmem_add_to_physmap_once(d, xatp);
@@ -4816,6 +4821,13 @@
             }
         }
 
+        if ( need_iommu(d) )
+        {
+            this_cpu(iommu_dont_flush_iotlb) = 0;
+            iommu_iotlb_flush(d, start_xatp.idx, start_xatp.size - xatp->size);
+            iommu_iotlb_flush(d, start_xatp.gpfn, start_xatp.size - xatp->size);
+        }
+
         return rc;
     }
 
diff -r 89a4d97731c5 -r fe3e9d0c123c xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Fri Nov 18 13:42:46 2011 +0000
+++ b/xen/drivers/passthrough/iommu.c   Fri Nov 18 13:43:26 2011 +0000
@@ -52,6 +52,8 @@
 bool_t __read_mostly iommu_debug;
 bool_t __read_mostly amd_iommu_perdev_intremap;
 
+DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
+
 static void __init parse_iommu_param(char *s)
 {
     char *ss;
@@ -227,6 +229,7 @@
 
     spin_lock(&d->page_alloc_lock);
 
+    this_cpu(iommu_dont_flush_iotlb) = 1;
     page_list_for_each ( page, &d->page_list )
     {
         if ( is_hvm_domain(d) ||
@@ -244,6 +247,8 @@
             }
         }
     }
+    this_cpu(iommu_dont_flush_iotlb) = 0;
+    iommu_iotlb_flush_all(d);
     spin_unlock(&d->page_alloc_lock);
     return 0;
 }
diff -r 89a4d97731c5 -r fe3e9d0c123c xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Fri Nov 18 13:42:46 2011 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.c       Fri Nov 18 13:43:26 2011 +0000
@@ -663,7 +663,8 @@
     spin_unlock(&hd->mapping_lock);
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
-    __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K , 0, 1);
+    if ( !this_cpu(iommu_dont_flush_iotlb) )
+        __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K , 0, 1);
 
     unmap_vtd_domain_page(page);
 
@@ -1760,7 +1761,8 @@
     spin_unlock(&hd->mapping_lock);
     unmap_vtd_domain_page(page);
 
-    __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old), 1);
+    if ( !this_cpu(iommu_dont_flush_iotlb) )
+        __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old), 1);
 
     return 0;
 }
diff -r 89a4d97731c5 -r fe3e9d0c123c xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h   Fri Nov 18 13:42:46 2011 +0000
+++ b/xen/include/xen/iommu.h   Fri Nov 18 13:43:26 2011 +0000
@@ -164,4 +164,16 @@
 void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
 void iommu_iotlb_flush_all(struct domain *d);
 
+/*
+ * The purpose of the iommu_dont_flush_iotlb optional cpu flag is to
+ * avoid unnecessary iotlb flushes in the low-level IOMMU code.
+ *
+ * iommu_map_page/iommu_unmap_page must flush the iotlb, but sometimes
+ * this operation can be really expensive. This flag will be set by the
+ * caller to notify the low-level IOMMU code to skip the iotlb flushes.
+ * iommu_iotlb_flush/iommu_iotlb_flush_all will be explicitly called by
+ * the caller.
+ */
+DECLARE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
+
 #endif /* _IOMMU_H_ */
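
The comment above is the whole contract. In a low-level map handler it
plays out roughly as in the sketch below, modelled on the VT-d hunks in
this patch; example_iommu_map_page() and flush_iotlb_one() are
hypothetical names, not functions from the tree.

static int example_iommu_map_page(struct domain *d, unsigned long gfn,
                                  unsigned long mfn)
{
    /* ... update the IOMMU page tables for gfn -> mfn ... */

    /*
     * Flush immediately only when no batching is in effect; otherwise
     * the caller has promised to invoke iommu_iotlb_flush() (or
     * iommu_iotlb_flush_all()) itself once the batch is complete.
     */
    if ( !this_cpu(iommu_dont_flush_iotlb) )
        flush_iotlb_one(d, gfn);    /* hypothetical flush helper */

    return 0;
}

Making the flag per-cpu presumably avoids any locking: it is only
meaningful between the set and the clear on the pCPU performing the
batch, so no other CPU ever needs to observe it.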
