
[Xen-changelog] [xen-unstable] VT-d: get rid of hardcode in iommu_flush_cache_entry



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1259743716 0
# Node ID 04037c99b5f19bfffdda963c37b61a5f564371f0
# Parent  c3373757a5d6d9747379fa2ad575d7b8c12e25b7
VT-d: get rid of hardcode in iommu_flush_cache_entry

Currently iommu_flush_cache_entry uses a fixed size of 8 bytes when
flushing the cache, but it also needs to flush entries of other sizes;
struct root_entry, for example, is 16 bytes. This patch removes the
hardcoded size by adding a "size" parameter so that callers can flush
entries of the appropriate size.

Signed-off-by: Weidong Han <weidong.han@xxxxxxxxx>
---
 xen/drivers/passthrough/vtd/intremap.c |    6 +++---
 xen/drivers/passthrough/vtd/iommu.c    |   24 ++++++++++++------------
 xen/drivers/passthrough/vtd/vtd.h      |    2 +-
 3 files changed, 16 insertions(+), 16 deletions(-)
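
For reference, the generalised flush boils down to the short loop
below. This is a minimal standalone sketch modelled on the patched
__iommu_flush_cache()/iommu_flush_cache_entry(), not the Xen source
itself: clflush_size is normally probed from CPUID, and the x86
clflush implementation of cacheline_flush() shown here is an
illustrative assumption (the patch only shows it being called).

    static unsigned int clflush_size = 64;  /* cache-line size, normally CPUID-derived */

    static inline void cacheline_flush(char *addr)
    {
        /* Write the cache line containing *addr back to memory (x86 clflush). */
        asm volatile ( "clflush (%0)" :: "r" (addr) : "memory" );
    }

    /* Flush an arbitrarily sized entry, e.g. sizeof(struct root_entry) == 16. */
    static void flush_cache_entry(void *addr, unsigned int size)
    {
        unsigned int i;

        /* One clflush per cache line touched by [addr, addr + size). */
        for ( i = 0; i < size; i += clflush_size )
            cacheline_flush((char *)addr + i);
    }

Callers then pass the real entry size, e.g.
flush_cache_entry(root, sizeof(struct root_entry)), instead of relying
on an implicit 8-byte flush.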

diff -r c3373757a5d6 -r 04037c99b5f1 xen/drivers/passthrough/vtd/intremap.c
--- a/xen/drivers/passthrough/vtd/intremap.c    Wed Dec 02 08:47:49 2009 +0000
+++ b/xen/drivers/passthrough/vtd/intremap.c    Wed Dec 02 08:48:36 2009 +0000
@@ -155,7 +155,7 @@ static void free_remap_entry(struct iomm
                      iremap_entries, iremap_entry);
 
     memset(iremap_entry, 0, sizeof(struct iremap_entry));
-    iommu_flush_cache_entry(iremap_entry);
+    iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
     unmap_vtd_domain_page(iremap_entries);
@@ -329,7 +329,7 @@ static int ioapic_rte_to_remap_entry(str
     }
 
     memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
-    iommu_flush_cache_entry(iremap_entry);
+    iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
     invalidate_sync(iommu);
 
@@ -635,7 +635,7 @@ static int msi_msg_to_remap_entry(
     remap_rte->data = 0;
 
     memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
-    iommu_flush_cache_entry(iremap_entry);
+    iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
     invalidate_sync(iommu);
 
diff -r c3373757a5d6 -r 04037c99b5f1 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Wed Dec 02 08:47:49 2009 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.c       Wed Dec 02 08:48:36 2009 +0000
@@ -117,7 +117,7 @@ struct iommu_flush *iommu_get_flush(stru
 
 static unsigned int clflush_size;
 static int iommus_incoherent;
-static void __iommu_flush_cache(void *addr, int size)
+static void __iommu_flush_cache(void *addr, unsigned int size)
 {
     int i;
 
@@ -128,9 +128,9 @@ static void __iommu_flush_cache(void *ad
         cacheline_flush((char *)addr + i);
 }
 
-void iommu_flush_cache_entry(void *addr)
-{
-    __iommu_flush_cache(addr, 8);
+void iommu_flush_cache_entry(void *addr, unsigned int size)
+{
+    __iommu_flush_cache(addr, size);
 }
 
 void iommu_flush_cache_page(void *addr, unsigned long npages)
@@ -190,7 +190,7 @@ static u64 bus_to_context_maddr(struct i
         }
         set_root_value(*root, maddr);
         set_root_present(*root);
-        iommu_flush_cache_entry(root);
+        iommu_flush_cache_entry(root, sizeof(struct root_entry));
     }
     maddr = (u64) get_context_addr(*root);
     unmap_vtd_domain_page(root_entries);
@@ -249,7 +249,7 @@ static u64 addr_to_dma_page_maddr(struct
              */
             dma_set_pte_readable(*pte);
             dma_set_pte_writable(*pte);
-            iommu_flush_cache_entry(pte);
+            iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
         }
         else
         {
@@ -546,9 +546,9 @@ static void dma_pte_clear_one(struct dom
         return;
     }
 
-    dma_clear_pte(*pte); 
+    dma_clear_pte(*pte);
     spin_unlock(&hd->mapping_lock);
-    iommu_flush_cache_entry(pte);
+    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
     /* No need pcidevs_lock here since do that on assign/deassign device*/
     for_each_drhd_unit ( drhd )
@@ -587,7 +587,7 @@ static void iommu_free_pagetable(u64 pt_
             iommu_free_pagetable(dma_pte_addr(*pte), next_level);
 
         dma_clear_pte(*pte);
-        iommu_flush_cache_entry(pte);
+        iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
     }
 
     unmap_vtd_domain_page(pt_vaddr);
@@ -1178,7 +1178,7 @@ static int domain_context_mapping_one(
     context_set_address_width(*context, agaw);
     context_set_fault_enable(*context);
     context_set_present(*context);
-    iommu_flush_cache_entry(context);
+    iommu_flush_cache_entry(context, sizeof(struct context_entry));
     spin_unlock(&iommu->lock);
 
     /* Context entry was previously non-present (with domid 0). */
@@ -1309,7 +1309,7 @@ static int domain_context_unmap_one(
 
     context_clear_present(*context);
     context_clear_entry(*context);
-    iommu_flush_cache_entry(context);
+    iommu_flush_cache_entry(context, sizeof(struct context_entry));
 
     if ( iommu_flush_context_device(iommu, domain_iommu_domid(domain),
                                     (((u16)bus) << 8) | devfn,
@@ -1485,7 +1485,7 @@ static int intel_iommu_map_page(
     if ( iommu_snoop )
         dma_set_pte_snp(*pte);
 
-    iommu_flush_cache_entry(pte);
+    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
     spin_unlock(&hd->mapping_lock);
     unmap_vtd_domain_page(page);
 
diff -r c3373757a5d6 -r 04037c99b5f1 xen/drivers/passthrough/vtd/vtd.h
--- a/xen/drivers/passthrough/vtd/vtd.h Wed Dec 02 08:47:49 2009 +0000
+++ b/xen/drivers/passthrough/vtd/vtd.h Wed Dec 02 08:48:36 2009 +0000
@@ -105,7 +105,7 @@ void *map_vtd_domain_page(u64 maddr);
 void *map_vtd_domain_page(u64 maddr);
 void unmap_vtd_domain_page(void *va);
 
-void iommu_flush_cache_entry(void *addr);
+void iommu_flush_cache_entry(void *addr, unsigned int size);
 void iommu_flush_cache_page(void *addr, unsigned long npages);
 
 #endif // _VTD_H_
