[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [linux-2.6.18-xen] [IA64] Support dma tracking for swiotlb and xen_dma



# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1181684607 21600
# Node ID bab3dd910801edf8763d55e0c8133bc08d1cbcfd
# Parent  114cdf08dfb340c581e84b22d534f4ba197769cd
[IA64] Support dma tracking for swiotlb and xen_dma

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 arch/ia64/xen/swiotlb.c |   26 +++++++++++++++++++-------
 arch/ia64/xen/xen_dma.c |   10 ++++++++--
 2 files changed, 27 insertions(+), 9 deletions(-)

diff -r 114cdf08dfb3 -r bab3dd910801 arch/ia64/xen/swiotlb.c
--- a/arch/ia64/xen/swiotlb.c   Tue Jun 12 15:42:06 2007 -0600
+++ b/arch/ia64/xen/swiotlb.c   Tue Jun 12 15:43:27 2007 -0600
@@ -33,6 +33,8 @@
 #include <linux/bootmem.h>
 
 #ifdef CONFIG_XEN
+#include <xen/gnttab.h>
+#include <asm/gnttab_dma.h>
 /*
  * What DMA mask should Xen use to remap the bounce buffer pool?  Most
  * reports seem to indicate 30 bits is sufficient, except maybe for old
@@ -597,7 +599,7 @@ dma_addr_t
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-       unsigned long dev_addr = virt_to_bus(ptr);
+       unsigned long dev_addr = gnttab_dma_map_virt(ptr);
        void *map;
 
        BUG_ON(dir == DMA_NONE);
@@ -610,6 +612,7 @@ swiotlb_map_single(struct device *hwdev,
            !address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
                return dev_addr;
 
+       __gnttab_dma_unmap_page(virt_to_page(ptr));
        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
@@ -672,8 +675,11 @@ swiotlb_unmap_single(struct device *hwde
        BUG_ON(dir == DMA_NONE);
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
                unmap_single(hwdev, dma_addr, size, dir);
-       else if (dir == DMA_FROM_DEVICE)
-               mark_clean(dma_addr, size);
+       else {
+               gnttab_dma_unmap_page(dev_addr);
+               if (dir == DMA_FROM_DEVICE)
+                       mark_clean(dma_addr, size);
+       }
 }
 
 /*
@@ -774,9 +780,11 @@ swiotlb_map_sg(struct device *hwdev, str
 
        for (i = 0; i < nelems; i++, sg++) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
-               dev_addr = virt_to_bus(addr);
+               dev_addr = gnttab_dma_map_virt(addr);
                if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
-                       void *map = map_single(hwdev, addr, sg->length, dir);
+                       void *map;
+                       gnttab_dma_unmap_page(dev_addr);
+                       map = map_single(hwdev, addr, sg->length, dir);
                        sg->dma_address = virt_to_bus(map);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
@@ -808,8 +816,12 @@ swiotlb_unmap_sg(struct device *hwdev, s
        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        unmap_single(hwdev, (void *) bus_to_virt(sg->dma_address), sg->dma_length, dir);
-               else if (dir == DMA_FROM_DEVICE)
-                       mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+               else {
+                       gnttab_dma_unmap_page(sg->dma_address);
+                       if (dir == DMA_FROM_DEVICE)
+                               mark_clean(SG_ENT_VIRT_ADDRESS(sg),
+                                          sg->dma_length);
+               }
 }
 
 /*
diff -r 114cdf08dfb3 -r bab3dd910801 arch/ia64/xen/xen_dma.c
--- a/arch/ia64/xen/xen_dma.c   Tue Jun 12 15:42:06 2007 -0600
+++ b/arch/ia64/xen/xen_dma.c   Tue Jun 12 15:43:27 2007 -0600
@@ -26,6 +26,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <asm/scatterlist.h>
+#include <xen/gnttab.h>
+#include <asm/gnttab_dma.h>
 
 #define IOMMU_BUG_ON(test)                                     \
 do {                                                           \
@@ -58,7 +60,7 @@ xen_map_sg(struct device *dev, struct sc
        int i;
 
        for (i = 0 ; i < nents ; i++) {
-               sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
+               sg[i].dma_address = gnttab_dma_map_page(sg[i].page) + sg[i].offset;
                sg[i].dma_length  = sg[i].length;
 
                IOMMU_BUG_ON(address_needs_mapping(dev, sg[i].dma_address));
@@ -72,6 +74,9 @@ xen_unmap_sg(struct device *dev, struct 
 xen_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
             int direction)
 {
+       int i;
+       for (i = 0; i < nents; i++)
+               __gnttab_dma_unmap_page(sg[i].page);
 }
 EXPORT_SYMBOL(xen_unmap_sg);
 
@@ -129,7 +134,7 @@ xen_map_single(struct device *dev, void 
 xen_map_single(struct device *dev, void *ptr, size_t size,
               int direction)
 {
-       dma_addr_t dma_addr = virt_to_bus(ptr);
+       dma_addr_t dma_addr = gnttab_dma_map_virt(ptr);
 
        IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
        IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
@@ -142,5 +147,6 @@ xen_unmap_single(struct device *dev, dma
 xen_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 int direction)
 {
+       gnttab_dma_unmap_page(dma_addr);
 }
 EXPORT_SYMBOL(xen_unmap_single);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.