
[Xen-devel] [PATCH 08/11] xen/swiotlb: use dma_alloc_from_coherent to get device coherent memory



From: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>

Impact: cleanup

This avoids the previous hack of replicating struct dma_coherent_mem.
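For reference, the generic helpers take over both the pool hit and the
DMA_MEMORY_EXCLUSIVE handling, so a dma_ops implementation only needs the
pattern below. This is a minimal sketch: the foo_* names and the plain-page
fallback are illustrative, not the Xen-specific allocation path.

/* Sketch of the generic per-device coherent pool helpers in use.
 * foo_alloc_coherent/foo_free_coherent are hypothetical names and the
 * fallback is ordinary page allocation, not the Xen contiguous-region path. */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/io.h>

static void *foo_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;
        int order = get_order(size);

        /* Try the per-device coherent pool first.  A nonzero return also
         * covers DMA_MEMORY_EXCLUSIVE, in which case ret is NULL. */
        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;

        /* Illustrative fallback: plain pages, physical == bus address. */
        ret = (void *)__get_free_pages(gfp, order);
        if (ret) {
                memset(ret, 0, size);
                *dma_handle = virt_to_phys(ret);
        }
        return ret;
}

static void foo_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_addr)
{
        int order = get_order(size);

        /* Nonzero means vaddr came from the per-device pool and has
         * already been released there; nothing more to do. */
        if (dma_release_from_coherent(dev, order, vaddr))
                return;

        free_pages((unsigned long)vaddr, order);
}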

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
---
 drivers/pci/xen-iommu.c |   39 ++++++++-------------------------------
 1 files changed, 8 insertions(+), 31 deletions(-)

diff --git a/drivers/pci/xen-iommu.c b/drivers/pci/xen-iommu.c
index e7a22f1..32a8b49 100644
--- a/drivers/pci/xen-iommu.c
+++ b/drivers/pci/xen-iommu.c
@@ -34,14 +34,6 @@ do {                                                 \
        (unsigned long long)addr + size);               \
 } while (0)
 
-struct dma_coherent_mem {
-       void            *virt_base;
-       u32             device_base;
-       int             size;
-       int             flags;
-       unsigned long   *bitmap;
-};
-
 static inline int address_needs_mapping(struct device *hwdev,
                                                dma_addr_t addr)
 {
@@ -151,7 +143,6 @@ static void *xen_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp)
 {
        void *ret;
-       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        unsigned int order = get_order(size);
        unsigned long vstart;
        u64 mask;
@@ -159,18 +150,8 @@ static void *xen_alloc_coherent(struct device *dev, size_t size,
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
-       if (mem) {
-               int page = bitmap_find_free_region(mem->bitmap, mem->size,
-                                                    order);
-               if (page >= 0) {
-                       *dma_handle = mem->device_base + (page << PAGE_SHIFT);
-                       ret = mem->virt_base + (page << PAGE_SHIFT);
-                       memset(ret, 0, size);
-                       return ret;
-               }
-               if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-                       return NULL;
-       }
+       if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+               return ret;
 
        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
@@ -196,19 +177,15 @@ static void *xen_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void xen_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_addr)
+                             void *vaddr, dma_addr_t dma_addr)
 {
-       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);
 
-       if (mem && vaddr >= mem->virt_base &&
-           vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-               int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-               bitmap_release_region(mem->bitmap, page, order);
-       } else {
-               xen_destroy_contiguous_region((unsigned long)vaddr, order);
-               free_pages((unsigned long)vaddr, order);
-       }
+       if (dma_release_from_coherent(dev, order, vaddr))
+               return;
+
+       xen_destroy_contiguous_region((unsigned long)vaddr, order);
+       free_pages((unsigned long)vaddr, order);
 }
 
 static dma_addr_t xen_swiotlb_map_single(struct device *dev, phys_addr_t paddr,
-- 
1.6.0.6


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

