[PATCH 02/14] swiotlb: remove the alloc_size parameter to swiotlb_tbl_unmap_single



Now that swiotlb remembers the allocation size, there is no need to pass
it back to swiotlb_tbl_unmap_single.
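
As a before/after illustration (not part of the patch itself), callers now
pass only the mapping size, and swiotlb looks up the slot's allocation size
internally from the renamed io_tlb_alloc_size[] array:

	/* before: the aligned allocation size had to be passed back in */
	swiotlb_tbl_unmap_single(dev, phys, size, aligned_size, dir, attrs);

	/* after: only the mapping size is needed */
	swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);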

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 drivers/iommu/dma-iommu.c | 11 +++-------
 drivers/xen/swiotlb-xen.c |  4 ++--
 include/linux/swiotlb.h   |  1 -
 kernel/dma/direct.h       |  2 +-
 kernel/dma/swiotlb.c      | 45 ++++++++++++++++++++-------------------
 5 files changed, 29 insertions(+), 34 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9ab6ee22c11088..da2bd8f0885e6e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -493,8 +493,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
                unsigned long attrs)
 {
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
-       struct iommu_dma_cookie *cookie = domain->iova_cookie;
-       struct iova_domain *iovad = &cookie->iovad;
        phys_addr_t phys;
 
        phys = iommu_iova_to_phys(domain, dma_addr);
@@ -504,8 +502,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
        __iommu_dma_unmap(dev, dma_addr, size);
 
        if (unlikely(is_swiotlb_buffer(phys)))
-               swiotlb_tbl_unmap_single(dev, phys, size,
-                               iova_align(iovad, size), dir, attrs);
+               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
 
 static bool dev_is_untrusted(struct device *dev)
@@ -580,10 +577,8 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
        }
 
        iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
-       if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
-               swiotlb_tbl_unmap_single(dev, phys, org_size,
-                               aligned_size, dir, attrs);
-
+       if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
+               swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
        return iova;
 }
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2b385c1b4a99cb..d47f1b311caac0 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -406,7 +406,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         * Ensure that the address returned is DMA'ble
         */
        if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
-               swiotlb_tbl_unmap_single(dev, map, size, size, dir,
+               swiotlb_tbl_unmap_single(dev, map, size, dir,
                                attrs | DMA_ATTR_SKIP_CPU_SYNC);
                return DMA_MAPPING_ERROR;
        }
@@ -445,7 +445,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 
        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(hwdev, dev_addr))
-               swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
+               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 }
 
 static void
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 5857a937c63722..59f421d041ed9e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -57,7 +57,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
                                     phys_addr_t tlb_addr,
                                     size_t mapping_size,
-                                    size_t alloc_size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs);
 
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index b9861557873768..e1bf721591c0cf 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -114,6 +114,6 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
        if (unlikely(is_swiotlb_buffer(phys)))
-               swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
+               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
 #endif /* _KERNEL_DMA_DIRECT_H */
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index c10e855a03bc16..03aa614565e417 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -102,7 +102,7 @@ static phys_addr_t *io_tlb_orig_addr;
 /*
  * The mapped buffer's size should be validated during a sync operation.
  */
-static size_t *io_tlb_orig_size;
+static size_t *io_tlb_alloc_size;
 
 /*
  * Protect the above data structures in the map and unmap calls
@@ -253,15 +253,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
                      __func__, alloc_size, PAGE_SIZE);
 
        alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t));
-       io_tlb_orig_size = memblock_alloc(alloc_size, PAGE_SIZE);
-       if (!io_tlb_orig_size)
+       io_tlb_alloc_size = memblock_alloc(alloc_size, PAGE_SIZE);
+       if (!io_tlb_alloc_size)
                panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
                      __func__, alloc_size, PAGE_SIZE);
 
        for (i = 0; i < io_tlb_nslabs; i++) {
                io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-               io_tlb_orig_size[i] = 0;
+               io_tlb_alloc_size[i] = 0;
        }
        io_tlb_index = 0;
        no_iotlb_memory = false;
@@ -393,18 +393,18 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
        if (!io_tlb_orig_addr)
                goto cleanup4;
 
-       io_tlb_orig_size = (size_t *)
+       io_tlb_alloc_size = (size_t *)
                __get_free_pages(GFP_KERNEL,
                                 get_order(io_tlb_nslabs *
                                           sizeof(size_t)));
-       if (!io_tlb_orig_size)
+       if (!io_tlb_alloc_size)
                goto cleanup5;
 
 
        for (i = 0; i < io_tlb_nslabs; i++) {
                io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-               io_tlb_orig_size[i] = 0;
+               io_tlb_alloc_size[i] = 0;
        }
        io_tlb_index = 0;
        no_iotlb_memory = false;
@@ -436,7 +436,7 @@ void __init swiotlb_exit(void)
                return;
 
        if (late_alloc) {
-               free_pages((unsigned long)io_tlb_orig_size,
+               free_pages((unsigned long)io_tlb_alloc_size,
                           get_order(io_tlb_nslabs * sizeof(size_t)));
                free_pages((unsigned long)io_tlb_orig_addr,
                           get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
@@ -447,7 +447,7 @@ void __init swiotlb_exit(void)
        } else {
                memblock_free_late(__pa(io_tlb_orig_addr),
                                   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
-               memblock_free_late(__pa(io_tlb_orig_size),
+               memblock_free_late(__pa(io_tlb_alloc_size),
                                   PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t)));
                memblock_free_late(__pa(io_tlb_list),
                                   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
@@ -639,7 +639,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
         */
        for (i = 0; i < nr_slots(alloc_size + offset); i++) {
                io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i);
-               io_tlb_orig_size[index+i] = alloc_size - (i << IO_TLB_SHIFT);
+               io_tlb_alloc_size[index+i] = alloc_size - (i << IO_TLB_SHIFT);
        }
        tlb_addr = slot_addr(io_tlb_start, index) + offset;
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
@@ -648,14 +648,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
        return tlb_addr;
 }
 
-static void validate_sync_size_and_truncate(struct device *hwdev, size_t orig_size, size_t *size)
+static void validate_sync_size_and_truncate(struct device *hwdev, size_t alloc_size, size_t *size)
 {
-       if (*size > orig_size) {
+       if (*size > alloc_size) {
                /* Warn and truncate mapping_size */
                dev_WARN_ONCE(hwdev, 1,
                        "Attempt for buffer overflow. Original size: %zu. 
Mapping size: %zu.\n",
-                       orig_size, *size);
-               *size = orig_size;
+                       alloc_size, *size);
+               *size = alloc_size;
        }
 }
 
@@ -663,16 +663,17 @@ static void validate_sync_size_and_truncate(struct device *hwdev, size_t orig_si
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-                             size_t mapping_size, size_t alloc_size,
-                             enum dma_data_direction dir, unsigned long attrs)
+                             size_t mapping_size, enum dma_data_direction dir,
+                             unsigned long attrs)
 {
        unsigned long flags;
        unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
-       int i, count, nslots = nr_slots(alloc_size + offset);
        int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = io_tlb_orig_addr[index];
+       size_t alloc_size = io_tlb_alloc_size[index];
+       int i, count, nslots = nr_slots(alloc_size + offset);
 
-       validate_sync_size_and_truncate(hwdev, io_tlb_orig_size[index], &mapping_size);
+       validate_sync_size_and_truncate(hwdev, alloc_size, &mapping_size);
 
        /*
         * First, sync the memory before unmapping the entry
@@ -701,7 +702,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
        for (i = index + nslots - 1; i >= index; i--) {
                io_tlb_list[i] = ++count;
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-               io_tlb_orig_size[i] = 0;
+               io_tlb_alloc_size[i] = 0;
        }
 
        /*
@@ -721,13 +722,13 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
                             enum dma_sync_target target)
 {
        int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       size_t orig_size = io_tlb_orig_size[index];
+       size_t alloc_size = io_tlb_alloc_size[index];
        phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
        if (orig_addr == INVALID_PHYS_ADDR)
                return;
 
-       validate_sync_size_and_truncate(hwdev, orig_size, &size);
+       validate_sync_size_and_truncate(hwdev, alloc_size, &size);
 
        switch (target) {
        case SYNC_FOR_CPU:
@@ -770,7 +771,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
        /* Ensure that the address returned is DMA'ble */
        dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
        if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
-               swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
+               swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
                dev_WARN_ONCE(dev, 1,
                        "swiotlb addr %pad+%zu overflow (mask %llx, bus limit 
%llx).\n",
-- 
2.29.2