
[PATCH v2] dma-mapping: use vmalloc_to_page for vmalloc addresses



From: Roman Skakun <Roman_Skakun@xxxxxxxx>

virt_to_page() is only valid for addresses in the kernel's linear
mapping, so it returns a bogus page pointer when the cpu_addr passed
in was allocated from the vmalloc range.

Detect such addresses and obtain the page with vmalloc_to_page()
instead.

Signed-off-by: Roman Skakun <roman_skakun@xxxxxxxx>
Reviewed-by: Andrii Anisov <andrii_anisov@xxxxxxxx>
---
Hi, Christoph!
This is the updated patch, reworked according to your and Stefano's
suggestions.
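
For reviewers who want to reproduce the failure mode in isolation,
below is a minimal, illustrative module sketch (not part of this
patch; the module name and log messages are mine). It allocates from
the vmalloc range and shows that only vmalloc_to_page() resolves the
real backing page:

  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/vmalloc.h>

  static void *buf;

  static int __init vmalloc_page_demo_init(void)
  {
  	buf = vmalloc(PAGE_SIZE);
  	if (!buf)
  		return -ENOMEM;

  	/*
  	 * virt_to_page() assumes the address lives in the kernel's
  	 * linear map, so for a vmalloc address it computes a bogus
  	 * struct page. vmalloc_to_page() walks the page tables and
  	 * returns the actual backing page.
  	 */
  	pr_info("is_vmalloc_addr:     %d\n", is_vmalloc_addr(buf));
  	pr_info("virt_to_page pfn:    %lx (bogus)\n",
  		page_to_pfn(virt_to_page(buf)));
  	pr_info("vmalloc_to_page pfn: %lx (correct)\n",
  		page_to_pfn(vmalloc_to_page(buf)));
  	return 0;
  }

  static void __exit vmalloc_page_demo_exit(void)
  {
  	vfree(buf);
  }

  module_init(vmalloc_page_demo_init);
  module_exit(vmalloc_page_demo_exit);
  MODULE_LICENSE("GPL");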

 drivers/xen/swiotlb-xen.c   |  7 +------
 include/linux/dma-map-ops.h |  2 ++
 kernel/dma/ops_helpers.c    | 16 ++++++++++++++--
 3 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 92ee6eea30cd..c2f612a10a95 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -337,7 +337,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);
-       struct page *page;
+       struct page *page = cpu_addr_to_page(vaddr);
 
        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;
@@ -349,11 +349,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        /* Convert the size to actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);
 
-       if (is_vmalloc_addr(vaddr))
-               page = vmalloc_to_page(vaddr);
-       else
-               page = virt_to_page(vaddr);
-
        if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
                     range_straddles_page_boundary(phys, size)) &&
            TestClearPageXenRemapped(page))
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index a5f89fc4d6df..ce0edb0bb603 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -226,6 +226,8 @@ struct page *dma_alloc_from_pool(struct device *dev, size_t size,
                bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
 bool dma_free_from_pool(struct device *dev, void *start, size_t size);
 
+struct page *cpu_addr_to_page(void *cpu_addr);
+
 #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
 #include <asm/dma-coherence.h>
 #elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
index 910ae69cae77..472e861750d3 100644
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -5,6 +5,17 @@
  */
 #include <linux/dma-map-ops.h>
 
+/*
+ * Convert a kernel virtual address (linear map or vmalloc) to its page.
+ */
+struct page *cpu_addr_to_page(void *cpu_addr)
+{
+       if (is_vmalloc_addr(cpu_addr))
+               return vmalloc_to_page(cpu_addr);
+       else
+               return virt_to_page(cpu_addr);
+}
+
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
@@ -12,7 +23,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                 unsigned long attrs)
 {
-       struct page *page = virt_to_page(cpu_addr);
+       struct page *page = cpu_addr_to_page(cpu_addr);
        int ret;
 
        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +43,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
+       struct page *page = cpu_addr_to_page(cpu_addr);
        int ret = -ENXIO;
 
        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +55,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                return -ENXIO;
 
        return remap_pfn_range(vma, vma->vm_start,
-                       page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+                       page_to_pfn(page) + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
 #else
        return -ENXIO;
-- 
2.27.0