[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 8/9] xen: swiotlb: Convert mapping routine to rely on physical address



From: Leon Romanovsky <leonro@xxxxxxxxxx>

Switch to .map_phys callback instead of .map_page.

Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxx>
---
 drivers/xen/grant-dma-ops.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 29257d2639dbf..7f76e516fe24c 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -163,18 +163,22 @@ static void xen_grant_dma_free_pages(struct device *dev, size_t size,
        xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
 }
 
-static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
-                                        unsigned long offset, size_t size,
+static dma_addr_t xen_grant_dma_map_phys(struct device *dev, phys_addr_t phys,
+                                        size_t size,
                                         enum dma_data_direction dir,
                                         unsigned long attrs)
 {
        struct xen_grant_dma_data *data;
+       unsigned long offset = offset_in_page(phys);
        unsigned long dma_offset = xen_offset_in_page(offset),
                        pfn_offset = XEN_PFN_DOWN(offset);
        unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
        grant_ref_t grant;
        dma_addr_t dma_handle;
 
+       if (attrs & DMA_ATTR_MMIO)
+               return DMA_MAPPING_ERROR;
+
        if (WARN_ON(dir == DMA_NONE))
                return DMA_MAPPING_ERROR;
 
@@ -190,7 +194,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
 
        for (i = 0; i < n_pages; i++) {
                gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
-                               pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
+                               pfn_to_gfn(page_to_xen_pfn(phys_to_page(phys)) + i + pfn_offset),
                                dir == DMA_TO_DEVICE);
        }
 
@@ -199,7 +203,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
        return dma_handle;
 }
 
-static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+static void xen_grant_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
                                     size_t size, enum dma_data_direction dir,
                                     unsigned long attrs)
 {
@@ -242,7 +246,7 @@ static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                return;
 
        for_each_sg(sg, s, nents, i)
-               xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
+               xen_grant_dma_unmap_phys(dev, s->dma_address, sg_dma_len(s), dir,
                                attrs);
 }
 
@@ -257,7 +261,7 @@ static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
                return -EINVAL;
 
        for_each_sg(sg, s, nents, i) {
-               s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
+               s->dma_address = xen_grant_dma_map_phys(dev, sg_phys(s),
                                s->length, dir, attrs);
                if (s->dma_address == DMA_MAPPING_ERROR)
                        goto out;
@@ -286,8 +290,8 @@ static const struct dma_map_ops xen_grant_dma_ops = {
        .free_pages = xen_grant_dma_free_pages,
        .mmap = dma_common_mmap,
        .get_sgtable = dma_common_get_sgtable,
-       .map_page = xen_grant_dma_map_page,
-       .unmap_page = xen_grant_dma_unmap_page,
+       .map_phys = xen_grant_dma_map_phys,
+       .unmap_phys = xen_grant_dma_unmap_phys,
        .map_sg = xen_grant_dma_map_sg,
        .unmap_sg = xen_grant_dma_unmap_sg,
        .dma_supported = xen_grant_dma_supported,
-- 
2.51.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.