
Re: [Xen-devel] [PATCH 03/11] xen/arm: simplify dma_cache_maint



On Mon, 26 Aug 2019, Christoph Hellwig wrote:
> Calculate the required operation in the caller, and pass it directly
> instead of recalculating it for each page, and use simple arithmetic
> to get from the physical address to Xen page size aligned chunks.
> 
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>
> ---
>  arch/arm/xen/mm.c | 62 +++++++++++++++++------------------------------
>  1 file changed, 22 insertions(+), 40 deletions(-)
> 
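
If I follow the new arithmetic, the first chunk covers from the
handle's offset within its Xen page to the end of that page, and every
later chunk starts on a Xen page boundary. In untested shorthand (not
part of the patch), the length of the first flush is:

	len = min_t(size_t, size, XEN_PAGE_SIZE - xen_offset_in_page(handle));

One question on the loop itself below.
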
> diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
> index 90574d89d0d4..14210ebdea1a 100644
> --- a/arch/arm/xen/mm.c
> +++ b/arch/arm/xen/mm.c
> @@ -35,64 +35,46 @@ unsigned long xen_get_swiotlb_free_pages(unsigned int order)
>       return __get_free_pages(flags, order);
>  }
>  
> -enum dma_cache_op {
> -       DMA_UNMAP,
> -       DMA_MAP,
> -};
>  static bool hypercall_cflush = false;
>  
> -/* functions called by SWIOTLB */
> -
> -static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
> -     size_t size, enum dma_data_direction dir, enum dma_cache_op op)
> +/* buffers in highmem or foreign pages cannot cross page boundaries */
> +static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
>  {
>       struct gnttab_cache_flush cflush;
> -     unsigned long xen_pfn;
> -     size_t left = size;
>  
> -     xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
> -     offset %= XEN_PAGE_SIZE;
> +     cflush.a.dev_bus_addr = handle & XEN_PAGE_MASK;
> +     cflush.offset = xen_offset_in_page(handle);
> +     cflush.op = op;
>  
>       do {
> -             size_t len = left;
> -     
> -             /* buffers in highmem or foreign pages cannot cross page
> -              * boundaries */
> -             if (len + offset > XEN_PAGE_SIZE)
> -                     len = XEN_PAGE_SIZE - offset;
> -
> -             cflush.op = 0;
> -             cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
> -             cflush.offset = offset;
> -             cflush.length = len;
> -
> -             if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
> -                     cflush.op = GNTTAB_CACHE_INVAL;
> -             if (op == DMA_MAP) {
> -                     if (dir == DMA_FROM_DEVICE)
> -                             cflush.op = GNTTAB_CACHE_INVAL;
> -                     else
> -                             cflush.op = GNTTAB_CACHE_CLEAN;
> -             }
> -             if (cflush.op)
> -                     HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
> +             if (size + cflush.offset > XEN_PAGE_SIZE)
> +                     cflush.length = XEN_PAGE_SIZE - cflush.offset;
> +             else
> +                     cflush.length = size;

Isn't it missing a:

  cflush.a.dev_bus_addr = handle & XEN_PAGE_MASK;

here? dev_bus_addr is only set once before the loop, but handle is
advanced at the bottom of each iteration, so from the second iteration
onward the flush is issued against the first page again.
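
Untested sketch of what I mean, with the rest of the loop kept as in
the patch:

	do {
		/* recompute the bus address from the advanced handle;
		 * after the first pass handle is Xen page aligned */
		cflush.a.dev_bus_addr = handle & XEN_PAGE_MASK;

		if (size + cflush.offset > XEN_PAGE_SIZE)
			cflush.length = XEN_PAGE_SIZE - cflush.offset;
		else
			cflush.length = size;

		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		handle += cflush.length;
		size -= cflush.length;
		cflush.offset = 0;
	} while (size);

With that, the dev_bus_addr assignment before the loop could go away.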


> +             HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
> +
> +             handle += cflush.length;
> +             size -= cflush.length;
>  
> -             offset = 0;
> -             xen_pfn++;
> -             left -= len;
> -     } while (left);
> +             cflush.offset = 0;
> +     } while (size);
>  }
>  
>  static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
>               size_t size, enum dma_data_direction dir)
>  {
> -     dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
> +     if (dir != DMA_TO_DEVICE)
> +             dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
>  }
>  
>  static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
>               size_t size, enum dma_data_direction dir)
>  {
> -     dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
> +     if (dir == DMA_FROM_DEVICE)
> +             dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
> +     else
> +             dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN);
>  }
>  
>  void __xen_dma_map_page(struct device *hwdev, struct page *page,
> -- 
> 2.20.1
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

