
Re: [Xen-devel] [PATCH 01/11] xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintenance



On Thu, 5 Sep 2019, Christoph Hellwig wrote:
> Copy the arm64 code that uses the dma-direct/swiotlb helpers for DMA
> non-coherent devices.
> 
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>

This is much better and much more readable.

Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
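
The dispatch the new helpers settle on is easy to summarize (sketch only,
mirroring the xen_dma_sync_single_for_cpu() hunk quoted below): dom0 is
mapped 1:1, so a DMA address whose pfn is valid in the kernel's memmap is a
local page and can take the generic dma-direct path, while anything else is
a foreign grant mapping and needs the Xen-specific cache maintenance calls.

    static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
                    dma_addr_t handle, size_t size, enum dma_data_direction dir)
    {
            unsigned long pfn = PFN_DOWN(handle);

            if (pfn_valid(pfn))
                    /* local (1:1 mapped) page: generic dma-direct path */
                    dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
            else
                    /* foreign grant mapping: Xen-specific cache maintenance */
                    __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
    }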

> ---
>  arch/arm/include/asm/device.h            |  3 -
>  arch/arm/include/asm/xen/page-coherent.h | 72 +++++++++---------------
>  arch/arm/mm/dma-mapping.c                |  8 +--
>  drivers/xen/swiotlb-xen.c                | 20 -------
>  4 files changed, 28 insertions(+), 75 deletions(-)
> 
> diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
> index f6955b55c544..c675bc0d5aa8 100644
> --- a/arch/arm/include/asm/device.h
> +++ b/arch/arm/include/asm/device.h
> @@ -14,9 +14,6 @@ struct dev_archdata {
>  #endif
>  #ifdef CONFIG_ARM_DMA_USE_IOMMU
>       struct dma_iommu_mapping        *mapping;
> -#endif
> -#ifdef CONFIG_XEN
> -     const struct dma_map_ops *dev_dma_ops;
>  #endif
>       unsigned int dma_coherent:1;
>       unsigned int dma_ops_setup:1;
> diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
> index 2c403e7c782d..602ac02f154c 100644
> --- a/arch/arm/include/asm/xen/page-coherent.h
> +++ b/arch/arm/include/asm/xen/page-coherent.h
> @@ -6,23 +6,37 @@
>  #include <asm/page.h>
>  #include <xen/arm/page-coherent.h>
>  
> -static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
> -{
> -     if (dev && dev->archdata.dev_dma_ops)
> -             return dev->archdata.dev_dma_ops;
> -     return get_arch_dma_ops(NULL);
> -}
> -
>  static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
>               dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
>  {
> -     return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
> +     return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
>  }
>  
>  static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
>               void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
>  {
> -     xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
> +     dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
> +}
> +
> +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
> +             dma_addr_t handle, size_t size, enum dma_data_direction dir)
> +{
> +     unsigned long pfn = PFN_DOWN(handle);
> +
> +     if (pfn_valid(pfn))
> +             dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
> +     else
> +             __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
> +}
> +
> +static inline void xen_dma_sync_single_for_device(struct device *hwdev,
> +             dma_addr_t handle, size_t size, enum dma_data_direction dir)
> +{
> +     unsigned long pfn = PFN_DOWN(handle);
> +     if (pfn_valid(pfn))
> +             dma_direct_sync_single_for_device(hwdev, handle, size, dir);
> +     else
> +             __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
>  }
>  
>  static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
> @@ -36,17 +50,8 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
>       bool local = (page_pfn <= dev_pfn) &&
>               (dev_pfn - page_pfn < compound_pages);
>  
> -     /*
> -      * Dom0 is mapped 1:1, while the Linux page can span across
> -      * multiple Xen pages, it's not possible for it to contain a
> -      * mix of local and foreign Xen pages. So if the first xen_pfn
> -      * == mfn the page is local otherwise it's a foreign page
> -      * grant-mapped in dom0. If the page is local we can safely
> -      * call the native dma_ops function, otherwise we call the xen
> -      * specific function.
> -      */
>       if (local)
> -             xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
> +             dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
>       else
>               __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
>  }
> @@ -63,33 +68,10 @@ static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
>        * safely call the native dma_ops function, otherwise we call the xen
>        * specific function.
>        */
> -     if (pfn_valid(pfn)) {
> -             if (xen_get_dma_ops(hwdev)->unmap_page)
> -                     xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
> -     } else
> +     if (pfn_valid(pfn))
> +             dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
> +     else
>               __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
>  }
>  
> -static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
> -             dma_addr_t handle, size_t size, enum dma_data_direction dir)
> -{
> -     unsigned long pfn = PFN_DOWN(handle);
> -     if (pfn_valid(pfn)) {
> -             if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
> -                     xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
> -     } else
> -             __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
> -}
> -
> -static inline void xen_dma_sync_single_for_device(struct device *hwdev,
> -             dma_addr_t handle, size_t size, enum dma_data_direction dir)
> -{
> -     unsigned long pfn = PFN_DOWN(handle);
> -     if (pfn_valid(pfn)) {
> -             if (xen_get_dma_ops(hwdev)->sync_single_for_device)
> -                     xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
> -     } else
> -             __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
> -}
> -
>  #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
> diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
> index d42557ee69c2..738097396445 100644
> --- a/arch/arm/mm/dma-mapping.c
> +++ b/arch/arm/mm/dma-mapping.c
> @@ -1132,10 +1132,6 @@ static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
>        * 32-bit DMA.
>        * Use the generic dma-direct / swiotlb ops code in that case, as that
>        * handles bounce buffering for us.
> -      *
> -      * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
> -      * latter is also selected by the Xen code, but that code for now relies
> -      * on non-NULL dev_dma_ops.  To be cleaned up later.
>        */
>       if (IS_ENABLED(CONFIG_ARM_LPAE))
>               return NULL;
> @@ -2363,10 +2359,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
>       set_dma_ops(dev, dma_ops);
>  
>  #ifdef CONFIG_XEN
> -     if (xen_initial_domain()) {
> -             dev->archdata.dev_dma_ops = dev->dma_ops;
> +     if (xen_initial_domain())
>               dev->dma_ops = xen_dma_ops;
> -     }
>  #endif
>       dev->archdata.dma_ops_setup = true;
>  }
> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
> index ae1df496bf38..eee86cc7046b 100644
> --- a/drivers/xen/swiotlb-xen.c
> +++ b/drivers/xen/swiotlb-xen.c
> @@ -557,11 +557,6 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
>                    void *cpu_addr, dma_addr_t dma_addr, size_t size,
>                    unsigned long attrs)
>  {
> -#ifdef CONFIG_ARM
> -     if (xen_get_dma_ops(dev)->mmap)
> -             return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
> -                                                 dma_addr, size, attrs);
> -#endif
>       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
>  }
>  
> @@ -574,21 +569,6 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
>                       void *cpu_addr, dma_addr_t handle, size_t size,
>                       unsigned long attrs)
>  {
> -#ifdef CONFIG_ARM
> -     if (xen_get_dma_ops(dev)->get_sgtable) {
> -#if 0
> -     /*
> -      * This check verifies that the page belongs to the current domain and
> -      * is not one mapped from another domain.
> -      * This check is for debug only, and should not go to production build
> -      */
> -             unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
> -             BUG_ON (!page_is_ram(bfn));
> -#endif
> -             return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
> -                                                        handle, size, attrs);
> -     }
> -#endif
>       return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
>  }
>  
> -- 
> 2.20.1
> 

