[Xen-merge] [PATCH 04/23] dma-mapping subarch headers
--- linux-2.6.12-xen0-arch.orig/include/asm-i386/dma-mapping.h
+++ linux-2.6.12-xen0-arch/include/asm-i386/dma-mapping.h
@@ -6,6 +6,7 @@
 #include <asm/cache.h>
 #include <asm/io.h>
 #include <asm/scatterlist.h>
+#include <mach_dma-mapping.h>
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
@@ -16,104 +17,6 @@ void *dma_alloc_coherent(struct device *
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t dma_handle);
 
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	flush_write_buffers();
-	return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	int i;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for (i = 0; i < nents; i++ ) {
-		BUG_ON(!sg[i].page);
-
-		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
-	}
-
-	flush_write_buffers();
-	return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
 static inline int
 dma_mapping_error(dma_addr_t dma_addr)
 {
--- /dev/null
+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-default/mach_dma-mapping.h
@@ -0,0 +1,102 @@
+#ifndef __ASM_MACH_DMA_MAPPING_H
+#define __ASM_MACH_DMA_MAPPING_H
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	flush_write_buffers();
+	return virt_to_phys(ptr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction direction)
+{
+	int i;
+
+	BUG_ON(direction == DMA_NONE);
+
+	for (i = 0; i < nents; i++ ) {
+		BUG_ON(!sg[i].page);
+
+		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+	}
+
+	flush_write_buffers();
+	return nents;
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	return page_to_phys(page) + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+	       enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+	     enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+			   enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		       enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
+#endif
--- linux-2.6.12-xen0/include/asm-i386/mach-xen/mach_dma-mapping.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-xen/mach_dma-mapping.h	2005-08-01 21:44:38.000000000 -0700
@@ -0,0 +1,88 @@
+#ifndef __ASM_MACH_DMA_MAPPING_H
+#define __ASM_MACH_DMA_MAPPING_H
+
+dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+			  enum dma_data_direction direction);
+
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		      enum dma_data_direction direction);
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction direction)
+{
+	int i;
+
+	BUG_ON(direction == DMA_NONE);
+
+	for (i = 0; i < nents; i++ ) {
+		BUG_ON(!sg[i].page);
+
+		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+	}
+
+	flush_write_buffers();
+	return nents;
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	return page_to_phys(page) + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+	       enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+	     enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+extern void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction);
+
+extern void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+			   enum dma_data_direction direction);
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		       enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
+#endif
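
As a usage sketch (not part of the patch): the fragment below shows roughly how
a 2.6.12-era driver consumes the interface these headers implement. The device
pointer, buffer, and function names here are hypothetical, invented purely for
illustration. On mach-default, dma_map_single() inlines to a write-buffer flush
plus virt_to_phys(); on mach-xen it becomes an out-of-line call, presumably
because a Xen guest's pseudo-physical addresses are not the machine addresses a
device must be programmed with, so virt_to_phys() alone cannot produce the
handle.

	#include <linux/dma-mapping.h>
	#include <linux/device.h>
	#include <linux/errno.h>
	#include <asm/scatterlist.h>

	/*
	 * Hypothetical driver fragment: map one kernel buffer for a
	 * device-to-memory transfer, then tear the mapping down.
	 */
	static int example_rx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* mach-default: flush_write_buffers() + virt_to_phys(buf);
		 * mach-xen: a real out-of-line call. */
		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(handle))	/* 2.6.12: takes the handle only */
			return -EIO;

		/* ... hand "handle" to the device, wait for completion ... */

		/* Return ownership of the buffer to the CPU. */
		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
		return 0;
	}

	/*
	 * Same idea for a scatter/gather transfer; dma_map_sg() above fills
	 * in sg[i].dma_address, which drivers read via sg_dma_address().
	 */
	static void example_rx_sg(struct device *dev, struct scatterlist *sg,
				  int nents)
	{
		int i, count;

		count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
		for (i = 0; i < count; i++) {
			/* feed sg_dma_address(&sg[i]) and sg[i].length to the
			 * device's descriptor ring (device-specific) */
		}
		dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
	}

Note that the calling convention is identical on both subarches; only where the
implementation lives changes, which is the point of splitting these functions
out into mach_dma-mapping.h.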