[Xen-changelog] [xen-unstable] linux: Reduce restrictions on address width for DMA operations
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1168616070 0
# Node ID ed73ff8440d8077fdd9b2e3cd93cd753a81d4021
# Parent 38fcc76469599d5c7f9c46b56e20f1dbfcaecd84
linux: Reduce restrictions on address width for DMA operations

Use address width needed by device rather than dma_bits in
dma_alloc_coherent(). Probe supported address width in swiotlb
initialization.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c          |    9 +
 linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c              |   60 +++++++----
 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/swiotlb.h |    2
 3 files changed, 51 insertions(+), 20 deletions(-)

diff -r 38fcc7646959 -r ed73ff8440d8 linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c	Fri Jan 12 15:27:09 2007 +0000
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c	Fri Jan 12 15:34:30 2007 +0000
@@ -161,6 +161,8 @@ void *dma_alloc_coherent(struct device *
 	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
 	unsigned int order = get_order(size);
 	unsigned long vstart;
+	u64 mask;
+
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
@@ -183,9 +185,14 @@ void *dma_alloc_coherent(struct device *
 		vstart = __get_free_pages(gfp, order);
 		ret = (void *)vstart;
 
+		if (dev != NULL && dev->coherent_dma_mask)
+			mask = dev->coherent_dma_mask;
+		else
+			mask = 0xffffffff;
+
 		if (ret != NULL) {
 			if (xen_create_contiguous_region(vstart, order,
-							 dma_bits) != 0) {
+							 fls64(mask)) != 0) {
 				free_pages(vstart, order);
 				return NULL;
 			}
diff -r 38fcc7646959 -r ed73ff8440d8 linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c	Fri Jan 12 15:27:09 2007 +0000
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c	Fri Jan 12 15:34:30 2007 +0000
@@ -47,9 +47,6 @@ EXPORT_SYMBOL(swiotlb);
  */
 #define IO_TLB_SHIFT 11
 
-/* Width of DMA addresses. 30 bits is a b44 limitation. */
-#define DEFAULT_DMA_BITS 30
-
 static int swiotlb_force;
 static char *iotlb_virt_start;
 static unsigned long iotlb_nslabs;
@@ -98,11 +95,12 @@ static struct phys_addr {
  */
 static DEFINE_SPINLOCK(io_tlb_lock);
 
-unsigned int dma_bits = DEFAULT_DMA_BITS;
+static unsigned int dma_bits;
+static unsigned int __initdata max_dma_bits = 32;
 static int __init
 setup_dma_bits(char *str)
 {
-	dma_bits = simple_strtoul(str, NULL, 0);
+	max_dma_bits = simple_strtoul(str, NULL, 0);
 	return 0;
 }
 __setup("dma_bits=", setup_dma_bits);
@@ -143,6 +141,7 @@ swiotlb_init_with_default_size (size_t d
 swiotlb_init_with_default_size (size_t default_size)
 {
 	unsigned long i, bytes;
+	int rc;
 
 	if (!iotlb_nslabs) {
 		iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
@@ -159,16 +158,33 @@ swiotlb_init_with_default_size (size_t d
 	 */
 	iotlb_virt_start = alloc_bootmem_low_pages(bytes);
 	if (!iotlb_virt_start)
-		panic("Cannot allocate SWIOTLB buffer!\n"
-		      "Use dom0_mem Xen boot parameter to reserve\n"
-		      "some DMA memory (e.g., dom0_mem=-128M).\n");
-
+		panic("Cannot allocate SWIOTLB buffer!\n");
+
+	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
 	for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
-		int rc = xen_create_contiguous_region(
-			(unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
-			get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
-			dma_bits);
-		BUG_ON(rc);
+		do {
+			rc = xen_create_contiguous_region(
+				(unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
+				get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
+				dma_bits);
+		} while (rc && dma_bits++ < max_dma_bits);
+		if (rc) {
+			if (i == 0)
+				panic("No suitable physical memory available for SWIOTLB buffer!\n"
+				      "Use dom0_mem Xen boot parameter to reserve\n"
+				      "some DMA memory (e.g., dom0_mem=-128M).\n");
+			iotlb_nslabs = i;
+			i <<= IO_TLB_SHIFT;
+			free_bootmem(__pa(iotlb_virt_start + i), bytes - i);
+			bytes = i;
+			for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
+				unsigned int bits = fls64(virt_to_bus(iotlb_virt_start + i - 1));
+
+				if (bits > dma_bits)
+					dma_bits = bits;
+			}
+			break;
+		}
 	}
 
 	/*
@@ -186,17 +202,27 @@ swiotlb_init_with_default_size (size_t d
 	 * Get the overflow emergency buffer
 	 */
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	if (!io_tlb_overflow_buffer)
+		panic("Cannot allocate SWIOTLB overflow buffer!\n");
+
+	do {
+		rc = xen_create_contiguous_region(
+			(unsigned long)io_tlb_overflow_buffer,
+			get_order(io_tlb_overflow),
+			dma_bits);
+	} while (rc && dma_bits++ < max_dma_bits);
+	if (rc)
+		panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");
 
 	iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
 	iotlb_pfn_end = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
 
 	printk(KERN_INFO "Software IO TLB enabled: \n"
 	       " Aperture: %lu megabytes\n"
-	       " Kernel range: 0x%016lx - 0x%016lx\n"
+	       " Kernel range: %p - %p\n"
 	       " Address size: %u bits\n",
 	       bytes >> 20,
-	       (unsigned long)iotlb_virt_start,
-	       (unsigned long)iotlb_virt_start + bytes,
+	       iotlb_virt_start, iotlb_virt_start + bytes,
 	       dma_bits);
 }
 
diff -r 38fcc7646959 -r ed73ff8440d8 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/swiotlb.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/swiotlb.h	Fri Jan 12 15:27:09 2007 +0000
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/swiotlb.h	Fri Jan 12 15:34:30 2007 +0000
@@ -34,8 +34,6 @@ extern int swiotlb_dma_supported(struct
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
 extern void swiotlb_init(void);
 
-extern unsigned int dma_bits;
-
 #ifdef CONFIG_SWIOTLB
 extern int swiotlb;
 #else
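
The heart of the swiotlb change above is the widening retry: initialization now
starts from the smallest width that can cover one IO_TLB_SEGSIZE segment and asks
xen_create_contiguous_region() for progressively wider addresses, one bit at a
time, until the exchange succeeds or max_dma_bits is exhausted. Below is a minimal
userspace sketch of just that probing pattern, not code from the patch:
fake_create_contiguous_region() is a hypothetical stand-in for the hypervisor call,
and both the 30-bit success threshold and the 18-bit starting width are
illustrative values only.

#include <stdio.h>

#define MAX_DMA_BITS 32		/* mirrors the patch's max_dma_bits default */

/* Hypothetical stand-in for xen_create_contiguous_region(): pretend the
 * hypervisor can only satisfy the request once at least 30 address bits
 * are permitted (an invented threshold, for demonstration only). */
static int fake_create_contiguous_region(unsigned int address_bits)
{
	return address_bits >= 30 ? 0 : -1;
}

int main(void)
{
	/* The patch starts from
	 * get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
	 * 18 is simply a plausible stand-in here. */
	unsigned int dma_bits = 18;
	int rc;

	do {
		rc = fake_create_contiguous_region(dma_bits);
	} while (rc && dma_bits++ < MAX_DMA_BITS);

	if (rc)
		printf("no memory reachable below 2^%u\n", MAX_DMA_BITS);
	else
		printf("buffer placed using %u address bits\n", dma_bits);
	return 0;
}

Because the post-increment sits in the short-circuited loop condition, dma_bits is
left holding the width that actually succeeded, which is the value the subsequent
overflow-buffer exchange reuses and the boot-time printk reports.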