[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [linux-2.6.18-xen] x86: dma_map_sg() must handle multi-page segments.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1183985110 -3600
# Node ID 08cf42135056cbc07a6d790d4851e0e4b160f847
# Parent  f833757672a70ee43afd0bfbfaa22cec3b132445
x86: dma_map_sg() must handle multi-page segments.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 arch/i386/kernel/pci-dma-xen.c              |    5 ++++-
 arch/i386/kernel/swiotlb.c                  |    6 ++++--
 include/asm-i386/mach-xen/asm/dma-mapping.h |    6 +++---
 3 files changed, 11 insertions(+), 6 deletions(-)

diff -r f833757672a7 -r 08cf42135056 arch/i386/kernel/pci-dma-xen.c
--- a/arch/i386/kernel/pci-dma-xen.c    Sat Jul 07 11:44:16 2007 +0100
+++ b/arch/i386/kernel/pci-dma-xen.c    Mon Jul 09 13:45:10 2007 +0100
@@ -97,6 +97,9 @@ dma_map_sg(struct device *hwdev, struct 
                        BUG_ON(!sg[i].page);
                        IOMMU_BUG_ON(address_needs_mapping(
                                hwdev, sg[i].dma_address));
+                       IOMMU_BUG_ON(range_straddles_page_boundary(
+                               page_to_pseudophys(sg[i].page) + sg[i].offset,
+                               sg[i].length));
                }
                rc = nents;
        }
@@ -338,7 +341,7 @@ dma_map_single(struct device *dev, void 
        } else {
                dma = gnttab_dma_map_page(virt_to_page(ptr)) +
                      offset_in_page(ptr);
-               IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
+               IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
                IOMMU_BUG_ON(address_needs_mapping(dev, dma));
        }
 
diff -r f833757672a7 -r 08cf42135056 arch/i386/kernel/swiotlb.c
--- a/arch/i386/kernel/swiotlb.c        Sat Jul 07 11:44:16 2007 +0100
+++ b/arch/i386/kernel/swiotlb.c        Mon Jul 09 13:45:10 2007 +0100
@@ -480,7 +480,7 @@ swiotlb_map_single(struct device *hwdev,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
-       if (!range_straddles_page_boundary(ptr, size) &&
+       if (!range_straddles_page_boundary(__pa(ptr), size) &&
            !address_needs_mapping(hwdev, dev_addr))
                return dev_addr;
 
@@ -577,7 +577,9 @@ swiotlb_map_sg(struct device *hwdev, str
        for (i = 0; i < nelems; i++, sg++) {
                dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
 
-               if (address_needs_mapping(hwdev, dev_addr)) {
+               if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
+                                                 + sg->offset, sg->length)
+                   || address_needs_mapping(hwdev, dev_addr)) {
                        gnttab_dma_unmap_page(dev_addr);
                        buffer.page   = sg->page;
                        buffer.offset = sg->offset;
diff -r f833757672a7 -r 08cf42135056 include/asm-i386/mach-xen/asm/dma-mapping.h
--- a/include/asm-i386/mach-xen/asm/dma-mapping.h       Sat Jul 07 11:44:16 2007 +0100
+++ b/include/asm-i386/mach-xen/asm/dma-mapping.h       Mon Jul 09 13:45:10 2007 +0100
@@ -23,11 +23,11 @@ address_needs_mapping(struct device *hwd
 }
 
 static inline int
-range_straddles_page_boundary(void *p, size_t size)
+range_straddles_page_boundary(paddr_t p, size_t size)
 {
        extern unsigned long *contiguous_bitmap;
-       return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
-               !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
+       return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+               !test_bit(p >> PAGE_SHIFT, contiguous_bitmap));
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.