[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [linux-2.6.18-xen] make gnttab_dma_map_page() compound page aware


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-linux-2.6.18-xen <patchbot@xxxxxxx>
  • Date: Mon, 12 Nov 2012 08:44:04 +0000
  • Delivery-date: Mon, 12 Nov 2012 08:44:18 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1352709645 -3600
# Node ID 171f57e6ad50fb71354b5b7f2177394102ca02fe
# Parent  f695483240db110763b89c6821a2357929f2ec1e
make gnttab_dma_map_page() compound page aware

While this was found to be a problem in practice only on recent
kernels, it was nevertheless a mistake from the beginning to not take
into consideration compound pages.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---


diff -r f695483240db -r 171f57e6ad50 arch/i386/kernel/pci-dma-xen.c
--- a/arch/i386/kernel/pci-dma-xen.c    Fri Oct 19 17:57:20 2012 +0200
+++ b/arch/i386/kernel/pci-dma-xen.c    Mon Nov 12 09:40:45 2012 +0100
@@ -120,7 +120,7 @@ dma_map_sg(struct device *hwdev, struct 
                for (i = 0; i < nents; i++ ) {
                        BUG_ON(!sg[i].page);
                        sg[i].dma_address =
-                               gnttab_dma_map_page(sg[i].page) + sg[i].offset;
+                               gnttab_dma_map_page(sg[i].page, sg[i].offset);
                        sg[i].dma_length  = sg[i].length;
                        IOMMU_BUG_ON(address_needs_mapping(
                                hwdev, sg[i].dma_address));
@@ -165,7 +165,7 @@ dma_map_page(struct device *dev, struct 
                dma_addr = swiotlb_map_page(
                        dev, page, offset, size, direction);
        } else {
-               dma_addr = gnttab_dma_map_page(page) + offset;
+               dma_addr = gnttab_dma_map_page(page, offset);
                IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
        }
 
@@ -363,8 +363,8 @@ dma_map_single(struct device *dev, void 
        if (swiotlb) {
                dma = swiotlb_map_single(dev, ptr, size, direction);
        } else {
-               dma = gnttab_dma_map_page(virt_to_page(ptr)) +
-                     offset_in_page(ptr);
+               dma = gnttab_dma_map_page(virt_to_page(ptr),
+                                         offset_in_page(ptr));
                IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
                IOMMU_BUG_ON(address_needs_mapping(dev, dma));
        }
diff -r f695483240db -r 171f57e6ad50 arch/ia64/hp/common/sba_iommu.c
--- a/arch/ia64/hp/common/sba_iommu.c   Fri Oct 19 17:57:20 2012 +0200
+++ b/arch/ia64/hp/common/sba_iommu.c   Mon Nov 12 09:40:45 2012 +0100
@@ -1522,7 +1522,7 @@ int sba_map_sg(struct device *dev, struc
                for (sg = sglist ; filled < nents ; filled++, sg++){
                        sg->dma_length = sg->length;
 #ifdef CONFIG_XEN
-                       sg->dma_address = gnttab_dma_map_page(sg->page) + sg->offset;
+                       sg->dma_address = gnttab_dma_map_page(sg->page, sg->offset);
 #else
                        sg->dma_address = virt_to_bus(sba_sg_address(sg));
 #endif
diff -r f695483240db -r 171f57e6ad50 arch/ia64/xen/xen_dma.c
--- a/arch/ia64/xen/xen_dma.c   Fri Oct 19 17:57:20 2012 +0200
+++ b/arch/ia64/xen/xen_dma.c   Mon Nov 12 09:40:45 2012 +0100
@@ -92,7 +92,7 @@ xen_map_sg(struct device *dev, struct sc
        int i;
 
        for (i = 0 ; i < nents ; i++) {
-               sg[i].dma_address = gnttab_dma_map_page(sg[i].page) + sg[i].offset;
+               sg[i].dma_address = gnttab_dma_map_page(sg[i].page, sg[i].offset);
                sg[i].dma_length  = sg[i].length;
 
                IOMMU_BUG_ON(address_needs_mapping(dev, sg[i].dma_address));
diff -r f695483240db -r 171f57e6ad50 drivers/xen/core/gnttab.c
--- a/drivers/xen/core/gnttab.c Fri Oct 19 17:57:20 2012 +0200
+++ b/drivers/xen/core/gnttab.c Mon Nov 12 09:40:45 2012 +0100
@@ -705,6 +705,8 @@ void __gnttab_dma_map_page(struct page *
        if (!is_running_on_xen() || !PageForeign(page))
                return;
 
+       BUG_ON(PageCompound(page));
+
        do {
                seq = read_seqbegin(&gnttab_dma_lock);
 
diff -r f695483240db -r 171f57e6ad50 include/asm-i386/mach-xen/asm/gnttab_dma.h
--- a/include/asm-i386/mach-xen/asm/gnttab_dma.h        Fri Oct 19 17:57:20 2012 +0200
+++ b/include/asm-i386/mach-xen/asm/gnttab_dma.h        Mon Nov 12 09:40:45 2012 +0100
@@ -21,16 +21,24 @@
 #ifndef _ASM_I386_GNTTAB_DMA_H
 #define _ASM_I386_GNTTAB_DMA_H
 
+#include <asm/bug.h>
+
 static inline int gnttab_dma_local_pfn(struct page *page)
 {
        /* Has it become a local MFN? */
        return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page))));
 }
 
-static inline maddr_t gnttab_dma_map_page(struct page *page)
+static inline maddr_t gnttab_dma_map_page(struct page *page,
+                                         unsigned long offset)
 {
+       unsigned int pgnr = offset >> PAGE_SHIFT;
+       unsigned int order = PageCompound(page) ? (long)page[1].lru.prev : 0;
+
+       BUG_ON(pgnr >> order);
        __gnttab_dma_map_page(page);
-       return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT);
+       return ((maddr_t)pfn_to_mfn(page_to_pfn(page) + pgnr) << PAGE_SHIFT)
+              + (offset & ~PAGE_MASK);
 }
 
 static inline void gnttab_dma_unmap_page(maddr_t maddr)
diff -r f695483240db -r 171f57e6ad50 include/asm-ia64/gnttab_dma.h
--- a/include/asm-ia64/gnttab_dma.h     Fri Oct 19 17:57:20 2012 +0200
+++ b/include/asm-ia64/gnttab_dma.h     Mon Nov 12 09:40:45 2012 +0100
@@ -21,6 +21,8 @@
 #ifndef _ASM_IA64_GNTTAB_DMA_H
 #define _ASM_IA64_GNTTAB_DMA_H
 
+#include <asm/bug.h>
+
 static inline int gnttab_dma_local_pfn(struct page *page)
 {
        return 0;
@@ -32,10 +34,15 @@ static inline void gnttab_dma_use_page(s
        __gnttab_dma_map_page(page);
 }
 
-static inline dma_addr_t gnttab_dma_map_page(struct page *page)
+static inline dma_addr_t gnttab_dma_map_page(struct page *page,
+                                            unsigned long offset)
 {
+       unsigned int pgnr = offset >> PAGE_SHIFT;
+       unsigned int order = PageCompound(page) ? (long)page[1].lru.prev : 0;
+
+       BUG_ON(pgnr >> order);
        gnttab_dma_use_page(page);
-       return page_to_bus(page);
+       return page_to_bus(page + pgnr) + (offset & ~PAGE_MASK);
 }
 
 static inline dma_addr_t gnttab_dma_map_virt(void *ptr)
diff -r f695483240db -r 171f57e6ad50 lib/swiotlb-xen.c
--- a/lib/swiotlb-xen.c Fri Oct 19 17:57:20 2012 +0200
+++ b/lib/swiotlb-xen.c Mon Nov 12 09:40:45 2012 +0100
@@ -501,8 +501,8 @@ swiotlb_full(struct device *dev, size_t 
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-       dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
-                             offset_in_page(ptr);
+       dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr),
+                                                 offset_in_page(ptr));
        void *map;
        struct phys_addr buffer;
 
@@ -613,7 +613,7 @@ swiotlb_map_sg(struct device *hwdev, str
        BUG_ON(dir == DMA_NONE);
 
        for (i = 0; i < nelems; i++, sg++) {
-               dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
+               dev_addr = gnttab_dma_map_page(sg->page, sg->offset);
 
                if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
                                                  + sg->offset, sg->length)
@@ -705,7 +705,7 @@ swiotlb_map_page(struct device *hwdev, s
        dma_addr_t dev_addr;
        char *map;
 
-       dev_addr = gnttab_dma_map_page(page) + offset;
+       dev_addr = gnttab_dma_map_page(page, offset);
        if (address_needs_mapping(hwdev, dev_addr)) {
                gnttab_dma_unmap_page(dev_addr);
                buffer.page   = page;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.