[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] Further tweaks to the bounce buffer code for dma_map_single.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID ceb8d28b683e0aed53d4433f16699439c3aad045
# Parent  f5c64bb5ed7433e2b0ee698d982effb7119866b3

Further tweaks to the bounce buffer code for dma_map_single.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r f5c64bb5ed74 -r ceb8d28b683e 
linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c    Fri Jul  8 
12:55:56 2005
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c    Fri Jul  8 
16:36:26 2005
@@ -237,8 +237,10 @@
 struct dma_map_entry {
        struct list_head list;
        dma_addr_t dma;
-       void *bounce, *host;
+       char *bounce, *host;
+       size_t size;
 };
+#define DMA_MAP_MATCHES(e,d) (((e)->dma<=(d)) && (((e)->dma+(e)->size)>(d)))
 
 dma_addr_t
 dma_map_single(struct device *dev, void *ptr, size_t size,
@@ -266,6 +268,7 @@
                ent->dma    = dma;
                ent->bounce = bnc;
                ent->host   = ptr;
+               ent->size   = size;
                spin_lock_irqsave(&dma_map_lock, flags);
                list_add(&ent->list, &dma_map_head);
                spin_unlock_irqrestore(&dma_map_lock, flags);
@@ -289,13 +292,15 @@
        if (!list_empty(&dma_map_head)) {
                spin_lock_irqsave(&dma_map_lock, flags);
                list_for_each_entry ( ent, &dma_map_head, list ) {
-                       if (ent->dma == dma_addr) {
+                       if (DMA_MAP_MATCHES(ent, dma_addr)) {
                                list_del(&ent->list);
                                break;
                        }
                }
                spin_unlock_irqrestore(&dma_map_lock, flags);
                if (&ent->list != &dma_map_head) {
+                       BUG_ON(dma_addr != ent->dma);
+                       BUG_ON(size != ent->size);
                        if (direction != DMA_TO_DEVICE)
                                memcpy(ent->host, ent->bounce, size);
                        dma_free_coherent(dev, size, ent->bounce, ent->dma);
@@ -310,18 +315,21 @@
                        enum dma_data_direction direction)
 {
        struct dma_map_entry *ent;
-       unsigned long flags;
+       unsigned long flags, off;
 
        /* Fast-path check: are there any multi-page DMA mappings? */
        if (!list_empty(&dma_map_head)) {
                spin_lock_irqsave(&dma_map_lock, flags);
                list_for_each_entry ( ent, &dma_map_head, list )
-                       if (ent->dma == dma_handle)
+                       if (DMA_MAP_MATCHES(ent, dma_handle))
                                break;
                spin_unlock_irqrestore(&dma_map_lock, flags);
-               if (&ent->list != &dma_map_head)
+               if (&ent->list != &dma_map_head) {
+                       off = dma_handle - ent->dma;
+                       BUG_ON((off + size) > ent->size);
                        if (direction != DMA_TO_DEVICE)
-                               memcpy(ent->host, ent->bounce, size);
+                               memcpy(ent->host+off, ent->bounce+off, size);
+               }
        }
 }
 EXPORT_SYMBOL(dma_sync_single_for_cpu);
@@ -331,18 +339,21 @@
                            enum dma_data_direction direction)
 {
        struct dma_map_entry *ent;
-       unsigned long flags;
+       unsigned long flags, off;
 
        /* Fast-path check: are there any multi-page DMA mappings? */
        if (!list_empty(&dma_map_head)) {
                spin_lock_irqsave(&dma_map_lock, flags);
                list_for_each_entry ( ent, &dma_map_head, list )
-                       if (ent->dma == dma_handle)
+                       if (DMA_MAP_MATCHES(ent, dma_handle))
                                break;
                spin_unlock_irqrestore(&dma_map_lock, flags);
-               if (&ent->list != &dma_map_head)
+               if (&ent->list != &dma_map_head) {
+                       off = dma_handle - ent->dma;
+                       BUG_ON((off + size) > ent->size);
                        if (direction != DMA_FROM_DEVICE)
-                               memcpy(ent->bounce, ent->host, size);
+                               memcpy(ent->bounce+off, ent->host+off, size);
+               }
        }
 
        flush_write_buffers();
diff -r f5c64bb5ed74 -r ceb8d28b683e 
linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h    Fri Jul 
 8 12:55:56 2005
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h    Fri Jul 
 8 16:36:26 2005
@@ -78,7 +78,7 @@
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
 {
-       dma_sync_single_for_cpu(dev, dma_handle, size, direction);
+       dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
 }
 
 static inline void
@@ -86,7 +86,7 @@
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
 {
-       dma_sync_single_for_device(dev, dma_handle, size, direction);
+       dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
 }
 
 static inline void

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, who monitor our
servers 24x7x365 and back them with RackSpace's Fanatical Support®.