
[Xen-changelog] [linux-2.6.18-xen] [IA64] Support DMA tracking in sba_iommu.c



# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1181687637 21600
# Node ID 2c15ed1d75fbf302d89bade0079ba580eb54023b
# Parent  1483ef74511b1c9819212aac9f67232a66f71adf
[IA64] Support DMA tracking in sba_iommu.c

Based on a patch from Isaku Yamahata.

Under Xen, pages handed to the I/O MMU for DMA must be tracked through
the grant-table DMA interface, and the machine addresses written into
the pdir can no longer reliably be turned back into kernel virtual
addresses with bus_to_virt().  Each IOC therefore gains a
xen_virt_cache array that shadows the pdir entry for entry, recording
the virtual address behind each mapping so the backing pages can be
found again and released at unmap time.

Signed-off-by: Alex Williamson <alex.williamson@xxxxxx>
---
 arch/ia64/hp/common/sba_iommu.c |  129 +++++++++++++++++++++++++++++++++++++---
 1 files changed, 121 insertions(+), 8 deletions(-)

diff -r 1483ef74511b -r 2c15ed1d75fb arch/ia64/hp/common/sba_iommu.c
--- a/arch/ia64/hp/common/sba_iommu.c   Tue Jun 12 15:52:41 2007 -0600
+++ b/arch/ia64/hp/common/sba_iommu.c   Tue Jun 12 16:33:57 2007 -0600
@@ -42,6 +42,10 @@
 #include <asm/system.h>                /* wmb() */
 
 #include <asm/acpi-ext.h>
+#ifdef CONFIG_XEN
+#include <xen/gnttab.h>
+#include <asm/gnttab_dma.h>
+#endif
 
 #define PFX "IOC: "
 
@@ -198,6 +202,9 @@ struct ioc {
        void __iomem    *ioc_hpa;       /* I/O MMU base address */
        char            *res_map;       /* resource map, bit == pdir entry */
        u64             *pdir_base;     /* physical base address */
+#ifdef CONFIG_XEN
+       u64             *xen_virt_cache;
+#endif
        unsigned long   ibase;          /* pdir IOV Space base */
        unsigned long   imask;          /* pdir IOV Space mask */
 
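The new field is a shadow of the pdir: slot N of xen_virt_cache records
the kernel virtual address that produced pdir entry N.  A minimal
user-space sketch of this parallel-array bookkeeping (all names below
are illustrative, not kernel symbols):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for the IOC's two parallel tables. */
    struct demo_ioc {
            uint64_t *pdir_base;       /* hardware-visible entries, 8 bytes each */
            uint64_t *xen_virt_cache;  /* same index: source vaddr per entry */
            size_t nentries;
    };

    /* Mirrors the shape of sba_io_pdir_entry(): write the entry, then
     * remember the vaddr under the same index. */
    static void demo_set_entry(struct demo_ioc *ioc, size_t idx, uint64_t vaddr)
    {
            ioc->pdir_base[idx] = (vaddr & ~0xFFFULL) | 0x8000000000000000ULL;
            ioc->xen_virt_cache[idx] = vaddr;
    }

    int main(void)
    {
            struct demo_ioc ioc = { .nentries = 16 };

            ioc.pdir_base = calloc(ioc.nentries, sizeof(uint64_t));
            ioc.xen_virt_cache = calloc(ioc.nentries, sizeof(uint64_t));

            demo_set_entry(&ioc, 3, 0x123456789000ULL);
            printf("entry 3 came from vaddr 0x%llx\n",
                   (unsigned long long)ioc.xen_virt_cache[3]);

            free(ioc.pdir_base);
            free(ioc.xen_virt_cache);
            return 0;
    }
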
@@ -762,15 +769,21 @@ sba_free_range(struct ioc *ioc, dma_addr
  * on the vba.
  */
 
-#if 1
-#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr =   \
+#ifndef CONFIG_XEN
+#define sba_io_pdir_entry(ioc, pdir_ptr, vba) *pdir_ptr =      \
        ((virt_to_bus((void *)vba) & ~0xFFFULL) | 0x8000000000000000ULL)
 #else
 void SBA_INLINE
-sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
+sba_io_pdir_entry(struct ioc *ioc, u64 *pdir_ptr, unsigned long vba)
 {
        *pdir_ptr = ((virt_to_bus((void *)vba) & ~0xFFFULL) |
                    0x80000000000000FFULL);
+#ifdef CONFIG_XEN
+       if (is_running_on_xen()) {
+               int pide = ((u64)pdir_ptr - (u64)ioc->pdir_base) >> 3;
+               ioc->xen_virt_cache[pide] = vba;
+       }
+#endif
 }
 #endif
 
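The Xen branch recovers the entry index from the entry's address: pdir
entries are u64s, so the byte offset of pdir_ptr from pdir_base shifted
right by three is the entry number.  A standalone check of that
arithmetic (illustrative, user-space only):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t pdir[32];
            uint64_t *pdir_ptr = &pdir[7];

            /* Same computation as the patch (which casts through u64):
             * byte delta >> 3 == array index, since sizeof(uint64_t) == 8. */
            int pide = ((uintptr_t)pdir_ptr - (uintptr_t)pdir) >> 3;

            assert(pide == 7);
            printf("pide = %d\n", pide);
            return 0;
    }
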
@@ -857,6 +870,10 @@ sba_mark_invalid(struct ioc *ioc, dma_ad
                */
                ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
 #endif
+#ifdef CONFIG_XEN
+               if (is_running_on_xen())
+                       ioc->xen_virt_cache[off] = 0UL;
+#endif
        } else {
                u32 t = get_iovp_order(byte_cnt) + iovp_shift;
 
@@ -872,6 +889,10 @@ sba_mark_invalid(struct ioc *ioc, dma_ad
 #else
                        ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
 #endif
+#ifdef CONFIG_XEN
+                       if (is_running_on_xen())
+                               ioc->xen_virt_cache[off] = 0UL;
+#endif
                        off++;
                        byte_cnt -= iovp_size;
                } while (byte_cnt > 0);
@@ -902,7 +923,21 @@ sba_map_single(struct device *dev, void 
 #endif
 #ifdef ALLOW_IOV_BYPASS
        unsigned long pci_addr = virt_to_bus(addr);
-
+#endif
+
+#ifdef CONFIG_XEN
+       if (is_running_on_xen()) {
+               void* tmp_addr = addr;
+               size_t tmp_size = size;
+               do {
+                       gnttab_dma_use_page(virt_to_page(tmp_addr));
+                       tmp_addr += PAGE_SIZE;
+                       tmp_size -= min(tmp_size, PAGE_SIZE);
+               } while (tmp_size);
+       }
+#endif
+
+#ifdef ALLOW_IOV_BYPASS
        ASSERT(to_pci_dev(dev)->dma_mask);
        /*
        ** Check if the PCI device can DMA to ptr... if so, just return ptr
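The new loop grants every kernel page spanned by the buffer before the
bypass check, stepping from addr in whole-page increments.  A
user-space sketch of the equivalent page-frame walk (illustrative
names; note that walking by frame number from the first to the last
byte also covers a tail that crosses a page boundary when addr is not
page-aligned):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SHIFT 14                  /* 16K pages, common on ia64 */
    #define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

    /* Visit every page frame touched by [addr, addr + size), size > 0. */
    static void demo_grant_range(uintptr_t addr, size_t size)
    {
            uintptr_t pfn = addr >> DEMO_PAGE_SHIFT;
            uintptr_t last = (addr + size - 1) >> DEMO_PAGE_SHIFT;

            for (; pfn <= last; pfn++)
                    printf("  would grant pfn %lu\n", (unsigned long)pfn);
    }

    int main(void)
    {
            /* An unaligned 64-byte buffer straddling a page boundary
             * touches two frames, not one. */
            demo_grant_range(DEMO_PAGE_SIZE - 32, 64);
            return 0;
    }
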
@@ -950,7 +985,7 @@ sba_map_single(struct device *dev, void 
 
        while (size > 0) {
                ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
-               sba_io_pdir_entry(pdir_start, (unsigned long) addr);
+               sba_io_pdir_entry(ioc, pdir_start, (unsigned long) addr);
 
                DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);
 
@@ -979,14 +1014,58 @@ sba_mark_clean(struct ioc *ioc, dma_addr
        void    *addr;
 
        if (size <= iovp_size) {
+#ifdef CONFIG_XEN
+               if (is_running_on_xen())
+                       addr = (void *)ioc->xen_virt_cache[off];
+               else
+                       addr = bus_to_virt(ioc->pdir_base[off] &
+                                          ~0xE000000000000FFFULL);
+#else
                addr = bus_to_virt(ioc->pdir_base[off] &
                                   ~0xE000000000000FFFULL);
+#endif
                mark_clean(addr, size);
        } else {
                do {
+#ifdef CONFIG_XEN
+                       if (is_running_on_xen())
+                               addr = (void *)ioc->xen_virt_cache[off];
+                       else
+                               addr = bus_to_virt(ioc->pdir_base[off] &
+                                                  ~0xE000000000000FFFULL);
+#else
                        addr = bus_to_virt(ioc->pdir_base[off] &
                                           ~0xE000000000000FFFULL);
+#endif
                        mark_clean(addr, min(size, iovp_size));
+                       off++;
+                       size -= iovp_size;
+               } while (size > 0);
+       }
+}
+#endif
+
+#ifdef CONFIG_XEN
+static void
+sba_gnttab_dma_unmap_page(struct ioc *ioc, dma_addr_t iova, size_t size)
+{
+       u32 iovp = (u32) SBA_IOVP(ioc,iova);
+       int off = PDIR_INDEX(iovp);
+       struct page *page;
+
+       if (size <= iovp_size) {
+               BUG_ON(!ioc->xen_virt_cache[off]);
+               page = virt_to_page(ioc->xen_virt_cache[off]);
+               __gnttab_dma_unmap_page(page);
+       } else {
+               struct page *last_page = (struct page *)~0UL;
+               do {
+                       BUG_ON(!ioc->xen_virt_cache[off]);
+                       page = virt_to_page(ioc->xen_virt_cache[off]);
+                       if (page != last_page) {
+                               __gnttab_dma_unmap_page(page);
+                               last_page = page;
+                       }
                        off++;
                        size -= iovp_size;
                } while (size > 0);
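Under Xen the pdir holds machine addresses, so bus_to_virt() on a pdir
entry no longer recovers the original kernel virtual address; both
sba_mark_clean() and the new unmap helper consult xen_virt_cache
instead.  Inside the helper, iovp_size can be smaller than the kernel
page size, so several consecutive pdir entries may live in the same
page; the last_page test releases each page only once per run.  The
pattern in isolation (illustrative):

    #include <stdio.h>

    int main(void)
    {
            /* pdir entry -> owning page id; three entries share page 5,
             * two share page 9. */
            int entry_page[] = { 5, 5, 5, 9, 9 };
            int last_page = -1;

            for (int i = 0; i < 5; i++) {
                    /* Release a page only when the run of entries
                     * pointing into it ends, as the patch does. */
                    if (entry_page[i] != last_page) {
                            printf("release page %d\n", entry_page[i]);
                            last_page = entry_page[i];
                    }
            }
            return 0;
    }
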
@@ -1027,6 +1106,15 @@ void sba_unmap_single(struct device *dev
                        mark_clean(bus_to_virt(iova), size);
                }
 #endif
+#ifdef CONFIG_XEN
+               if (is_running_on_xen()) {
+                       do {
+                               gnttab_dma_unmap_page(iova);
+                               iova += PAGE_SIZE;
+                               size -= min(size,PAGE_SIZE);
+                       } while (size);
+               }
+#endif
                return;
        }
 #endif
@@ -1042,6 +1130,10 @@ void sba_unmap_single(struct device *dev
 #ifdef ENABLE_MARK_CLEAN
        if (dir == DMA_FROM_DEVICE)
                sba_mark_clean(ioc, iova, size);
+#endif
+#ifdef CONFIG_XEN
+       if (is_running_on_xen())
+               sba_gnttab_dma_unmap_page(ioc, iova, size);
 #endif
 
 #if DELAYED_RESOURCE_CNT > 0
@@ -1240,7 +1332,7 @@ sba_fill_pdir(
                        dma_offset=0;   /* only want offset on first chunk */
                        cnt = ROUNDUP(cnt, iovp_size);
                        do {
-                               sba_io_pdir_entry(pdirp, vaddr);
+                               sba_io_pdir_entry(ioc, pdirp, vaddr);
                                vaddr += iovp_size;
                                cnt -= iovp_size;
                                pdirp++;
@@ -1427,7 +1519,11 @@ int sba_map_sg(struct device *dev, struc
        if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
                for (sg = sglist ; filled < nents ; filled++, sg++){
                        sg->dma_length = sg->length;
+#ifdef CONFIG_XEN
+                       sg->dma_address = gnttab_dma_map_page(sg->page) + sg->offset;
+#else
                        sg->dma_address = virt_to_bus(sba_sg_address(sg));
+#endif
                }
                return filled;
        }
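In the bypass path the bus address is now composed from the granted
page plus the intra-page offset rather than from virt_to_bus().  The
composition in a user-space sketch (demo_page_to_bus is a hypothetical
stand-in for gnttab_dma_map_page(), which per the diff yields the bus
address that sg->offset is added to):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SHIFT 14

    /* Hypothetical stand-in for the grant-table page lookup. */
    static uint64_t demo_page_to_bus(uint64_t pfn)
    {
            return pfn << DEMO_PAGE_SHIFT;
    }

    int main(void)
    {
            uint64_t pfn = 42;           /* the page backing sg->page  */
            unsigned int offset = 0x123; /* sg->offset within the page */

            uint64_t dma_address = demo_page_to_bus(pfn) + offset;
            printf("dma_address = 0x%llx\n", (unsigned long long)dma_address);
            return 0;
    }
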
@@ -1450,6 +1546,15 @@ int sba_map_sg(struct device *dev, struc
 #endif
 
        prefetch(ioc->res_hint);
+
+#ifdef CONFIG_XEN
+       if (is_running_on_xen()) {
+               int i;
+
+               for (i = 0; i < nents; i++)
+                       gnttab_dma_use_page(sglist[i].page);
+       }
+#endif
 
        /*
        ** First coalesce the chunks and allocate I/O pdir space
@@ -1581,13 +1686,21 @@ ioc_iova_init(struct ioc *ioc)
        if (!ioc->pdir_base)
                panic(PFX "Couldn't allocate I/O Page Table\n");
 
+       memset(ioc->pdir_base, 0, ioc->pdir_size);
+
 #ifdef CONFIG_XEN
        /* The page table needs to be pinned in Xen memory */
        if (xen_create_contiguous_region((unsigned long)ioc->pdir_base,
                                         get_order(ioc->pdir_size), 0))
                panic(PFX "Couldn't contiguously map I/O Page Table\n");
-#endif
-       memset(ioc->pdir_base, 0, ioc->pdir_size);
+
+       ioc->xen_virt_cache = (void *) __get_free_pages(
+                                       GFP_KERNEL, get_order(ioc->pdir_size));
+       if (!ioc->xen_virt_cache)
+               panic(PFX "Couldn't allocate Xen virtual address cache\n");
+
+       memset(ioc->xen_virt_cache, 0, ioc->pdir_size);
+#endif
 
        DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
                iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
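The memset of the pdir now runs before the table is pinned by
xen_create_contiguous_region(), and the virtual-address cache reuses
pdir_size for its own allocation: the cache stores one u64 per u64
pdir entry, so the same size, and therefore the same allocation order,
fits both tables.  A sketch of the order computation with a simplified
get_order() (demo values, not kernel code):

    #include <stdio.h>

    #define DEMO_PAGE_SIZE (1UL << 14)

    /* Simplified get_order(): smallest n with (PAGE_SIZE << n) >= size. */
    static int demo_get_order(unsigned long size)
    {
            int order = 0;
            unsigned long span = DEMO_PAGE_SIZE;

            while (span < size) {
                    span <<= 1;
                    order++;
            }
            return order;
    }

    int main(void)
    {
            /* Example: a 256K pdir (32K entries of 8 bytes) and its
             * equally sized virtual-address cache share one order. */
            unsigned long pdir_size = 256UL * 1024;

            printf("get_order(%lu) = %d\n", pdir_size,
                   demo_get_order(pdir_size));
            return 0;
    }
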
