
[Xen-devel] [PATCH 14/15] [swiotlb] Move initialization (swiotlb_init) and its friends to swiotlb-default.c



We move all of the initialization functions, as well as all of the
functions defined in swiotlb_ops, to a separate file.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
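For reference, here is a rough sketch of struct swiotlb_engine as the
functions below use it. The field names come from the code in this
patch; the types are inferred, and the authoritative definition is the
one introduced earlier in this series in include/linux/swiotlb.h:

struct swiotlb_engine {
        const char      *name;            /* "software IO TLB", ... */
        unsigned long   overflow;         /* emergency pool size, in bytes */
        void            *start;           /* start of the bounce aperture */
        void            *end;             /* first byte past the aperture */
        unsigned long   nslabs;           /* aperture size, in IO_TLB_SHIFT slabs */
        unsigned int    *list;            /* per-slab count of contiguous free slabs */
        unsigned int    index;            /* where the next free-slab search starts */
        phys_addr_t     *orig_addr;       /* original address of each bounced slab */
        void            *overflow_buffer; /* emergency pool */
        void            *priv;            /* non-NULL: allocated late via the page
                                           * allocator; NULL: bootmem */
        int             (*release)(struct swiotlb_engine *);
        bool            (*dma_capable)(struct device *, dma_addr_t,
                                       phys_addr_t, size_t);
        int             (*is_swiotlb_buffer)(struct swiotlb_engine *,
                                             dma_addr_t, phys_addr_t);
        dma_addr_t      (*phys_to_bus)(struct device *, phys_addr_t);
        phys_addr_t     (*bus_to_phys)(struct device *, dma_addr_t);
        dma_addr_t      (*virt_to_bus)(struct device *, void *);
        void *          (*bus_to_virt)(struct device *, dma_addr_t);
};
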
 lib/Makefile          |    2 +-
 lib/swiotlb-default.c |  242 +++++++++++++++++++++++++++++++++++++++++++++++++
 lib/swiotlb.c         |  231 +----------------------------------------------
 3 files changed, 245 insertions(+), 230 deletions(-)
 create mode 100644 lib/swiotlb-default.c
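
(For scale: with IO_TLB_SHIFT = 11 and IO_TLB_SEGSIZE = 128 from
include/linux/swiotlb.h, each slab is 2 KB, so the 64 MB default in
swiotlb_init() works out to (64 << 20) >> 11 = 32768 slabs, and
IO_TLB_MIN_SLABS = (1 << 20) >> 11 = 512; the late-init path halves the
allocation order until it either gets a buffer or would fall below a
1 MB aperture.)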

diff --git a/lib/Makefile b/lib/Makefile
index 347ad8d..fd96891 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -77,7 +77,7 @@ obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
+obj-$(CONFIG_SWIOTLB) += swiotlb.o swiotlb-default.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 
diff --git a/lib/swiotlb-default.c b/lib/swiotlb-default.c
new file mode 100644
index 0000000..c490fcf
--- /dev/null
+++ b/lib/swiotlb-default.c
@@ -0,0 +1,242 @@
+
+#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
+#include <linux/bootmem.h>
+
+
+#define OFFSET(val, align) ((unsigned long)    \
+                                (val) & ((align) - 1))
+
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
+/* Note that this doesn't work with highmem pages */
+static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
+                                     void *address)
+{
+       return phys_to_dma(hwdev, virt_to_phys(address));
+}
+
+static void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t dev_addr)
+{
+       return phys_to_virt(dma_to_phys(hwdev, dev_addr));
+};
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init_with_default_size(struct swiotlb_engine *iommu_sw,
+                              size_t default_size, int verbose)
+{
+       unsigned long i, bytes;
+
+       if (!swiotlb_nslabs) {
+               iommu_sw->nslabs = (default_size >> IO_TLB_SHIFT);
+               iommu_sw->nslabs = ALIGN(iommu_sw->nslabs, IO_TLB_SEGSIZE);
+       } else
+               iommu_sw->nslabs = swiotlb_nslabs;
+
+       bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
+
+       /*
+        * Get IO TLB memory from the low pages
+        */
+       iommu_sw->start = alloc_bootmem_low_pages(bytes);
+       if (!iommu_sw->start)
+               panic("Cannot allocate SWIOTLB buffer");
+       iommu_sw->end = iommu_sw->start + bytes;
+
+       /*
+        * Allocate and initialize the free list array.  This array is used
+        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+        * between iommu_sw->start and iommu_sw->end.
+        */
+       iommu_sw->list = alloc_bootmem(iommu_sw->nslabs * sizeof(int));
+       for (i = 0; i < iommu_sw->nslabs; i++)
+               iommu_sw->list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+       iommu_sw->index = 0;
+       iommu_sw->orig_addr = alloc_bootmem(iommu_sw->nslabs *
+                                           sizeof(phys_addr_t));
+
+       /*
+        * Get the overflow emergency buffer
+        */
+       iommu_sw->overflow_buffer = alloc_bootmem_low(iommu_sw->overflow);
+       if (!iommu_sw->overflow_buffer)
+               panic("Cannot allocate SWIOTLB overflow buffer!\n");
+       if (verbose)
+               swiotlb_print_info();
+
+       iommu_sw->priv = NULL;
+}
+
+int swiotlb_release(struct swiotlb_engine *iommu_sw)
+{
+       if (!iommu_sw)
+               return -ENODEV;
+
+       if (iommu_sw->priv) {
+               free_pages((unsigned long)iommu_sw->overflow_buffer,
+                          get_order(iommu_sw->overflow));
+               free_pages((unsigned long)iommu_sw->orig_addr,
+                          get_order(iommu_sw->nslabs * sizeof(phys_addr_t)));
+               free_pages((unsigned long)iommu_sw->list,
+                          get_order(iommu_sw->nslabs * sizeof(int)));
+               free_pages((unsigned long)iommu_sw->start,
+                          get_order(iommu_sw->nslabs << IO_TLB_SHIFT));
+       } else {
+               free_bootmem_late(__pa(iommu_sw->overflow_buffer),
+                                 iommu_sw->overflow);
+               free_bootmem_late(__pa(iommu_sw->orig_addr),
+                                 iommu_sw->nslabs * sizeof(phys_addr_t));
+               free_bootmem_late(__pa(iommu_sw->list),
+                                 iommu_sw->nslabs * sizeof(int));
+               free_bootmem_late(__pa(iommu_sw->start),
+                                 iommu_sw->nslabs << IO_TLB_SHIFT);
+       }
+       return 0;
+}
+
+static int is_swiotlb_buffer(struct swiotlb_engine *iommu_sw,
+                            dma_addr_t dma_addr, phys_addr_t paddr)
+{
+       return paddr >= virt_to_phys(iommu_sw->start) &&
+               paddr < virt_to_phys(iommu_sw->end);
+}
+
+static bool swiotlb_dma_capable(struct device *hwdev, dma_addr_t dma_addr,
+                               phys_addr_t phys, size_t size)
+{
+       /* Phys is not necessary in this case. */
+       return dma_capable(hwdev, dma_addr, size);
+}
+static struct swiotlb_engine swiotlb_ops = {
+       .name = "software IO TLB",
+       .overflow = 32 * 1024,
+       .release = swiotlb_release,
+       .dma_capable = swiotlb_dma_capable,
+       .is_swiotlb_buffer = is_swiotlb_buffer,
+       .phys_to_bus = phys_to_dma,
+       .bus_to_phys = dma_to_phys,
+       .virt_to_bus = swiotlb_virt_to_bus,
+       .bus_to_virt = swiotlb_bus_to_virt,
+};
+
+void __init
+swiotlb_init(int verbose)
+{
+       swiotlb_register_engine(&swiotlb_ops);
+       swiotlb_init_with_default_size(&swiotlb_ops, 64 * (1<<20),
+                                       verbose);       /* default to 64MB */
+}
+
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the slab allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size(struct swiotlb_engine *iommu_sw,
+                                   size_t default_size)
+{
+       unsigned long i, bytes, req_nslabs = iommu_sw->nslabs;
+       unsigned int order;
+
+       if (!swiotlb_nslabs) {
+               iommu_sw->nslabs = (default_size >> IO_TLB_SHIFT);
+               iommu_sw->nslabs = ALIGN(iommu_sw->nslabs, IO_TLB_SEGSIZE);
+       } else
+               iommu_sw->nslabs = swiotlb_nslabs;
+
+       /*
+        * Get IO TLB memory from the low pages
+        */
+       order = get_order(iommu_sw->nslabs << IO_TLB_SHIFT);
+       iommu_sw->nslabs = SLABS_PER_PAGE << order;
+       bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
+
+       while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+               iommu_sw->start = (void *)__get_free_pages(GFP_DMA |
+                                                       __GFP_NOWARN, order);
+               if (iommu_sw->start)
+                       break;
+               order--;
+       }
+
+       if (!iommu_sw->start)
+               goto cleanup1;
+
+       if (order != get_order(bytes)) {
+               printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+                      "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+               iommu_sw->nslabs = SLABS_PER_PAGE << order;
+               bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
+       }
+       iommu_sw->end = iommu_sw->start + bytes;
+       memset(iommu_sw->start, 0, bytes);
+
+       /*
+        * Allocate and initialize the free list array.  This array is used
+        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+        * between iommu_sw->start and iommu_sw->end.
+        */
+       iommu_sw->list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+                               get_order(iommu_sw->nslabs * sizeof(int)));
+       if (!iommu_sw->list)
+               goto cleanup2;
+
+       for (i = 0; i < iommu_sw->nslabs; i++)
+               iommu_sw->list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+       iommu_sw->index = 0;
+
+       iommu_sw->orig_addr = (phys_addr_t *)
+               __get_free_pages(GFP_KERNEL,
+                                get_order(iommu_sw->nslabs *
+                                          sizeof(phys_addr_t)));
+       if (!iommu_sw->orig_addr)
+               goto cleanup3;
+
+       memset(iommu_sw->orig_addr, 0, iommu_sw->nslabs * sizeof(phys_addr_t));
+
+       /*
+        * Get the overflow emergency buffer
+        */
+       iommu_sw->overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+                                               get_order(iommu_sw->overflow));
+       if (!iommu_sw->overflow_buffer)
+               goto cleanup4;
+
+       swiotlb_print_info();
+
+       /* We utilize the private field to figure out whether we
+        * were allocated late or early.
+        */
+       iommu_sw->priv = (void *)1;
+
+       return 0;
+
+cleanup4:
+       free_pages((unsigned long)iommu_sw->orig_addr,
+                  get_order(iommu_sw->nslabs * sizeof(phys_addr_t)));
+       iommu_sw->orig_addr = NULL;
+cleanup3:
+       free_pages((unsigned long)iommu_sw->list, get_order(iommu_sw->nslabs *
+                                               sizeof(int)));
+       iommu_sw->list = NULL;
+cleanup2:
+       iommu_sw->end = NULL;
+       free_pages((unsigned long)iommu_sw->start, order);
+       iommu_sw->start = NULL;
+cleanup1:
+       iommu_sw->nslabs = req_nslabs;
+       return -ENOMEM;
+}
+
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8e65cee..9e72d21 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -40,14 +40,6 @@
 #define OFFSET(val,align) ((unsigned long)     \
                           ( (val) & ( (align) - 1)))
 
-#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
-
-/*
- * Minimum IO TLB size to bother booting with.  Systems with mainly
- * 64bit capable cards will only lightly use the swiotlb.  If we can't
- * allocate a contiguous 1MB, we're probably in trouble anyway.
- */
-#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
 /*
  * Enumeration for sync targets
@@ -96,18 +88,6 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-/* Note that this doesn't work with highmem page */
-static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
-                                     void *address)
-{
-       return phys_to_dma(hwdev, virt_to_phys(address));
-}
-
-static void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t dev_addr)
-{
-       return phys_to_virt(dma_to_phys(hwdev, dev_addr));
-};
-
 /*
  * Register a software IO TLB engine.
  *
@@ -165,220 +145,13 @@ void swiotlb_print_info(void)
               (unsigned long long)pend);
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
-{
-       unsigned long i, bytes;
-
-       if (!swiotlb_nslabs) {
-               iommu_sw->nslabs = (default_size >> IO_TLB_SHIFT);
-               iommu_sw->nslabs = ALIGN(iommu_sw->nslabs, IO_TLB_SEGSIZE);
-       } else
-               iommu_sw->nslabs = swiotlb_nslabs;
-
-       bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
-
-       /*
-        * Get IO TLB memory from the low pages
-        */
-       iommu_sw->start = alloc_bootmem_low_pages(bytes);
-       if (!iommu_sw->start)
-               panic("Cannot allocate SWIOTLB buffer");
-       iommu_sw->end = iommu_sw->start + bytes;
-
-       /*
-        * Allocate and initialize the free list array.  This array is used
-        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between iommu_sw->start and iommu_sw->end.
-        */
-       iommu_sw->list = alloc_bootmem(iommu_sw->nslabs * sizeof(int));
-       for (i = 0; i < iommu_sw->nslabs; i++)
-               iommu_sw->list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-       iommu_sw->index = 0;
-       iommu_sw->orig_addr = alloc_bootmem(iommu_sw->nslabs *
-                                           sizeof(phys_addr_t));
-
-       /*
-        * Get the overflow emergency buffer
-        */
-       iommu_sw->overflow_buffer = alloc_bootmem_low(iommu_sw->overflow);
-       if (!iommu_sw->overflow_buffer)
-               panic("Cannot allocate SWIOTLB overflow buffer!\n");
-       if (verbose)
-               swiotlb_print_info();
-
-       iommu_sw->priv = NULL;
-}
-
-static int swiotlb_release(struct swiotlb_engine *iotlb)
-{
-       swiotlb_free();
-       return 0;
-}
-static int is_swiotlb_buffer(struct swiotlb_engine *iommu_sw,
-                            dma_addr_t dev_addr, phys_addr_t paddr)
-{
-       return paddr >= virt_to_phys(iommu_sw->start) &&
-               paddr < virt_to_phys(iommu_sw->end);
-}
-
-static bool swiotlb_dma_capable(struct device *hwdev, dma_addr_t dma_addr,
-                               phys_addr_t phys, size_t size)
-{
-       /* Phys is not neccessary in this case. */
-       return dma_capable(hwdev, dma_addr, size);
-}
-
-static struct swiotlb_engine swiotlb_ops = {
-       .name = "software IO TLB",
-       .overflow = 32 * 1024,
-       .release = swiotlb_release,
-       .dma_capable = swiotlb_dma_capable,
-       .is_swiotlb_buffer = is_swiotlb_buffer,
-       .phys_to_bus =  phys_to_dma,
-       .bus_to_phys = dma_to_phys,
-       .virt_to_bus = swiotlb_virt_to_bus,
-       .bus_to_virt = swiotlb_bus_to_virt,
-};
-
-void __init
-swiotlb_init(int verbose)
-{
-       swiotlb_register_engine(&swiotlb_ops);
-       swiotlb_init_with_default_size(64 * (1<<20), verbose);  /* default to 64MB */
-}
-
-/*
- * Systems with larger DMA zones (those that don't support ISA) can
- * initialize the swiotlb later using the slab allocator if needed.
- * This should be just like above, but with some error catching.
- */
-int
-swiotlb_late_init_with_default_size(size_t default_size)
-{
-       unsigned long i, bytes, req_nslabs = iommu_sw->nslabs;
-       unsigned int order;
-
-       if (!swiotlb_nslabs) {
-               iommu_sw->nslabs = (default_size >> IO_TLB_SHIFT);
-               iommu_sw->nslabs = ALIGN(iommu_sw->nslabs, IO_TLB_SEGSIZE);
-       } else
-               iommu_sw->nslabs = swiotlb_nslabs;
-
-       /*
-        * Get IO TLB memory from the low pages
-        */
-       order = get_order(iommu_sw->nslabs << IO_TLB_SHIFT);
-       iommu_sw->nslabs = SLABS_PER_PAGE << order;
-       bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
-
-       while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-               iommu_sw->start = (void *)__get_free_pages(GFP_DMA |
-                                                       __GFP_NOWARN, order);
-               if (iommu_sw->start)
-                       break;
-               order--;
-       }
-
-       if (!iommu_sw->start)
-               goto cleanup1;
-
-       if (order != get_order(bytes)) {
-               printk(KERN_WARNING "Warning: only able to allocate %ld MB "
-                      "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
-               iommu_sw->nslabs = SLABS_PER_PAGE << order;
-               bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
-       }
-       iommu_sw->end = iommu_sw->start + bytes;
-       memset(iommu_sw->start, 0, bytes);
-
-       /*
-        * Allocate and initialize the free list array.  This array is used
-        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between iommu_sw->start and iommu_sw->end.
-        */
-       iommu_sw->list = (unsigned int *)__get_free_pages(GFP_KERNEL,
-                               get_order(iommu_sw->nslabs * sizeof(int)));
-       if (!iommu_sw->list)
-               goto cleanup2;
-
-       for (i = 0; i < iommu_sw->nslabs; i++)
-               iommu_sw->list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-       iommu_sw->index = 0;
-
-       iommu_sw->orig_addr = (phys_addr_t *)
-               __get_free_pages(GFP_KERNEL,
-                                get_order(iommu_sw->nslabs *
-                                          sizeof(phys_addr_t)));
-       if (!iommu_sw->orig_addr)
-               goto cleanup3;
-
-       memset(iommu_sw->orig_addr, 0, iommu_sw->nslabs * sizeof(phys_addr_t));
-
-       /*
-        * Get the overflow emergency buffer
-        */
-       iommu_sw->overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-                                               get_order(iommu_sw->overflow));
-       if (!iommu_sw->overflow_buffer)
-               goto cleanup4;
-
-       swiotlb_print_info();
-
-       /* We utilize the private field to figure out whether we
-        * were allocated late or early.
-        */
-       iommu_sw->priv = (void *)1;
-
-       return 0;
-
-cleanup4:
-       free_pages((unsigned long)iommu_sw->orig_addr,
-                  get_order(iommu_sw->nslabs * sizeof(phys_addr_t)));
-       iommu_sw->orig_addr = NULL;
-cleanup3:
-       free_pages((unsigned long)iommu_sw->list, get_order(iommu_sw->nslabs *
-                                                        sizeof(int)));
-       iommu_sw->list = NULL;
-cleanup2:
-       iommu_sw->end = NULL;
-       free_pages((unsigned long)iommu_sw->start, order);
-       iommu_sw->start = NULL;
-cleanup1:
-       iommu_sw->nslabs = req_nslabs;
-       return -ENOMEM;
-}
-
 void __init swiotlb_free(void)
 {
        if (!iommu_sw)
                return;
 
-       if (iommu_sw->priv) {
-               free_pages((unsigned long)iommu_sw->overflow_buffer,
-                          get_order(iommu_sw->overflow));
-               free_pages((unsigned long)iommu_sw->orig_addr,
-                          get_order(iommu_sw->nslabs * sizeof(phys_addr_t)));
-               free_pages((unsigned long)iommu_sw->list,
-                          get_order(iommu_sw->nslabs * sizeof(int)));
-               free_pages((unsigned long)iommu_sw->start,
-                          get_order(iommu_sw->nslabs << IO_TLB_SHIFT));
-       } else {
-               free_bootmem_late(__pa(iommu_sw->overflow_buffer),
-                                 iommu_sw->overflow);
-               free_bootmem_late(__pa(iommu_sw->orig_addr),
-                                 iommu_sw->nslabs * sizeof(phys_addr_t));
-               free_bootmem_late(__pa(iommu_sw->list),
-                                 iommu_sw->nslabs * sizeof(int));
-               free_bootmem_late(__pa(iommu_sw->start),
-                                 iommu_sw->nslabs << IO_TLB_SHIFT);
-       }
-}
-
+       iommu_sw->release(iommu_sw);
+}
 
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
-- 
1.6.2.5

