
[Xen-devel] [PATCH 13/15] [swiotlb] Make io_tlb_nslabs visible outside lib/swiotlb.c and rename it.



We rename it to something more generic, swiotlb_nslabs, and make it
visible outside lib/swiotlb.c.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 include/linux/swiotlb.h |    1 +
 lib/swiotlb.c           |   14 +++++++-------
 2 files changed, 8 insertions(+), 7 deletions(-)
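
A hypothetical sketch (not part of this patch) of what the export
enables: code outside lib/swiotlb.c can now honour the user's
swiotlb= command-line override when sizing its own bounce buffer,
e.g.:

    #include <linux/kernel.h>
    #include <linux/swiotlb.h>

    /* Hypothetical helper: pick a bounce-buffer size in bytes,
     * preferring the swiotlb= setting when one was given. */
    static unsigned long example_swiotlb_bytes(size_t default_size)
    {
            unsigned long nslabs;

            if (swiotlb_nslabs)
                    nslabs = swiotlb_nslabs;
            else
                    nslabs = ALIGN(default_size >> IO_TLB_SHIFT,
                                   IO_TLB_SEGSIZE);

            return nslabs << IO_TLB_SHIFT;
    }

This mirrors the sizing logic in swiotlb_init_with_default_size()
below.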

diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 3bc3c42..23739b0 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -9,6 +9,7 @@ struct scatterlist;
 
 extern int swiotlb_force;
 
+extern unsigned long swiotlb_nslabs;
 /*
  * Maximum allowable number of contiguous slabs to map,
  * must be a power of 2.  What is the appropriate value ?
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c11dcb1..8e65cee 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -63,7 +63,7 @@ int swiotlb_force;
  * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and
  * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
  */
-static unsigned long io_tlb_nslabs;
+unsigned long swiotlb_nslabs;
 
 /*
  * Protect the iommu_sw data structures in the map and unmap calls
@@ -81,9 +81,9 @@ setup_io_tlb_npages(char *str)
 {
        while (*str) {
                if (isdigit(*str)) {
-                       io_tlb_nslabs = simple_strtoul(str, &str, 0);
+                       swiotlb_nslabs = simple_strtoul(str, &str, 0);
                        /* avoid tail segment of size < IO_TLB_SEGSIZE */
-                       io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+                       swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
                }
                if (!strncmp(str, "force", 5))
                        swiotlb_force = 1;
@@ -174,11 +174,11 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 {
        unsigned long i, bytes;
 
-       if (!io_tlb_nslabs) {
+       if (!swiotlb_nslabs) {
                iommu_sw->nslabs = (default_size >> IO_TLB_SHIFT);
                iommu_sw->nslabs = ALIGN(iommu_sw->nslabs, IO_TLB_SEGSIZE);
        } else
-               iommu_sw->nslabs = io_tlb_nslabs;
+               iommu_sw->nslabs = swiotlb_nslabs;
 
        bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
 
@@ -263,11 +263,11 @@ swiotlb_late_init_with_default_size(size_t default_size)
        unsigned long i, bytes, req_nslabs = iommu_sw->nslabs;
        unsigned int order;
 
-       if (!io_tlb_nslabs) {
+       if (!swiotlb_nslabs) {
                iommu_sw->nslabs = (default_size >> IO_TLB_SHIFT);
                iommu_sw->nslabs = ALIGN(iommu_sw->nslabs, IO_TLB_SEGSIZE);
        } else
-               iommu_sw->nslabs = io_tlb_nslabs;
+               iommu_sw->nslabs = swiotlb_nslabs;
 
        /*
         * Get IO TLB memory from the low pages
-- 
1.6.2.5

