
[Xen-devel] [PATCH RFC 2/2] xen-swiotlb: Provide size-aligned DMA addresses.



The dma_alloc_coherent() API specifies that:

[...]
"The CPU virtual address and the DMA address are both guaranteed to be
aligned to the smallest PAGE_SIZE order which is greater than or equal
to the requested size.  This invariant exists (for example) to guarantee
that if you allocate a chunk which is smaller than or equal to 64
kilobytes, the extent of the buffer you receive will not cross a 64K
boundary."
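
To illustrate the invariant (a minimal sketch; the driver, device and
buffer size here are hypothetical and not part of this patch):

#include <linux/dma-mapping.h>

/* Allocate a 64K ring; per the API it must not cross a 64K boundary. */
static int alloc_ring(struct device *dev)
{
        size_t size = 64 * 1024;
        dma_addr_t dma;
        void *cpu = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);

        if (!cpu)
                return -ENOMEM;

        /*
         * Both the CPU and DMA addresses are guaranteed to be aligned
         * to the smallest PAGE_SIZE order >= size, hence no 64K
         * boundary crossing.
         */
        WARN_ON(dma & (size - 1));
        WARN_ON((unsigned long)cpu & (size - 1));
        return 0;
}

Under Xen the machine address backing the buffer is what the device
actually sees, so the machine frames, not just the pseudo-physical
ones, have to honor that alignment.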

This change, along with the new XENMEMF_align_size flag in the
hypervisor, allows us to provide DMA buffers that are size-aligned.
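
For reference, a sketch of the intended encoding (assuming the
hypervisor side of this series decodes the flag from the same word that
carries the address-bits value, as the existing XENMEMF_ flags in Xen's
public memory.h do; bit 19 is presumably the next free bit after
XENMEMF_vnode at bit 18):

unsigned int mem_flags = address_bits;  /* e.g. 32 for a 32-bit device */

if (size_align)
        mem_flags |= XENMEMF_align_size;        /* (1U << 19) */

exchange.out.address_bits = mem_flags;

In other words, the flag is simply OR-ed into the field the guest
already uses to express its DMA addressing limit, which is what the
mmu.c hunk below does.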

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 arch/x86/xen/mmu.c             | 12 +++++++-----
 drivers/xen/swiotlb-xen.c      |  4 ++--
 include/xen/interface/memory.h | 10 ++++++++++
 include/xen/xen-ops.h          |  2 +-
 4 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 7d5afdb..07e5a97 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2577,7 +2577,8 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
                               unsigned long extents_out,
                               unsigned int order_out,
                               unsigned long *mfns_out,
-                              unsigned int address_bits)
+                              unsigned int address_bits,
+                              bool size_align)
 {
        long rc;
        int success;
@@ -2599,7 +2600,8 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
        };
 
        BUG_ON(extents_in << order_in != extents_out << order_out);
-
+        if (size_align)
+                exchange.out.address_bits |= XENMEMF_align_size;
        rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == extents_in);
 
@@ -2611,7 +2613,7 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
 
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
-                                dma_addr_t *dma_handle)
+                                dma_addr_t *dma_handle, bool size_align)
 {
        unsigned long *in_frames = discontig_frames, out_frame;
        unsigned long  flags;
@@ -2641,7 +2643,7 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
        out_frame = virt_to_pfn(vstart);
        success = xen_exchange_memory(1UL << order, 0, in_frames,
                                      1, order, &out_frame,
-                                     address_bits);
+                                     address_bits, size_align);
 
        /* 3. Map the new extent in place of old pages. */
        if (success)
@@ -2682,7 +2684,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 
        /* 3. Do the exchange for non-contiguous MFNs. */
        success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
-                                       0, out_frames, 0);
+                                       0, out_frames, 0, false);
 
        /* 4. Map new pages in place of old pages. */
        if (success)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 87e6035..4996685 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -173,7 +173,7 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
-                               dma_bits, &dma_handle);
+                               dma_bits, &dma_handle, false);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;
@@ -334,7 +334,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(phys, order,
-                                                fls64(dma_mask), dma_handle) != 0) {
+                                                fls64(dma_mask), dma_handle, true) != 0) {
                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 9aa8988..b98271f 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -19,6 +19,16 @@
 #define XENMEM_increase_reservation 0
 #define XENMEM_decrease_reservation 1
 #define XENMEM_populate_physmap     6
+/*
+ * Maximum # bits addressable by the user of the allocated region (e.g., I/O
+ * devices often have a 32-bit limitation even in 64-bit systems). If zero
+ * then the user has no addressing restriction. This field is not used by
+ * XENMEM_decrease_reservation.
+ */
+
+/* Flag to request that the allocation be size-aligned. */
+#define XENMEMF_align_size (1U<<19)
+
 struct xen_memory_reservation {
 
     /*
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index b5486e6..9aa3ab8 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -36,7 +36,7 @@ int xen_setup_shutdown_event(void);
 extern unsigned long *xen_contiguous_bitmap;
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                unsigned int address_bits,
-                               dma_addr_t *dma_handle);
+                               dma_addr_t *dma_handle, bool size_align);
 
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
 
-- 
2.7.4

