[Xen-devel] [PATCH 1/2] amd-iommu: add flush iommu_ops



The iommu_ops structure contains two methods for flushing: 'iotlb_flush' and
'iotlb_flush_all'. This patch adds implementations of these for AMD IOMMUs.
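
For reference, the two hooks have roughly the following shape (a sketch
inferred from the prototypes added by this patch; the exact declarations
in xen/include/xen/iommu.h may differ):

    struct iommu_ops {
        ...
        /* Flush IOTLB entries for page_count 4k pages starting at dfn. */
        int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
                                        unsigned int page_count);
        /* Flush all IOTLB entries for the domain. */
        int __must_check (*iotlb_flush_all)(struct domain *d);
        ...
    };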

The iotlb_flush method takes a base DFN and a count of 4k pages, but the
underlying flush is performed by page order (i.e. 0, 9 or 18). Because a
flush operation is fairly expensive, the code calculates the minimum-order
single flush that covers the specified page range rather than issuing
multiple flushes.
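
As a worked example (illustrative only, not part of the patch): a flush of
16 pages starting at DFN 504 covers the range [504, 520), which straddles
an order-9 (512-page) boundary, so no single order-9 flush covers it; a
single order-18 (262144-page) flush does, and that is what gets issued:

    /*
     * dfn = 504, page_count = 16 => range [504, 520)
     * order 9:  504 / 512 = 0, DIV_ROUND_UP(520, 512) = 2       -> 2 flushes
     * order 18: 504 / 262144 = 0, DIV_ROUND_UP(520, 262144) = 1 -> 1 flush
     */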

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Brian Woods <brian.woods@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_map.c       | 48 +++++++++++++++++++++++++++
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |  2 ++
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  3 ++
 xen/include/xen/iommu.h                       |  5 +++
 4 files changed, 58 insertions(+)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 04cb7b3182..c05b042821 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -631,6 +631,54 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn)
     spin_unlock(&hd->arch.mapping_lock);
 
     amd_iommu_flush_pages(d, dfn_x(dfn), 0);
+    return 0;
+}
+
+static unsigned long flush_count(dfn_t dfn, unsigned int page_count,
+                                 unsigned int order)
+{
+    unsigned long start = dfn_x(dfn) / (1u << order);
+    unsigned long end = DIV_ROUND_UP(dfn_x(dfn) + page_count,
+                                     (1u << order));
+
+    ASSERT(end > start);
+    return end - start;
+}
+
+int amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
+                                unsigned int page_count)
+{
+    /* Match VT-d semantics */
+    if ( !page_count || dfn_eq(dfn, INVALID_DFN) ||
+         dfn_lt(dfn_add(dfn, page_count), dfn) /* overflow */ )
+    {
+        amd_iommu_flush_all_pages(d);
+        return 0;
+    }
+
+    /*
+     * Flushes are expensive so find the minimal single flush that will
+     * cover the page range.
+     *
+     * NOTE: It is unnecessary to round down the DFN value to align with
+     *       the flush order here. This is done by the internals of the
+     *       flush code.
+     */
+    if ( page_count == 1 ) /* order 0 flush count */
+        amd_iommu_flush_pages(d, dfn_x(dfn), 0);
+    else if ( flush_count(dfn, page_count, 9) == 1 )
+        amd_iommu_flush_pages(d, dfn_x(dfn), 9);
+    else if ( flush_count(dfn, page_count, 18) == 1 )
+        amd_iommu_flush_pages(d, dfn_x(dfn), 18);
+    else
+        amd_iommu_flush_all_pages(d);
+
+    return 0;
+}
+
+int amd_iommu_flush_iotlb_all(struct domain *d)
+{
+    amd_iommu_flush_all_pages(d);
 
     return 0;
 }
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 900136390d..33a3798f36 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -579,6 +579,8 @@ static const struct iommu_ops __initconstrel amd_iommu_ops = {
     .teardown = amd_iommu_domain_destroy,
     .map_page = amd_iommu_map_page,
     .unmap_page = amd_iommu_unmap_page,
+    .iotlb_flush = amd_iommu_flush_iotlb_pages,
+    .iotlb_flush_all = amd_iommu_flush_iotlb_all,
     .free_page_table = deallocate_page_table,
     .reassign_device = reassign_device,
     .get_device_group_id = amd_iommu_group_id,
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 718a44f956..88715329ca 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -60,6 +60,9 @@ int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
                                        paddr_t phys_addr, unsigned long size,
                                        int iw, int ir);
+int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
+                                             unsigned int page_count);
+int __must_check amd_iommu_flush_iotlb_all(struct domain *d);
 
 /* Share p2m table with iommu */
 void amd_iommu_share_p2m(struct domain *d);
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 3d78126801..da8294bac8 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -52,6 +52,11 @@ static inline bool_t dfn_eq(dfn_t x, dfn_t y)
     return dfn_x(x) == dfn_x(y);
 }
 
+static inline bool_t dfn_lt(dfn_t x, dfn_t y)
+{
+    return dfn_x(x) < dfn_x(y);
+}
+
 extern bool_t iommu_enable, iommu_enabled;
 extern bool_t force_iommu, iommu_verbose;
 extern bool_t iommu_workaround_bios_bug, iommu_igfx;
-- 
2.11.0

