[Xen-devel] [PATCH v5 1/4] amd-iommu: add flush iommu_ops
The iommu_ops structure contains two methods for flushing: 'iotlb_flush' and
'iotlb_flush_all'. This patch adds implementations of these for AMD IOMMUs.
The iotlb_flush method takes a base DFN and a (4k) page count, but the
flush needs to be done by page order (i.e. 0, 9 or 18). Because a flush
operation is fairly expensive to perform, the code calculates the minimum
order single flush that will cover the specified page range rather than
performing multiple flushes.
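For illustration, the order selection can be exercised stand-alone. The
following is a minimal sketch that mirrors the patch's flush_count() helper;
the main() harness and the sample DFN values are purely illustrative and not
part of the patch:

/* Stand-alone illustration of how flush_count() picks a single flush order. */
#include <assert.h>
#include <stdio.h>

static unsigned long flush_count(unsigned long dfn, unsigned int page_count,
                                 unsigned int order)
{
    unsigned long start = dfn >> order;
    unsigned long end = ((dfn + page_count - 1) >> order) + 1;

    assert(end > start);
    return end - start;
}

int main(void)
{
    /* 512 pages starting at a 2M-aligned DFN fit one order 9 region. */
    printf("%lu\n", flush_count(0x200, 512, 9));  /* prints 1: single order 9 flush */

    /* Shifted by one page, the same range straddles two order 9 regions... */
    printf("%lu\n", flush_count(0x201, 512, 9));  /* prints 2 */

    /* ...so the next order up (18) is tried, which covers it in one flush. */
    printf("%lu\n", flush_count(0x201, 512, 18)); /* prints 1: single order 18 flush */

    return 0;
}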
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Brian Woods <brian.woods@xxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
v4:
- Fix flush_count() properly this time.
v3:
- Really get rid of dfn_lt().
- Fix flush_count().
v2:
- Treat passing INVALID_DFN to iommu_iotlb_flush() as an error, and a zero
  page_count as a no-op (see the illustrative fragment below).
- Get rid of dfn_lt().
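The following fragment illustrates the caller-visible semantics described in
the v2 note above (a hypothetical Xen-internal caller; the variables d, dfn_l
and page_count are placeholders, and _dfn() is the usual dfn_t constructor):

/* Hypothetical caller, showing only the new argument handling in iommu_iotlb_flush(). */
int rc;

rc = iommu_iotlb_flush(d, _dfn(dfn_l), 0);          /* zero page_count: no-op, returns 0 */
rc = iommu_iotlb_flush(d, INVALID_DFN, page_count); /* invalid DFN: returns -EINVAL */
rc = iommu_iotlb_flush(d, _dfn(dfn_l), page_count); /* normal case: flush is performed */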
---
 xen/drivers/passthrough/amd/iommu_map.c       | 50 +++++++++++++++++++++++++++
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |  2 ++
 xen/drivers/passthrough/iommu.c               |  6 +++-
 xen/drivers/passthrough/vtd/iommu.c           |  2 ++
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  3 ++
 5 files changed, 62 insertions(+), 1 deletion(-)
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 2429e01bb4..de5a880070 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -634,6 +634,56 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn)
     spin_unlock(&hd->arch.mapping_lock);
     amd_iommu_flush_pages(d, dfn_x(dfn), 0);
+    return 0;
+}
+
+static unsigned long flush_count(unsigned long dfn, unsigned int page_count,
+                                 unsigned int order)
+{
+    unsigned long start = dfn >> order;
+    unsigned long end = ((dfn + page_count - 1) >> order) + 1;
+
+    ASSERT(end > start);
+    return end - start;
+}
+
+int amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
+                                unsigned int page_count)
+{
+    unsigned long dfn_l = dfn_x(dfn);
+
+    ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+
+    /* If the range wraps then just flush everything */
+    if ( dfn_l + page_count < dfn_l )
+    {
+        amd_iommu_flush_all_pages(d);
+        return 0;
+    }
+
+    /*
+     * Flushes are expensive so find the minimal single flush that will
+     * cover the page range.
+     *
+     * NOTE: It is unnecessary to round down the DFN value to align with
+     *       the flush order here. This is done by the internals of the
+     *       flush code.
+     */
+    if ( page_count == 1 ) /* order 0 flush count */
+        amd_iommu_flush_pages(d, dfn_l, 0);
+    else if ( flush_count(dfn_l, page_count, 9) == 1 )
+        amd_iommu_flush_pages(d, dfn_l, 9);
+    else if ( flush_count(dfn_l, page_count, 18) == 1 )
+        amd_iommu_flush_pages(d, dfn_l, 18);
+    else
+        amd_iommu_flush_all_pages(d);
+
+    return 0;
+}
+
+int amd_iommu_flush_iotlb_all(struct domain *d)
+{
+    amd_iommu_flush_all_pages(d);
+    return 0;
 }
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 900136390d..33a3798f36 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -579,6 +579,8 @@ static const struct iommu_ops __initconstrel amd_iommu_ops = {
     .teardown = amd_iommu_domain_destroy,
     .map_page = amd_iommu_map_page,
     .unmap_page = amd_iommu_unmap_page,
+    .iotlb_flush = amd_iommu_flush_iotlb_pages,
+    .iotlb_flush_all = amd_iommu_flush_iotlb_all,
     .free_page_table = deallocate_page_table,
     .reassign_device = reassign_device,
     .get_device_group_id = amd_iommu_group_id,
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index ac62d7f52a..c1cce08551 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -414,9 +414,13 @@ int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count)
     const struct domain_iommu *hd = dom_iommu(d);
     int rc;
-    if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
+    if ( !iommu_enabled || !hd->platform_ops ||
+         !hd->platform_ops->iotlb_flush || !page_count )
         return 0;
+    if ( dfn_eq(dfn, INVALID_DFN) )
+        return -EINVAL;
+
     rc = hd->platform_ops->iotlb_flush(d, dfn, page_count);
     if ( unlikely(rc) )
     {
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 1601278b07..d2fa5e2b25 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -635,6 +635,8 @@ static int __must_check iommu_flush_iotlb_pages(struct domain *d,
                                                 dfn_t dfn,
                                                 unsigned int page_count)
 {
+    ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+
     return iommu_flush_iotlb(d, dfn, 1, page_count);
 }
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 718a44f956..88715329ca 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -60,6 +60,9 @@ int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
                                        paddr_t phys_addr, unsigned long size,
                                        int iw, int ir);
+int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
+                                             unsigned int page_count);
+int __must_check amd_iommu_flush_iotlb_all(struct domain *d);
 /* Share p2m table with iommu */
 void amd_iommu_share_p2m(struct domain *d);
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel