
[PATCH v7 01/14] iommu: add preemption support to iommu_{un,}map()


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Tue, 5 Jul 2022 14:43:19 +0200
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Paul Durrant <paul@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Tue, 05 Jul 2022 12:43:26 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

From: Roger Pau Monné <roger.pau@xxxxxxxxxx>

The loop in iommu_{un,}map() can be arbitrarily large, and as such it
needs to handle preemption.  Introduce a new flag that signals whether
the function should do preemption checks; when a need for preemption is
actually found, the number of pages processed so far is returned.
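
Purely for illustration (not part of this patch): a hypothetical caller
wanting to make use of the new flag might loop along these lines, with
the continuation mechanism itself only hinted at:

    unsigned long done = 0;
    unsigned int flush_flags = 0;
    long rc;

    while ( done < page_count )
    {
        rc = iommu_map(d, dfn_add(dfn0, done), mfn_add(mfn0, done),
                       page_count - done, flags | IOMMUF_preempt,
                       &flush_flags);
        if ( rc <= 0 )
            break;          /* 0: all done; < 0: hard error */
        done += rc;         /* preempted: record progress ... */
        /* ... and arrange for a hypercall continuation to re-enter */
    }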

Note that the cleanup done in iommu_map() can now be incomplete if
preemption has happened, and hence callers would need to take care of
unmapping the whole range (i.e. including ranges already mapped by
previously preempted calls).  So far none of the callers care about
having those ranges unmapped, so error handling in
arch_iommu_hwdom_init() can be kept as-is.
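
If a future caller did care, it would have to track overall progress
itself.  A minimal sketch (again not part of this patch), reusing "done"
and "rc" from the loop above:

    /* On a hard error, undo everything mapped so far, including the
     * progress made by earlier, preempted invocations. */
    if ( rc < 0 && done )
        /* while statement to satisfy __must_check */
        while ( iommu_unmap(d, dfn0, done, 0, &flush_flags) )
            break;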

Note that iommu_legacy_{un,}map() are left without preemption handling:
callers of those interfaces aren't going to be modified to pass bigger
chunks, and hence the functions themselves won't be modified either.
They are legacy, and uses requiring preemption should be replaced with
iommu_{un,}map() instead.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v7: Integrate into series, with quite a few adjustments (beyond mere re-
    basing).

--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -308,13 +308,13 @@ static unsigned int mapping_order(const
     return order;
 }
 
-int iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
-              unsigned long page_count, unsigned int flags,
-              unsigned int *flush_flags)
+long iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
+               unsigned long page_count, unsigned int flags,
+               unsigned int *flush_flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
     unsigned long i;
-    unsigned int order;
+    unsigned int order, j = 0;
     int rc = 0;
 
     if ( !is_iommu_enabled(d) )
@@ -329,6 +329,11 @@ int iommu_map(struct domain *d, dfn_t df
 
         order = mapping_order(hd, dfn, mfn, page_count - i);
 
+        if ( (flags & IOMMUF_preempt) &&
+             ((!(++j & 0xfff) && general_preempt_check()) ||
+              i > LONG_MAX - (1UL << order)) )
+            return i;
+
         rc = iommu_call(hd->platform_ops, map_page, d, dfn, mfn,
                         flags | IOMMUF_order(order), flush_flags);
 
@@ -341,7 +346,7 @@ int iommu_map(struct domain *d, dfn_t df
                    d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
 
         /* while statement to satisfy __must_check */
-        while ( iommu_unmap(d, dfn0, i, flush_flags) )
+        while ( iommu_unmap(d, dfn0, i, 0, flush_flags) )
             break;
 
         if ( !is_hardware_domain(d) )
@@ -365,7 +370,10 @@ int iommu_legacy_map(struct domain *d, d
                      unsigned long page_count, unsigned int flags)
 {
     unsigned int flush_flags = 0;
-    int rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags);
+    int rc;
+
+    ASSERT(!(flags & IOMMUF_preempt));
+    rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags);
 
     if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
         rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
@@ -373,25 +381,33 @@ int iommu_legacy_map(struct domain *d, d
     return rc;
 }
 
-int iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
-                unsigned int *flush_flags)
+long iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
+                 unsigned int flags, unsigned int *flush_flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
     unsigned long i;
-    unsigned int order;
+    unsigned int order, j = 0;
     int rc = 0;
 
     if ( !is_iommu_enabled(d) )
         return 0;
 
+    ASSERT(!(flags & ~IOMMUF_preempt));
+
     for ( i = 0; i < page_count; i += 1UL << order )
     {
         dfn_t dfn = dfn_add(dfn0, i);
         int err;
 
         order = mapping_order(hd, dfn, _mfn(0), page_count - i);
+
+        if ( (flags & IOMMUF_preempt) &&
+             ((!(++j & 0xfff) && general_preempt_check()) ||
+              i > LONG_MAX - (1UL << order)) )
+            return i;
+
         err = iommu_call(hd->platform_ops, unmap_page, d, dfn,
-                         order, flush_flags);
+                         flags | IOMMUF_order(order), flush_flags);
 
         if ( likely(!err) )
             continue;
@@ -425,7 +441,7 @@ int iommu_unmap(struct domain *d, dfn_t
 int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned long page_count)
 {
     unsigned int flush_flags = 0;
-    int rc = iommu_unmap(d, dfn, page_count, &flush_flags);
+    int rc = iommu_unmap(d, dfn, page_count, 0, &flush_flags);
 
     if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
         rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -124,14 +124,15 @@ void arch_iommu_check_autotranslated_hwd
 void arch_iommu_hwdom_init(struct domain *d);
 
 /*
- * The following flags are passed to map operations and passed by lookup
- * operations.
+ * The following flags are passed to map (applicable ones also to unmap)
+ * operations, while some are passed back by lookup operations.
  */
 #define IOMMUF_order(n)  ((n) & 0x3f)
 #define _IOMMUF_readable 6
 #define IOMMUF_readable  (1u<<_IOMMUF_readable)
 #define _IOMMUF_writable 7
 #define IOMMUF_writable  (1u<<_IOMMUF_writable)
+#define IOMMUF_preempt   (1u << 8)
 
 /*
  * flush_flags:
@@ -153,12 +154,18 @@ enum
 #define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
 #define IOMMU_FLUSHF_all (1u << _IOMMU_FLUSHF_all)
 
-int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
-                           unsigned long page_count, unsigned int flags,
-                           unsigned int *flush_flags);
-int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
-                             unsigned long page_count,
-                             unsigned int *flush_flags);
+/*
+ * For both of these: Negative return values are error indicators. Zero
+ * indicates full successful completion of the request, while positive
+ * values indicate partial completion, which is possible only with
+ * IOMMUF_preempt passed in.
+ */
+long __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+                            unsigned long page_count, unsigned int flags,
+                            unsigned int *flush_flags);
+long __must_check iommu_unmap(struct domain *d, dfn_t dfn,
+                              unsigned long page_count, unsigned int flags,
+                              unsigned int *flush_flags);
 
 int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                                   unsigned long page_count,
