[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v6 06/14] iommu: track reserved ranges using a rangeset



Ranges that should be considered reserved in the IOMMU are not necessarily
limited to RMRRs. If iommu_inclusive_mapping is set then any frame number
falling within an E820 reserved region should also be considered as
reserved in the IOMMU.

This patch adds a rangeset to the domain_iommu structure that is then used
to track all reserved ranges. This will be needed by a subsequent patch
to test whether it is safe to modify a particular IOMMU entry.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>

v6:
 - Re-base.

v2:
 - New in v2.
---
 xen/drivers/passthrough/iommu.c       | 10 +++++++++-
 xen/drivers/passthrough/vtd/iommu.c   | 20 +++++++++++++-------
 xen/drivers/passthrough/vtd/x86/vtd.c | 15 ++++++++++++++-
 xen/include/xen/iommu.h               |  6 ++++++
 4 files changed, 42 insertions(+), 9 deletions(-)

diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index f75cab130d..9b63e9efee 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -147,6 +147,10 @@ int iommu_domain_init(struct domain *d)
     if ( !iommu_enabled )
         return 0;
 
+    hd->reserved_ranges = rangeset_new(d, NULL, 0);
+    if ( !hd->reserved_ranges )
+        return -ENOMEM;
+
     hd->platform_ops = iommu_get_ops();
     return hd->platform_ops->init(d);
 }
@@ -248,12 +252,16 @@ int iommu_construct(struct domain *d)
 
 void iommu_domain_destroy(struct domain *d)
 {
-    if ( !iommu_enabled || !dom_iommu(d)->platform_ops )
+    const struct domain_iommu *hd = dom_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops )
         return;
 
     iommu_teardown(d);
 
     arch_iommu_domain_destroy(d);
+
+    rangeset_destroy(hd->reserved_ranges);
 }
 
 int iommu_map_page(struct domain *d, bfn_t bfn, mfn_t mfn,
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index c9f50f04ad..282e227414 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1910,6 +1910,7 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
     unsigned long end_pfn = PAGE_ALIGN_4K(rmrr->end_address) >> PAGE_SHIFT_4K;
     struct mapped_rmrr *mrmrr;
     struct domain_iommu *hd = dom_iommu(d);
+    int err = 0;
 
     ASSERT(pcidevs_locked());
     ASSERT(rmrr->base_address < rmrr->end_address);
@@ -1923,8 +1924,6 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
         if ( mrmrr->base == rmrr->base_address &&
              mrmrr->end == rmrr->end_address )
         {
-            int ret = 0;
-
             if ( map )
             {
                 ++mrmrr->count;
@@ -1934,28 +1933,35 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
             if ( --mrmrr->count )
                 return 0;
 
-            while ( base_pfn < end_pfn )
+            err = rangeset_remove_range(hd->reserved_ranges,
+                                        base_pfn, end_pfn);
+            while ( !err && base_pfn < end_pfn )
             {
                 if ( clear_identity_p2m_entry(d, base_pfn) )
-                    ret = -ENXIO;
+                    err = -ENXIO;
+
                 base_pfn++;
             }
 
             list_del(&mrmrr->list);
             xfree(mrmrr);
-            return ret;
+            return err;
         }
     }
 
     if ( !map )
         return -ENOENT;
 
+    err = rangeset_add_range(hd->reserved_ranges, base_pfn, end_pfn);
+    if ( err )
+        return err;
+
     while ( base_pfn < end_pfn )
     {
-        int err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag);
-
+        err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag);
         if ( err )
             return err;
+
         base_pfn++;
     }
 
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index fb674cdc68..601d82a16e 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -154,8 +154,21 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
 
         rc = iommu_map_page(d, _bfn(pfn), _mfn(pfn),
                            IOMMUF_readable | IOMMUF_writable);
+
+        /*
+         * The only reason a reserved page would be mapped is that
+         * iommu_inclusive_mapping is set, in which case it needs to be
+         * marked as reserved in the IOMMU.
+         */
+        if ( !rc && page_is_ram_type(pfn, RAM_TYPE_RESERVED) )
+        {
+            ASSERT(iommu_inclusive_mapping);
+
+            rc = rangeset_add_singleton(dom_iommu(d)->reserved_ranges, pfn);
+        }
+
         if ( rc )
-            printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping failed: %d\n",
+            printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping/reservation failed: %d\n",
                    d->domain_id, rc);
 
         if (!(i & 0xfffff))
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 6edc6e1a10..8cdb530d49 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -122,6 +122,12 @@ struct domain_iommu {
 
     /* Features supported by the IOMMU */
     DECLARE_BITMAP(features, IOMMU_FEAT_count);
+
+    /*
+     * BFN ranges that are reserved in the domain IOMMU mappings and
+     * must not be modified after initialization.
+     */
+    struct rangeset *reserved_ranges;
 };
 
 #define dom_iommu(d)              (&(d)->iommu)
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.