
[Xen-devel] [PATCH v3 03/10] IOMMU/MMU: enhance the call trees of IOMMU unmapping and mapping



When an IOMMU mapping fails, we issue a best-effort rollback: we stop
establishing further IOMMU mappings, unmap the mappings created so far,
and then report the error up the call trees. When rollback is not
feasible for the hardware domain (in the early initialization phase, or
as a trade-off against complexity), we proceed on a best-effort basis,
merely printing an error message.
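
The mapping paths hence follow the shape sketched below (an
illustrative C fragment only, not code lifted from this patch; "order",
"flags", a plain unsigned long "mfn" and the surrounding declarations
stand in for the per-call-site specifics of the hunks that follow):

    /* Sketch: roll back on the first mapping failure. */
    int rc = 0, ret = 0;
    unsigned long i;

    for ( i = 0; i < (1UL << order); i++ )
    {
        rc = iommu_map_page(d, gfn + i, mfn + i, flags);

        if ( !ret && unlikely(rc) )
        {
            while ( i-- )                     /* unwind prior maps */
                iommu_unmap_page(d, gfn + i);

            ret = rc;                         /* latch first error */
            break;
        }
    }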

IOMMU unmapping, by contrast, should continue despite an error, in an
attempt at best-effort cleanup.
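
Concretely, the unmapping loops latch the first error but keep going,
so that as much as possible still gets unmapped (again just a sketch,
under the same assumptions as above):

    /* Sketch: keep unmapping past errors, remember the first one. */
    int rc = 0, ret;
    unsigned long i;

    for ( i = 0; i < (1UL << order); i++ )
    {
        ret = iommu_unmap_page(d, gfn + i);

        if ( !rc && unlikely(ret) )
            rc = ret;           /* first error wins; carry on */
    }

    return rc;                  /* 0 on full success */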

Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>

CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm.c               | 13 ++++++++-----
 xen/arch/x86/mm/p2m-ept.c       | 27 +++++++++++++++++++++++++--
 xen/arch/x86/mm/p2m-pt.c        | 24 ++++++++++++++++++++----
 xen/arch/x86/mm/p2m.c           | 11 +++++++++--
 xen/drivers/passthrough/iommu.c | 14 +++++++++++++-
 5 files changed, 75 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index a42097f..427097d 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2467,7 +2467,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
                            int preemptible)
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
-    int rc = 0;
+    int rc = 0, ret = 0;
 
     ASSERT(!(type & ~(PGT_type_mask | PGT_pae_xen_l2)));
 
@@ -2578,11 +2578,11 @@ static int __get_page_type(struct page_info *page, unsigned long type,
         if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
         {
             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+                ret = iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
             else if ( type == PGT_writable_page )
-                iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
-                               page_to_mfn(page),
-                               IOMMUF_readable|IOMMUF_writable);
+                ret = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+                                     page_to_mfn(page),
+                                     IOMMUF_readable|IOMMUF_writable);
         }
     }
 
@@ -2599,6 +2599,9 @@ static int __get_page_type(struct page_info *page, unsigned long type,
     if ( (x & PGT_partial) && !(nx & PGT_partial) )
         put_page(page);
 
+    if ( !rc )
+        rc = ret;
+
     return rc;
 }
 
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 1ed5b47..df87944 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -821,6 +821,8 @@ out:
     if ( needs_sync )
         ept_sync_domain(p2m);
 
+    ret = 0;
+
     /* For host p2m, may need to change VT-d page table.*/
     if ( rc == 0 && p2m_is_hostp2m(p2m) && need_iommu(d) &&
          need_modify_vtd_table )
@@ -831,11 +833,29 @@ out:
         {
             if ( iommu_flags )
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+                {
+                    rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i,
+                                        iommu_flags);
+
+                    if ( !ret && unlikely(rc) )
+                    {
+                        while ( i-- )
+                            iommu_unmap_page(d, gfn + i);
+
+                        ret = rc;
+                        break;
+                    }
+                }
             else
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_unmap_page(d, gfn + i);
+                {
+                    rc = iommu_unmap_page(d, gfn + i);
+
+                    if ( !ret && unlikely(rc) )
+                        ret = rc;
+                }
         }
+
+        rc = 0;
     }
 
     unmap_domain_page(table);
@@ -850,6 +870,9 @@ out:
     if ( rc == 0 && p2m_is_hostp2m(p2m) )
         p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
 
+    if ( !rc )
+        rc = ret;
+
     return rc;
 }
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3d80612..9f5539e 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -498,7 +498,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     l1_pgentry_t intermediate_entry = l1e_empty();
     l2_pgentry_t l2e_content;
     l3_pgentry_t l3e_content;
-    int rc;
+    int rc, ret;
     unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt);
     /*
      * old_mfn and iommu_old_flags control possible flush/update needs on the
@@ -680,11 +680,27 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         }
         else if ( iommu_pte_flags )
             for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
-                               iommu_pte_flags);
+            {
+                ret = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
+                                     iommu_pte_flags);
+
+                if ( !rc && unlikely(ret) )
+                {
+                    while ( i-- )
+                        iommu_unmap_page(p2m->domain, gfn + i);
+
+                    rc = ret;
+                    break;
+                }
+            }
         else
             for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_unmap_page(p2m->domain, gfn + i);
+            {
+                ret = iommu_unmap_page(p2m->domain, gfn + i);
+
+                if ( !rc && unlikely(ret) )
+                    rc = ret;
+            }
     }
 
     /*
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 6eef2f3..6a9bba1 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -636,13 +636,20 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
     mfn_t mfn_return;
     p2m_type_t t;
     p2m_access_t a;
+    int rc = 0, ret;
 
     if ( !paging_mode_translate(p2m->domain) )
     {
         if ( need_iommu(p2m->domain) )
             for ( i = 0; i < (1 << page_order); i++ )
-                iommu_unmap_page(p2m->domain, mfn + i);
-        return 0;
+            {
+                ret = iommu_unmap_page(p2m->domain, mfn + i);
+
+                if ( !rc && unlikely(ret) )
+                    rc = ret;
+            }
+
+        return rc;
     }
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index a0003ac..d74433d 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -172,6 +172,8 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
     {
         struct page_info *page;
         unsigned int i = 0;
+        int ret, rc = 0;
+
         page_list_for_each ( page, &d->page_list )
         {
             unsigned long mfn = page_to_mfn(page);
@@ -182,10 +184,20 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
                  ((page->u.inuse.type_info & PGT_type_mask)
                   == PGT_writable_page) )
                 mapping |= IOMMUF_writable;
-            hd->platform_ops->map_page(d, gfn, mfn, mapping);
+
+            ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+
+            if ( unlikely(ret) )
+                rc = ret;
+
             if ( !(i++ & 0xfffff) )
                 process_pending_softirqs();
         }
+
+        if ( rc )
+            printk(XENLOG_WARNING
+                   "iommu_hwdom_init: IOMMU mapping failed for dom%d.\n",
+                   d->domain_id);
     }
 
     return hd->platform_ops->hwdom_init(d);
-- 
1.9.1

