
[xen staging] AMD/IOMMU: correct shattering of super pages



commit 2a758376f9e2bd277b6067952517a301da87dc86
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Oct 26 14:38:35 2020 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Oct 26 14:38:35 2020 +0100

    AMD/IOMMU: correct shattering of super pages
    
    Fill the new page table _before_ installing it into a live page table
    hierarchy, as installing a blank page first risks I/O faults on
    sub-ranges of the original super page which aren't part of the range
    for which mappings are being updated.
    
    While at it, also do away with mapping and unmapping the same fresh
    intermediate page table page once per entry to be written.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Paul Durrant <paul@xxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_map.c | 50 +++++++++++++++++++--------------
 1 file changed, 29 insertions(+), 21 deletions(-)
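
To illustrate the fix, here is a minimal, self-contained C sketch of the
two orderings (stand-in types and names, not Xen code; the C11 release
store plays the role the patch gives to smp_wmb() followed by
set_iommu_pde_present()):

    #include <stdatomic.h>
    #include <stdint.h>

    #define ENTRIES 512                   /* PTEs per table (2^9) */

    /* Re-create the mappings the super page used to provide. */
    static void fill(uint64_t *fresh, uint64_t base, uint64_t step)
    {
        for ( unsigned int i = 0; i < ENTRIES; ++i )
            fresh[i] = base + i * step;
    }

    /* Buggy order: the blank table becomes visible to the IOMMU before
     * it is filled, so DMA to sub-ranges of the old super page outside
     * the range being updated can fault in the window. */
    static void shatter_buggy(_Atomic(uint64_t *) *pde, uint64_t *fresh,
                              uint64_t base, uint64_t step)
    {
        atomic_store_explicit(pde, fresh, memory_order_release);
        fill(fresh, base, step);
    }

    /* Fixed order: fill first, then publish. */
    static void shatter_fixed(_Atomic(uint64_t *) *pde, uint64_t *fresh,
                              uint64_t base, uint64_t step)
    {
        fill(fresh, base, step);
        atomic_store_explicit(pde, fresh, memory_order_release);
    }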

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 17418d39b3..f773ab33fd 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -81,19 +81,34 @@ static unsigned int set_iommu_pde_present(union amd_iommu_pte *pte,
     return flush_flags;
 }
 
-static unsigned int set_iommu_pte_present(unsigned long pt_mfn,
-                                          unsigned long dfn,
-                                          unsigned long next_mfn,
-                                          int pde_level,
-                                          bool iw, bool ir)
+static unsigned int set_iommu_ptes_present(unsigned long pt_mfn,
+                                           unsigned long dfn,
+                                           unsigned long next_mfn,
+                                           unsigned int nr_ptes,
+                                           unsigned int pde_level,
+                                           bool iw, bool ir)
 {
     union amd_iommu_pte *table, *pde;
-    unsigned int flush_flags;
+    unsigned int page_sz, flush_flags = 0;
 
     table = map_domain_page(_mfn(pt_mfn));
     pde = &table[pfn_to_pde_idx(dfn, pde_level)];
+    page_sz = 1U << (PTE_PER_TABLE_SHIFT * (pde_level - 1));
+
+    if ( (void *)(pde + nr_ptes) > (void *)table + PAGE_SIZE )
+    {
+        ASSERT_UNREACHABLE();
+        return 0;
+    }
+
+    while ( nr_ptes-- )
+    {
+        flush_flags |= set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
+
+        ++pde;
+        next_mfn += page_sz;
+    }
 
-    flush_flags = set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
     unmap_domain_page(table);
 
     return flush_flags;
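
To put numbers on the new page_sz computation above: PTE_PER_TABLE_SHIFT
is 9 in Xen's AMD IOMMU headers (512 entries per table), so a PTE at
level pde_level spans 512^(pde_level-1) 4k frames. A small illustrative
example (pte_span() is a hypothetical name for the same expression):

    #include <assert.h>

    #define PTE_PER_TABLE_SHIFT 9            /* 512 entries per table */

    /* 4k frames spanned by one PTE at the given level. */
    static unsigned int pte_span(unsigned int pde_level)
    {
        return 1U << (PTE_PER_TABLE_SHIFT * (pde_level - 1));
    }

    int main(void)
    {
        assert(pte_span(1) == 1);            /* leaf PTE: one 4k page */
        assert(pte_span(2) == 512);          /* level 2: a 2M super page */
        assert(pte_span(3) == 512 * 512);    /* level 3: a 1G super page */
        return 0;
    }

This is why the loop above advances next_mfn by page_sz per entry
written.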
@@ -220,11 +235,8 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
         /* Split super page frame into smaller pieces.*/
         if ( pde->pr && !pde->next_level && next_table_mfn )
         {
-            int i;
             unsigned long mfn, pfn;
-            unsigned int page_sz;
 
-            page_sz = 1 << (PTE_PER_TABLE_SHIFT * (next_level - 1));
             pfn =  dfn & ~((1 << (PTE_PER_TABLE_SHIFT * next_level)) - 1);
             mfn = next_table_mfn;
 
@@ -238,17 +250,13 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
             }
 
             next_table_mfn = mfn_x(page_to_mfn(table));
+
+            set_iommu_ptes_present(next_table_mfn, pfn, mfn, PTE_PER_TABLE_SIZE,
+                                   next_level, true, true);
+            smp_wmb();
             set_iommu_pde_present(pde, next_table_mfn, next_level, true,
                                   true);
 
-            for ( i = 0; i < PTE_PER_TABLE_SIZE; i++ )
-            {
-                set_iommu_pte_present(next_table_mfn, pfn, mfn, next_level,
-                                      true, true);
-                mfn += page_sz;
-                pfn += page_sz;
-             }
-
             amd_iommu_flush_all_pages(d);
         }
 
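The removed loop above also shows the second point from the commit
message: set_iommu_pte_present() mapped and unmapped the same fresh page
table page once per entry. A minimal sketch of the before/after shape
(map_page()/unmap_page() are hypothetical stand-ins simulating
map_domain_page()/unmap_domain_page()):

    #include <stdint.h>

    #define ENTRIES 512

    static uint64_t fake_page[ENTRIES];  /* simulated page table page */

    static uint64_t *map_page(void) { return fake_page; }
    static void unmap_page(uint64_t *table) { (void)table; }

    /* Old shape: one map/unmap round trip per entry written. */
    static void write_ptes_old(uint64_t first, uint64_t step)
    {
        for ( unsigned int i = 0; i < ENTRIES; ++i )
        {
            uint64_t *table = map_page();

            table[i] = first + i * step;
            unmap_page(table);
        }
    }

    /* New shape, as in set_iommu_ptes_present(): map once, write the
     * whole run, unmap once. */
    static void write_ptes_new(uint64_t first, uint64_t step)
    {
        uint64_t *table = map_page();

        for ( unsigned int i = 0; i < ENTRIES; ++i )
            table[i] = first + i * step;
        unmap_page(table);
    }
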
@@ -318,9 +326,9 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
     }
 
     /* Install 4k mapping */
-    *flush_flags |= set_iommu_pte_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn),
-                                          1, (flags & IOMMUF_writable),
-                                          (flags & IOMMUF_readable));
+    *flush_flags |= set_iommu_ptes_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn),
+                                           1, 1, (flags & IOMMUF_writable),
+                                           (flags & IOMMUF_readable));
 
     spin_unlock(&hd->arch.mapping_lock);
 
--
generated by git-patchbot for /home/xen/git/xen.git#staging