[Xen-changelog] [xen master] IOMMU/x86: avoid pages without GFN in page table creation/updating



commit 7349efc695852c70b8c5e24d9ecf208757e8b471
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed May 13 16:35:54 2015 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed May 13 16:35:54 2015 +0200

    IOMMU/x86: avoid pages without GFN in page table creation/updating
    
    Handing INVALID_GFN to functions like hd->platform_ops->map_page()
    just can't do any good, and the ioreq server code results in such pages
    being on the list of ones owned by a guest.
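
    In essence, the change simply refuses to hand such a GFN on to the
    mapping hooks at all. Condensed, illustrative form only (of the hunks
    further down, not an additional change):

        unsigned long mfn = page_to_mfn(page);
        unsigned long gfn = mfn_to_gmfn(d, mfn);
        int rc = 0;

        /* Pages without a valid GFN (e.g. ioreq server pages) must not
         * reach the IOMMU mapping hooks. */
        if ( gfn != INVALID_MFN )
        {
            ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
            rc = hd->platform_ops->map_page(d, gfn, mfn,
                                            IOMMUF_readable | IOMMUF_writable);
        }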
    
    While - as suggested by Tim - we should use get_gfn()/put_gfn() there
    to eliminate races, we really can't due to holding the domain's page
    alloc lock. Ultimately arch_iommu_populate_page_table() may need to be
    switched to be GFN based. Here is what Tim said in this regard:
    "Ideally this loop would be iterating over all gfns in the p2m rather
     than over all owned MFNs.  As long as needs_iommu gets set first,
     such a loop could safely be paused and restarted without worrying
     about concurrent updates.  The code could even stay in this file,
     though exposing an iterator from the p2m code would be a lot more
     efficient."
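
    For illustration only (this is not part of the patch, and
    p2m_next_mapped_gfn() is a made-up iterator that the p2m code would
    have to expose), such a GFN-based walk might look roughly like:

        /* Hypothetical sketch of a GFN-based population loop. */
        static int populate_by_gfn(struct domain *d)
        {
            struct hvm_iommu *hd = domain_hvm_iommu(d);
            unsigned long gfn = 0, mfn;
            int rc = 0;

            /* needs_iommu must already be set, so that concurrent p2m
             * updates also get reflected into the IOMMU page tables. */
            /* p2m_next_mapped_gfn(): hypothetical iterator returning the
             * next populated GFN >= gfn and its MFN, or false when done. */
            while ( !rc && p2m_next_mapped_gfn(d, &gfn, &mfn) )
            {
                rc = hd->platform_ops->map_page(d, gfn, mfn,
                                                IOMMUF_readable |
                                                IOMMUF_writable);
                /* Safe point to pause and later restart at gfn + 1. */
                gfn++;
            }

            return rc;
        }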
    
    Original by Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, using further
    suggestions from Tim Deegan <tim@xxxxxxx>.
    
    Reported-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Tested-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Yang Zhang <yang.z.zhang@xxxxxxxxx>
    Acked-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@xxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_map.c      |   13 ++++++++++---
 xen/drivers/passthrough/vtd/iommu.h          |    1 -
 xen/drivers/passthrough/x86/iommu.c          |   15 +++++++++++----
 xen/include/asm-x86/hvm/iommu.h              |    2 ++
 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h |    2 --
 5 files changed, 23 insertions(+), 10 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 495ff5c..64c5225 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -557,6 +557,10 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
     unsigned long old_root_mfn;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
+    if ( gfn == INVALID_MFN )
+        return -EADDRNOTAVAIL;
+    ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
+
     level = hd->arch.paging_mode;
     old_root = hd->arch.root_table;
     offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
@@ -729,12 +733,15 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
      * we might need a deeper page table for lager gfn now */
     if ( is_hvm_domain(d) )
     {
-        if ( update_paging_mode(d, gfn) )
+        int rc = update_paging_mode(d, gfn);
+
+        if ( rc )
         {
             spin_unlock(&hd->arch.mapping_lock);
             AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
-            domain_crash(d);
-            return -EFAULT;
+            if ( rc != -EADDRNOTAVAIL )
+                domain_crash(d);
+            return rc;
         }
     }
 
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index d6e6520..80f8830 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -482,7 +482,6 @@ struct qinval_entry {
 #define VTD_PAGE_TABLE_LEVEL_3  3
 #define VTD_PAGE_TABLE_LEVEL_4  4
 
-#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 #define MAX_IOMMU_REGS 0xc0
 
 extern struct list_head acpi_drhd_units;
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 9eb8d33..90b78d0 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -59,10 +59,17 @@ int arch_iommu_populate_page_table(struct domain *d)
         if ( has_hvm_container_domain(d) ||
             (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
         {
-            BUG_ON(SHARED_M2P(mfn_to_gmfn(d, page_to_mfn(page))));
-            rc = hd->platform_ops->map_page(
-                d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page),
-                IOMMUF_readable|IOMMUF_writable);
+            unsigned long mfn = page_to_mfn(page);
+            unsigned long gfn = mfn_to_gmfn(d, mfn);
+
+            if ( gfn != INVALID_MFN )
+            {
+                ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
+                BUG_ON(SHARED_M2P(gfn));
+                rc = hd->platform_ops->map_page(d, gfn, mfn,
+                                                IOMMUF_readable |
+                                                IOMMUF_writable);
+            }
             if ( rc )
             {
                 page_list_add(page, &d->page_list);
diff --git a/xen/include/asm-x86/hvm/iommu.h b/xen/include/asm-x86/hvm/iommu.h
index 46cb126..3a4c68a 100644
--- a/xen/include/asm-x86/hvm/iommu.h
+++ b/xen/include/asm-x86/hvm/iommu.h
@@ -46,6 +46,8 @@ struct g2m_ioport {
     unsigned int np;
 };
 
+#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+
 struct arch_hvm_iommu
 {
     u64 pgd_maddr;                 /* io page directory machine address */
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
index 2ac6f75..44a2fd9 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
@@ -464,8 +464,6 @@
 #define IOMMU_CONTROL_DISABLED 0
 #define IOMMU_CONTROL_ENABLED  1
 
-#define DEFAULT_DOMAIN_ADDRESS_WIDTH    48
-
 /* interrupt remapping table */
 #define INT_REMAP_ENTRY_REMAPEN_MASK    0x00000001
 #define INT_REMAP_ENTRY_REMAPEN_SHIFT   0
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
