
[Xen-changelog] [xen stable-4.4] VT-d: don't crash when PTE bits 52 and up are non-zero



commit b4bde3fd8970ae44b88b5dc3f26ed9c1ea9a22da
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Feb 3 12:28:00 2015 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Feb 3 12:28:00 2015 +0100

    VT-d: don't crash when PTE bits 52 and up are non-zero
    
    This can (and will) legitimately be the case when page tables are
    shared with EPT (more of a problem before p2m_access_rwx became the
    zero value, but still possible even now when a guest's default access
    type is something other than p2m_access_rwx), leading to an
    unconditional crash (in print_vtd_entries()) when a DMA remapping
    fault occurs.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    master commit: 46e0baf59105200d43612cf0c59de216958b008d
    master date: 2015-01-07 11:13:58 +0100
---
 xen/drivers/passthrough/vtd/iommu.c |   26 ++++++++------------------
 xen/drivers/passthrough/vtd/iommu.h |    2 +-
 xen/drivers/passthrough/vtd/utils.c |    6 +++---
 3 files changed, 12 insertions(+), 22 deletions(-)
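
The core of the fix is the extra PADDR_MASK in dma_pte_addr(): software-defined
bits above bit 51 in a shared EPT entry no longer leak into what the IOMMU code
treats as a machine address. Below is a minimal standalone sketch (not part of
the patch) of that effect, assuming PADDR_BITS is 52 as on x86 Xen; the PTE
value is hypothetical, with one software bit above bit 51 set as a shared EPT
entry may carry.

/*
 * Sketch only: demonstrates old vs. new dma_pte_addr() masking.
 * PADDR_BITS/PADDR_MASK/PAGE_MASK_4K mirror their usual Xen definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define PADDR_BITS   52
#define PADDR_MASK   ((1ULL << PADDR_BITS) - 1)
#define PAGE_MASK_4K (~0xfffULL)

int main(void)
{
    /* Hypothetical PTE: frame address, r/w bits, plus a high software bit. */
    uint64_t val = (1ULL << 62) | 0xabcdef000ULL | 3;

    /* Old dma_pte_addr(): the high software bit survives the mask. */
    printf("old: %#llx\n", (unsigned long long)(val & PAGE_MASK_4K));

    /* New dma_pte_addr(): only real address bits remain. */
    printf("new: %#llx\n", (unsigned long long)(val & PADDR_MASK & PAGE_MASK_4K));

    return 0;
}

With the old macro the bogus high bit ended up in values later used as machine
addresses, which is what made print_vtd_entries() crash on a DMA remapping
fault; with the new macro both the page-table walk and the dump see only the
real frame address.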

diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index be346b2..81ea72f 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -259,8 +259,7 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
     struct dma_pte *parent, *pte = NULL;
     int level = agaw_to_level(hd->agaw);
     int offset;
-    u64 pte_maddr = 0, maddr;
-    u64 *vaddr = NULL;
+    u64 pte_maddr = 0;
 
     addr &= (((u64)1) << addr_width) - 1;
     ASSERT(spin_is_locked(&hd->mapping_lock));
@@ -282,19 +281,19 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
         offset = address_level_offset(addr, level);
         pte = &parent[offset];
 
-        if ( dma_pte_addr(*pte) == 0 )
+        pte_maddr = dma_pte_addr(*pte);
+        if ( !pte_maddr )
         {
             if ( !alloc )
                 break;
 
             pdev = pci_get_pdev_by_domain(domain, -1, -1, -1);
             drhd = acpi_find_matched_drhd_unit(pdev);
-            maddr = alloc_pgtable_maddr(drhd, 1);
-            if ( !maddr )
+            pte_maddr = alloc_pgtable_maddr(drhd, 1);
+            if ( !pte_maddr )
                 break;
 
-            dma_set_pte_addr(*pte, maddr);
-            vaddr = map_vtd_domain_page(maddr);
+            dma_set_pte_addr(*pte, pte_maddr);
 
             /*
              * high level table always sets r/w, last level
@@ -304,21 +303,12 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
             dma_set_pte_writable(*pte);
             iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
         }
-        else
-        {
-            vaddr = map_vtd_domain_page(pte->val);
-        }
 
         if ( level == 2 )
-        {
-            pte_maddr = pte->val & PAGE_MASK_4K;
-            unmap_vtd_domain_page(vaddr);
             break;
-        }
 
         unmap_vtd_domain_page(parent);
-        parent = (struct dma_pte *)vaddr;
-        vaddr = NULL;
+        parent = map_vtd_domain_page(pte_maddr);
         level--;
     }
 
@@ -2447,7 +2437,7 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
             printk("%*sgfn: %08lx mfn: %08lx\n",
                    indent, "",
                    (unsigned long)(address >> PAGE_SHIFT_4K),
-                   (unsigned long)(pte->val >> PAGE_SHIFT_4K));
+                   (unsigned long)(dma_pte_addr(*pte) >> PAGE_SHIFT_4K));
     }
 
     unmap_vtd_domain_page(pt_vaddr);
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index 1a1f20a..a90f81a 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -276,7 +276,7 @@ struct dma_pte {
 #define dma_set_pte_snp(p)  do {(p).val |= DMA_PTE_SNP;} while(0)
 #define dma_set_pte_prot(p, prot) \
             do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
 #define dma_set_pte_addr(p, addr) do {\
             (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
 #define dma_pte_present(p) (((p).val & 3) != 0)
diff --git a/xen/drivers/passthrough/vtd/utils.c b/xen/drivers/passthrough/vtd/utils.c
index a33564b..db4c326 100644
--- a/xen/drivers/passthrough/vtd/utils.c
+++ b/xen/drivers/passthrough/vtd/utils.c
@@ -170,16 +170,16 @@ void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn)
         l_index = get_level_index(gmfn, level);
         printk("    l%d_index = %x\n", level, l_index);
 
-        pte.val = val = l[l_index];
+        pte.val = l[l_index];
         unmap_vtd_domain_page(l);
-        printk("    l%d[%x] = %"PRIx64"\n", level, l_index, val);
+        printk("    l%d[%x] = %"PRIx64"\n", level, l_index, pte.val);
 
-        pte.val = val;
         if ( !dma_pte_present(pte) )
         {
             printk("    l%d[%x] not present\n", level, l_index);
             break;
         }
+        val = dma_pte_addr(pte);
     } while ( --level );
 }
 
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.4
