
[Xen-changelog] [xen-unstable] vt-d: Fix a bug in addr_to_dma_page_maddr(), and add timeout in infinite loop



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1208856005 -3600
# Node ID da261c25f160ae3cddea1a4e5d1b045cceb7e03b
# Parent  65802c51edb5577faae351194b8b6b495cebfaa8
vt-d: Fix a bug in addr_to_dma_page_maddr(), and add timeout in infinite loop

In addr_to_dma_page_maddr(), pte points into the page mapped from
parent, so it must not be dereferenced after
unmap_vtd_domain_page(parent); the unmap is now deferred until after the
last use of pte. In addition, timeouts are added to several
status-polling loops that could previously spin forever, and
dma_pte_free_pagetable() now skips a missing page table (using a u64
address cursor) instead of returning early. Illustrative sketches of the
two patterns follow the diffstat and the patch below.

Signed-off-by: Weidong Han <weidong.han@xxxxxxxxx>
---
 xen/drivers/passthrough/vtd/iommu.c |   34 +++++++++++++++++++++-------------
 1 files changed, 21 insertions(+), 13 deletions(-)
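
The first hunks below address a pointer-lifetime bug: pte points into the
page mapped from parent, so reading pte->val after
unmap_vtd_domain_page(parent) touches an unmapped page. The stand-alone
sketch below illustrates only the rule the patch enforces, not the actual
Xen code: map_page()/unmap_page() are hypothetical stand-ins (backed by
calloc()/free()) for map_vtd_domain_page()/unmap_vtd_domain_page(), and
dma_pte is reduced to a single field.

/* Sketch of the pointer-lifetime rule fixed in addr_to_dma_page_maddr():
 * a pte pointer derived from a mapped page must not be dereferenced once
 * that mapping is torn down. Compiles with any C99 compiler. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct dma_pte { uint64_t val; };          /* simplified page-table entry */

static struct dma_pte *map_page(void)      /* stand-in for map_vtd_domain_page() */
{
    struct dma_pte *p = calloc(512, sizeof(*p));
    if ( p )
        p[0].val = 0x12345000;             /* pretend the entry is populated */
    return p;
}

static void unmap_page(struct dma_pte *p)  /* stand-in for unmap_vtd_domain_page() */
{
    free(p);
}

int main(void)
{
    struct dma_pte *parent = map_page();
    if ( !parent )
        return 1;

    struct dma_pte *pte = &parent[0];      /* pte points INTO the mapping */

    /* Correct order (what the patch establishes): consume pte first ... */
    uint64_t pte_maddr = pte->val & ~0xfffULL;
    printf("pte_maddr = %#llx\n", (unsigned long long)pte_maddr);

    /* ... and only then drop the mapping. Reversing these two steps, as
     * the pre-patch code effectively did, reads unmapped/freed memory. */
    unmap_page(parent);
    return 0;
}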

diff -r 65802c51edb5 -r da261c25f160 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Tue Apr 22 10:18:13 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Tue Apr 22 10:20:05 2008 +0100
@@ -229,11 +229,7 @@ static u64 addr_to_dma_page_maddr(struct
             dma_set_pte_addr(*pte, maddr);
             vaddr = map_vtd_domain_page(maddr);
             if ( !vaddr )
-            {
-                unmap_vtd_domain_page(parent);
-                spin_unlock_irqrestore(&hd->mapping_lock, flags);
-                return 0;
-            }
+                break;
 
             /*
              * high level table always sets r/w, last level
@@ -247,14 +243,9 @@ static u64 addr_to_dma_page_maddr(struct
         {
             vaddr = map_vtd_domain_page(pte->val);
             if ( !vaddr )
-            {
-                unmap_vtd_domain_page(parent);
-                spin_unlock_irqrestore(&hd->mapping_lock, flags);
-                return 0;
-            }
+                break;
         }
 
-        unmap_vtd_domain_page(parent);
         if ( level == 2 )
         {
             pte_maddr = pte->val & PAGE_MASK_4K;
@@ -262,11 +253,13 @@ static u64 addr_to_dma_page_maddr(struct
             break;
         }
 
+        unmap_vtd_domain_page(parent);
         parent = (struct dma_pte *)vaddr;
         vaddr = NULL;
         level--;
     }
 
+    unmap_vtd_domain_page(parent);
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
     return pte_maddr;
 }
@@ -641,7 +634,7 @@ void dma_pte_free_pagetable(struct domai
     struct dma_pte *page, *pte;
     int total = agaw_to_level(hd->agaw);
     int level;
-    u32 tmp;
+    u64 tmp;
     u64 pg_maddr;
 
     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
@@ -662,7 +655,10 @@ void dma_pte_free_pagetable(struct domai
         {
             pg_maddr = dma_addr_level_page_maddr(domain, tmp, level);
             if ( pg_maddr == 0 )
-                return;
+            {
+                tmp += level_size(level);
+                continue;
+            }
             page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
             pte = page + address_level_offset(tmp, level);
             dma_clear_pte(*pte);
@@ -688,6 +684,7 @@ static int iommu_set_root_entry(struct i
 {
     u32 cmd, sts;
     unsigned long flags;
+    s_time_t start_time;
 
     if ( iommu == NULL )
     {
@@ -713,11 +710,14 @@ static int iommu_set_root_entry(struct i
     dmar_writel(iommu->reg, DMAR_GCMD_REG, cmd);
 
     /* Make sure hardware complete it */
+    start_time = NOW();
     for ( ; ; )
     {
         sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
         if ( sts & DMA_GSTS_RTPS )
             break;
+        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
+            panic("DMAR hardware is malfunctional, please disable IOMMU\n");
         cpu_relax();
     }
 
@@ -730,6 +730,7 @@ static int iommu_enable_translation(stru
 {
     u32 sts;
     unsigned long flags;
+    s_time_t start_time;
 
     dprintk(XENLOG_INFO VTDPREFIX,
             "iommu_enable_translation: iommu->reg = %p\n", iommu->reg);
@@ -737,11 +738,14 @@ static int iommu_enable_translation(stru
     iommu->gcmd |= DMA_GCMD_TE;
     dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
     /* Make sure hardware complete it */
+    start_time = NOW();
     for ( ; ; )
     {
         sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
         if ( sts & DMA_GSTS_TES )
             break;
+        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
+            panic("DMAR hardware is malfunctional, please disable IOMMU\n");
         cpu_relax();
     }
 
@@ -755,17 +759,21 @@ int iommu_disable_translation(struct iom
 {
     u32 sts;
     unsigned long flags;
+    s_time_t start_time;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     iommu->gcmd &= ~ DMA_GCMD_TE;
     dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
 
     /* Make sure hardware complete it */
+    start_time = NOW();
     for ( ; ; )
     {
         sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
         if ( !(sts & DMA_GSTS_TES) )
             break;
+        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
+            panic("DMAR hardware is malfunctional, please disable IOMMU\n");
         cpu_relax();
     }
     spin_unlock_irqrestore(&iommu->register_lock, flags);
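
The last three hunks replace unbounded "wait for the hardware to
acknowledge" loops with polls that give up after DMAR_OPERATION_TIMEOUT
and panic. The sketch below shows the same bounded-wait shape as a
self-contained user-space program; it is not the hypervisor code:
now_ns() stands in for Xen's NOW(), hardware_ready() is a hypothetical
placeholder for reading DMAR_GSTS_REG and testing the relevant status
bit, and OPERATION_TIMEOUT_NS is an illustrative value rather than the
real DMAR_OPERATION_TIMEOUT.

/* Sketch of the bounded-wait pattern the patch adds around the DMAR
 * status polls. Build with: cc -std=c99 -o boundedwait boundedwait.c */
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>

#define OPERATION_TIMEOUT_NS (1000ULL * 1000 * 1000)   /* 1s, illustrative only */

static uint64_t now_ns(void)                           /* stand-in for NOW() */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static int hardware_ready(void)                        /* stand-in for the GSTS bit test */
{
    static int polls;
    return ++polls >= 3;       /* pretend the hardware acks on the third poll */
}

int main(void)
{
    uint64_t start_time = now_ns();

    /* Make sure the "hardware" completes it -- but give up after a
     * deadline instead of spinning forever, exactly as the patch does. */
    for ( ; ; )
    {
        if ( hardware_ready() )
            break;
        if ( now_ns() > start_time + OPERATION_TIMEOUT_NS )
        {
            fprintf(stderr, "DMAR operation timed out\n");  /* panic() in the real code */
            return 1;
        }
        /* cpu_relax() in the hypervisor; a short sleep keeps this sketch polite. */
        struct timespec pause = { .tv_sec = 0, .tv_nsec = 1000000 };  /* 1 ms */
        nanosleep(&pause, NULL);
    }

    puts("operation completed");
    return 0;
}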

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
