[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-3.4-testing] AMD IOMMU: Destroy passthru guests when IO pagetable allocation fails



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1250696098 -3600
# Node ID 35c0b153982f4355ab9cfa5467f286900ebb16e8
# Parent  4af92eee8be497964912cda863c956691b3a3f89
AMD IOMMU: Destroy passthru guests when IO pagetable allocation fails

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
Acked-by: Wei Huang <wei.huang2@xxxxxxx>
xen-unstable changeset:   20062:64da4ed2be34
xen-unstable date:        Fri Aug 14 17:07:23 2009 +0100
---
 xen/drivers/passthrough/amd/iommu_map.c       |    7 +++++++
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |    4 +++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff -r 4af92eee8be4 -r 35c0b153982f xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Wed Aug 19 16:33:54 2009 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Wed Aug 19 16:34:58 2009 +0100
@@ -426,7 +426,10 @@ static u64 iommu_l2e_from_pfn(struct pag
             {
                 table = alloc_amd_iommu_pgtable();
                 if ( table == NULL )
+                {
+                    printk("AMD-Vi: Cannot allocate I/O page table\n");
                     return 0;
+                }
                 next_table_maddr = page_to_maddr(table);
                 amd_iommu_set_page_directory_entry(
                     (u32 *)pde, next_table_maddr, level - 1);
@@ -462,6 +465,7 @@ int amd_iommu_map_page(struct domain *d,
     {
         spin_unlock(&hd->mapping_lock);
         amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
+        domain_crash(d);
         return -EFAULT;
     }
     set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
@@ -494,6 +498,7 @@ int amd_iommu_unmap_page(struct domain *
     {
         spin_unlock(&hd->mapping_lock);
         amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
+        domain_crash(d);
         return -EFAULT;
     }
 
@@ -535,6 +540,7 @@ int amd_iommu_reserve_domain_unity_map(
             spin_unlock(&hd->mapping_lock);
             amd_iov_error("Invalid IO pagetable entry phys_addr = %lx\n",
                           phys_addr);
+            domain_crash(domain);
             return -EFAULT;
         }
 
@@ -583,6 +589,7 @@ int amd_iommu_sync_p2m(struct domain *d)
             spin_unlock(&d->page_alloc_lock);
             spin_unlock(&hd->mapping_lock);
             amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
+            domain_crash(d);
             return -EFAULT;
         }
 
diff -r 4af92eee8be4 -r 35c0b153982f xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Wed Aug 19 16:33:54 2009 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Wed Aug 19 16:34:58 2009 +0100
@@ -123,8 +123,10 @@ static inline struct page_info* alloc_am
     void *vaddr;
 
     pg = alloc_domheap_page(NULL, 0);
+    if ( pg == NULL )
+        return 0;
     vaddr = map_domain_page(page_to_mfn(pg));
-    if ( !vaddr )
+    if ( vaddr == NULL )
         return 0;
     memset(vaddr, 0, PAGE_SIZE);
     unmap_domain_page(vaddr);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.