[Xen-changelog] [xen master] AMD-Vi: allocate root table on demand



commit 8709b10c7f667ee6c3b76a5ef6a469e096f27502
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Mar 20 17:00:34 2017 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Mar 20 17:00:34 2017 +0100

    AMD-Vi: allocate root table on demand
    
    This was my originally intended fix for the AMD side of XSA-207:
    there is no need to allocate the root table unconditionally, and
    once it is allocated only on demand there is also no way to leak
    it when a guest has no devices assigned.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
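
For illustration, a minimal self-contained C sketch of the on-demand
pattern this patch adopts: the root table is created only when the
first mapping needs it, and teardown treats a still-absent table as
"nothing to do", so a domain that never has a device assigned never
owns a table that could leak.  The scaffolding below is hypothetical
(plain calloc(), a fixed 4096-byte table, no locking); only the shape
of the control flow mirrors the patch, which instead uses
alloc_amd_iommu_pgtable() under hd->arch.mapping_lock.

    #include <errno.h>
    #include <stdlib.h>

    struct domain_iommu {
        void *root_table;                 /* NULL until first use */
    };

    /* Idempotent: allocate the root table only if it is still missing.
     * In the real code, callers hold the mapping lock around this. */
    static int alloc_root(struct domain_iommu *hd)
    {
        if ( !hd->root_table )
        {
            hd->root_table = calloc(1, 4096);
            if ( !hd->root_table )
                return -ENOMEM;
        }
        return 0;
    }

    static int map_page(struct domain_iommu *hd)
    {
        int rc = alloc_root(hd);          /* allocate on first map */

        if ( rc )
            return rc;                    /* caller picks failure policy */
        /* ... walk/extend the table and install the PTE ... */
        return 0;
    }

    static int unmap_page(struct domain_iommu *hd)
    {
        if ( !hd->root_table )            /* never allocated, so */
            return 0;                     /* nothing was ever mapped */
        /* ... clear the PTE ... */
        return 0;
    }

Because alloc_root() is idempotent and cheap once the table exists,
the map path can simply call it unconditionally on every request.
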
---
 xen/drivers/passthrough/amd/iommu_map.c       | 20 ++++++++++---
 xen/drivers/passthrough/amd/pci_amd_iommu.c   | 42 +++++++++++++--------------
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  1 +
 3 files changed, 38 insertions(+), 25 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 67ba145..fd2327d 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -636,11 +636,10 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
 {
     bool_t need_flush = 0;
     struct domain_iommu *hd = dom_iommu(d);
+    int rc;
     unsigned long pt_mfn[7];
     unsigned int merge_level;
 
-    BUG_ON( !hd->arch.root_table );
-
     if ( iommu_use_hap_pt(d) )
         return 0;
 
@@ -648,6 +647,15 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
 
     spin_lock(&hd->arch.mapping_lock);
 
+    rc = amd_iommu_alloc_root(hd);
+    if ( rc )
+    {
+        spin_unlock(&hd->arch.mapping_lock);
+        AMD_IOMMU_DEBUG("Root table alloc failed, gfn = %lx\n", gfn);
+        domain_crash(d);
+        return rc;
+    }
+
     /* Since HVM domain is initialized with 2 level IO page table,
      * we might need a deeper page table for lager gfn now */
     if ( is_hvm_domain(d) )
@@ -717,8 +725,6 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
     unsigned long pt_mfn[7];
     struct domain_iommu *hd = dom_iommu(d);
 
-    BUG_ON( !hd->arch.root_table );
-
     if ( iommu_use_hap_pt(d) )
         return 0;
 
@@ -726,6 +732,12 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
 
     spin_lock(&hd->arch.mapping_lock);
 
+    if ( !hd->arch.root_table )
+    {
+        spin_unlock(&hd->arch.mapping_lock);
+        return 0;
+    }
+
     /* Since HVM domain is initialized with 2 level IO page table,
      * we might need a deeper page table for lager gfn now */
     if ( is_hvm_domain(d) )
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index b2428f8..8c25110 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -222,23 +222,29 @@ int __init amd_iov_detect(void)
     return scan_pci_devices();
 }
 
-static int allocate_domain_resources(struct domain_iommu *hd)
+int amd_iommu_alloc_root(struct domain_iommu *hd)
 {
-    /* allocate root table */
-    spin_lock(&hd->arch.mapping_lock);
-    if ( !hd->arch.root_table )
+    if ( unlikely(!hd->arch.root_table) )
     {
         hd->arch.root_table = alloc_amd_iommu_pgtable();
         if ( !hd->arch.root_table )
-        {
-            spin_unlock(&hd->arch.mapping_lock);
             return -ENOMEM;
-        }
     }
-    spin_unlock(&hd->arch.mapping_lock);
+
     return 0;
 }
 
+static int __must_check allocate_domain_resources(struct domain_iommu *hd)
+{
+    int rc;
+
+    spin_lock(&hd->arch.mapping_lock);
+    rc = amd_iommu_alloc_root(hd);
+    spin_unlock(&hd->arch.mapping_lock);
+
+    return rc;
+}
+
 static int get_paging_mode(unsigned long entries)
 {
     int level = 1;
@@ -259,14 +265,6 @@ static int amd_iommu_domain_init(struct domain *d)
 {
     struct domain_iommu *hd = dom_iommu(d);
 
-    /* allocate page directroy */
-    if ( allocate_domain_resources(hd) != 0 )
-    {
-        if ( hd->arch.root_table )
-            free_domheap_page(hd->arch.root_table);
-        return -ENOMEM;
-    }
-
     /* For pv and dom0, stick with get_paging_mode(max_page)
      * For HVM dom0, use 2 level page table at first */
     hd->arch.paging_mode = is_hvm_domain(d) ?
@@ -280,6 +278,9 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
     unsigned long i; 
     const struct amd_iommu *iommu;
 
+    if ( allocate_domain_resources(dom_iommu(d)) )
+        BUG();
+
     if ( !iommu_passthrough && !need_iommu(d) )
     {
         int rc = 0;
@@ -363,7 +364,7 @@ static int reassign_device(struct domain *source, struct domain *target,
                            u8 devfn, struct pci_dev *pdev)
 {
     struct amd_iommu *iommu;
-    int bdf;
+    int bdf, rc;
     struct domain_iommu *t = dom_iommu(target);
 
     bdf = PCI_BDF2(pdev->bus, pdev->devfn);
@@ -385,10 +386,9 @@ static int reassign_device(struct domain *source, struct domain *target,
         pdev->domain = target;
     }
 
-    /* IO page tables might be destroyed after pci-detach the last device
-     * In this case, we have to re-allocate root table for next pci-attach.*/
-    if ( t->arch.root_table == NULL )
-        allocate_domain_resources(t);
+    rc = allocate_domain_resources(t);
+    if ( rc )
+        return rc;
 
     amd_iommu_setup_domain_device(target, iommu, devfn, pdev);
     AMD_IOMMU_DEBUG("Re-assign %04x:%02x:%02x.%u from dom%d to dom%d\n",
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 1cf9c3d..99bc21c 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -56,6 +56,7 @@ int __must_check amd_iommu_map_page(struct domain *d, unsigned long gfn,
                                     unsigned long mfn, unsigned int flags);
 int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
+int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
                                        u64 phys_addr, unsigned long size,
                                        int iw, int ir);
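
Note how each caller of the now-fallible allocation picks its own
failure policy: amd_iommu_map_page() logs, crashes the offending
domain and returns the error (a mapping that cannot be backed by a
root table cannot be silently dropped), amd_iommu_unmap_page()
returns success when no table was ever allocated (nothing can have
been mapped yet), amd_iommu_hwdom_init() treats failure as fatal via
BUG(), and reassign_device() now propagates -ENOMEM so a pci-attach
fails cleanly instead of having the allocation result ignored as
before.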
--
generated by git-patchbot for /home/xen/git/xen.git#master
