
[Xen-devel] [PATCH 4/7] amd-iommu: reduce code duplication...



...by calling update_paging_mode() inside iommu_pde_from_dfn().

Also have iommu_pde_from_dfn() return -EFAULT if pt_mfn[1] is zero, so
that callers no longer need to test it explicitly.
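
Concretely, the exit path of iommu_pde_from_dfn() now validates the
level 1 mfn itself; a condensed sketch of the hunk below:

     out:
        if ( !next_table_mfn )
            return -EFAULT;

        ASSERT(level == IOMMU_PAGING_MODE_LEVEL_1);
        pt_mfn[level] = next_table_mfn;
        return 0;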

NOTE: The return value of iommu_pde_from_dfn() is ignored by
      amd_iommu_map_page(). Instead, to preserve the existing return
      semantics, -EFAULT is returned regardless of the actual error.
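
Both callers are then left with the same straightforward pattern (a
sketch only; see the hunks below for the exact code):

    rc = iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn);
    if ( rc )
    {
        spin_unlock(&hd->arch.mapping_lock);
        return rc; /* amd_iommu_map_page() returns -EFAULT here instead */
    }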

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Brian Woods <brian.woods@xxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_map.c | 276 ++++++++++++++++----------------
 1 file changed, 138 insertions(+), 138 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index d7df8b9161..a186c8d28b 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -431,6 +431,102 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
     return 0;
 }
 
+static int update_paging_mode(struct domain *d, unsigned long dfn)
+{
+    uint16_t bdf;
+    void *device_entry;
+    unsigned int req_id, level, offset;
+    unsigned long flags;
+    struct pci_dev *pdev;
+    struct amd_iommu *iommu = NULL;
+    struct page_info *new_root = NULL;
+    struct page_info *old_root = NULL;
+    void *new_root_vaddr;
+    unsigned long old_root_mfn;
+    struct domain_iommu *hd = dom_iommu(d);
+
+    ASSERT(dfn != dfn_x(INVALID_DFN));
+    ASSERT(!(dfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
+
+    level = hd->arch.paging_mode;
+    old_root = hd->arch.root_table;
+    offset = dfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
+
+    ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));
+
+    while ( offset >= PTE_PER_TABLE_SIZE )
+    {
+        /*
+         * Allocate and install a new root table.
+         * Only upper I/O page table grows, no need to fix next level bits.
+         */
+        new_root = alloc_amd_iommu_pgtable();
+        if ( !new_root )
+        {
+            AMD_IOMMU_DEBUG("%s Cannot allocate I/O page table\n",
+                            __func__);
+            return -ENOMEM;
+        }
+
+        new_root_vaddr = __map_domain_page(new_root);
+        old_root_mfn = mfn_x(page_to_mfn(old_root));
+        set_iommu_pde_present(new_root_vaddr, old_root_mfn, level,
+                              !!IOMMUF_writable, !!IOMMUF_readable);
+
+        level++;
+        old_root = new_root;
+        offset >>= PTE_PER_TABLE_SHIFT;
+        unmap_domain_page(new_root_vaddr);
+    }
+
+    if ( new_root )
+    {
+        hd->arch.paging_mode = level;
+        hd->arch.root_table = new_root;
+
+        if ( !pcidevs_locked() )
+            AMD_IOMMU_DEBUG("%s Try to access pdev_list "
+                            "without acquiring pcidevs_lock.\n", __func__);
+
+        /*
+         * Update device table entries using new root table and paging
+         * mode.
+         */
+        for_each_pdev ( d, pdev )
+        {
+            bdf = PCI_BDF2(pdev->bus, pdev->devfn);
+            iommu = find_iommu_for_device(pdev->seg, bdf);
+            if ( !iommu )
+            {
+                AMD_IOMMU_DEBUG("%s Failed to find iommu.\n", __func__);
+                return -ENODEV;
+            }
+
+            spin_lock_irqsave(&iommu->lock, flags);
+            do {
+                req_id = get_dma_requestor_id(pdev->seg, bdf);
+                device_entry = iommu->dev_table.buffer +
+                               (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+
+                /* valid = 0 only works for dom0 passthrough mode */
+                amd_iommu_set_root_page_table(
+                    device_entry, page_to_maddr(hd->arch.root_table),
+                    d->domain_id, hd->arch.paging_mode, 1);
+
+                amd_iommu_flush_device(iommu, req_id);
+                bdf += pdev->phantom_stride;
+            } while ( PCI_DEVFN2(bdf) != pdev->devfn &&
+                      PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );
+            spin_unlock_irqrestore(&iommu->lock, flags);
+        }
+
+        /* For safety, invalidate all entries */
+        amd_iommu_flush_all_pages(d);
+    }
+
+    return 0;
+}
+
 /* Walk io page tables and build level page tables if necessary
  * {Re, un}mapping super page frames causes re-allocation of io
  * page tables.
@@ -445,23 +541,37 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
     struct page_info *table;
     const struct domain_iommu *hd = dom_iommu(d);
 
+    /*
+     * Since an HVM domain is initialized with a 2-level IO page table,
+     * we might need a deeper page table for a wider dfn now.
+     */
+    if ( is_hvm_domain(d) )
+    {
+        int rc = update_paging_mode(d, dfn);
+
+        if ( rc )
+        {
+            AMD_IOMMU_DEBUG("Update page mode failed dfn = %"PRI_dfn"\n",
+                            dfn);
+            return rc;
+        }
+    }
+
     table = hd->arch.root_table;
     level = hd->arch.paging_mode;
 
-    BUG_ON( table == NULL || level < IOMMU_PAGING_MODE_LEVEL_1 || 
+    BUG_ON( !table || level < IOMMU_PAGING_MODE_LEVEL_1 ||
             level > IOMMU_PAGING_MODE_LEVEL_6 );
 
     next_table_mfn = mfn_x(page_to_mfn(table));
 
     if ( level == IOMMU_PAGING_MODE_LEVEL_1 )
-    {
-        pt_mfn[level] = next_table_mfn;
-        return 0;
-    }
+        goto out;
 
     while ( level > IOMMU_PAGING_MODE_LEVEL_1 )
     {
         unsigned int next_level = level - 1;
+
         pt_mfn[level] = next_table_mfn;
 
         next_table_vaddr = map_domain_page(_mfn(next_table_mfn));
@@ -485,11 +595,11 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
 
             /* allocate lower level page table */
             table = alloc_amd_iommu_pgtable();
-            if ( table == NULL )
+            if ( !table )
             {
                 AMD_IOMMU_DEBUG("Cannot allocate I/O page table\n");
                 unmap_domain_page(next_table_vaddr);
-                return 1;
+                return -EFAULT;
             }
 
             next_table_mfn = mfn_x(page_to_mfn(table));
@@ -510,14 +620,14 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
         /* Install lower level page table for non-present entries */
         else if ( !iommu_is_pte_present(pde) )
         {
-            if ( next_table_mfn == 0 )
+            if ( !next_table_mfn )
             {
                 table = alloc_amd_iommu_pgtable();
                 if ( table == NULL )
                 {
                     AMD_IOMMU_DEBUG("Cannot allocate I/O page table\n");
                     unmap_domain_page(next_table_vaddr);
-                    return 1;
+                    return -EFAULT;
                 }
                 next_table_mfn = mfn_x(page_to_mfn(table));
                 set_iommu_pde_present(pde, next_table_mfn, next_level,
@@ -526,7 +636,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
             else /* should never reach here */
             {
                 unmap_domain_page(next_table_vaddr);
-                return 1;
+                return -EFAULT;
             }
         }
 
@@ -534,99 +644,17 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
         level--;
     }
 
-    /* mfn of level 1 page table */
-    pt_mfn[level] = next_table_mfn;
-    return 0;
-}
-
-static int update_paging_mode(struct domain *d, unsigned long dfn)
-{
-    uint16_t bdf;
-    void *device_entry;
-    unsigned int req_id, level, offset;
-    unsigned long flags;
-    struct pci_dev *pdev;
-    struct amd_iommu *iommu = NULL;
-    struct page_info *new_root = NULL;
-    struct page_info *old_root = NULL;
-    void *new_root_vaddr;
-    unsigned long old_root_mfn;
-    struct domain_iommu *hd = dom_iommu(d);
-
-    if ( dfn == dfn_x(INVALID_DFN) )
-        return -EADDRNOTAVAIL;
-    ASSERT(!(dfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
-
-    level = hd->arch.paging_mode;
-    old_root = hd->arch.root_table;
-    offset = dfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
-
-    ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));
-
-    while ( offset >= PTE_PER_TABLE_SIZE )
+ out:
+    if ( !next_table_mfn )
     {
-        /* Allocate and install a new root table.
-         * Only upper I/O page table grows, no need to fix next level bits */
-        new_root = alloc_amd_iommu_pgtable();
-        if ( new_root == NULL )
-        {
-            AMD_IOMMU_DEBUG("%s Cannot allocate I/O page table\n",
-                            __func__);
-            return -ENOMEM;
-        }
-
-        new_root_vaddr = __map_domain_page(new_root);
-        old_root_mfn = mfn_x(page_to_mfn(old_root));
-        set_iommu_pde_present(new_root_vaddr, old_root_mfn, level,
-                              !!IOMMUF_writable, !!IOMMUF_readable);
-        level++;
-        old_root = new_root;
-        offset >>= PTE_PER_TABLE_SHIFT;
-        unmap_domain_page(new_root_vaddr);
+        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %"PRI_dfn"\n",
+                        dfn);
+        return -EFAULT;
     }
 
-    if ( new_root != NULL )
-    {
-        hd->arch.paging_mode = level;
-        hd->arch.root_table = new_root;
-
-        if ( !pcidevs_locked() )
-            AMD_IOMMU_DEBUG("%s Try to access pdev_list "
-                            "without aquiring pcidevs_lock.\n", __func__);
-
-        /* Update device table entries using new root table and paging mode */
-        for_each_pdev( d, pdev )
-        {
-            bdf = PCI_BDF2(pdev->bus, pdev->devfn);
-            iommu = find_iommu_for_device(pdev->seg, bdf);
-            if ( !iommu )
-            {
-                AMD_IOMMU_DEBUG("%s Fail to find iommu.\n", __func__);
-                return -ENODEV;
-            }
-
-            spin_lock_irqsave(&iommu->lock, flags);
-            do {
-                req_id = get_dma_requestor_id(pdev->seg, bdf);
-                device_entry = iommu->dev_table.buffer +
-                               (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
-
-                /* valid = 0 only works for dom0 passthrough mode */
-                amd_iommu_set_root_page_table(device_entry,
-                                              page_to_maddr(hd->arch.root_table),
-                                              d->domain_id,
-                                              hd->arch.paging_mode, 1);
-
-                amd_iommu_flush_device(iommu, req_id);
-                bdf += pdev->phantom_stride;
-            } while ( PCI_DEVFN2(bdf) != pdev->devfn &&
-                      PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );
-            spin_unlock_irqrestore(&iommu->lock, flags);
-        }
+    ASSERT(level == IOMMU_PAGING_MODE_LEVEL_1);
+    pt_mfn[level] = next_table_mfn;
 
-        /* For safety, invalidate all entries */
-        amd_iommu_flush_all_pages(d);
-    }
     return 0;
 }
 
@@ -636,13 +664,14 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
     bool need_flush = false;
     struct domain_iommu *hd = dom_iommu(d);
     int rc;
-    unsigned long pt_mfn[7];
+    unsigned long pt_mfn[7] = {};
     unsigned int merge_level;
 
     if ( iommu_use_hap_pt(d) )
         return 0;
 
-    memset(pt_mfn, 0, sizeof(pt_mfn));
+    if ( dfn_eq(dfn, INVALID_DFN) )
+        return -EFAULT;
 
     spin_lock(&hd->arch.mapping_lock);
 
@@ -655,24 +684,9 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
         return rc;
     }
 
-    /* Since HVM domain is initialized with 2 level IO page table,
-     * we might need a deeper page table for wider dfn now */
-    if ( is_hvm_domain(d) )
-    {
-        if ( update_paging_mode(d, dfn_x(dfn)) )
-        {
-            spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed dfn = %"PRI_dfn"\n",
-                            dfn_x(dfn));
-            return -EFAULT;
-        }
-    }
-
-    if ( iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn) || (pt_mfn[1] == 0) )
+    if ( iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn) )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %"PRI_dfn"\n",
-                        dfn_x(dfn));
         return -EFAULT;
     }
 
@@ -721,13 +735,15 @@ out:
 
 int amd_iommu_unmap_page(struct domain *d, dfn_t dfn)
 {
-    unsigned long pt_mfn[7];
+    unsigned long pt_mfn[7] = {};
     struct domain_iommu *hd = dom_iommu(d);
+    int rc;
 
     if ( iommu_use_hap_pt(d) )
         return 0;
 
-    memset(pt_mfn, 0, sizeof(pt_mfn));
+    if ( dfn_eq(dfn, INVALID_DFN) )
+        return -EADDRNOTAVAIL;
 
     spin_lock(&hd->arch.mapping_lock);
 
@@ -737,27 +753,11 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn)
         return 0;
     }
 
-    /* Since HVM domain is initialized with 2 level IO page table,
-     * we might need a deeper page table for lager dfn now */
-    if ( is_hvm_domain(d) )
-    {
-        int rc = update_paging_mode(d, dfn_x(dfn));
-
-        if ( rc )
-        {
-            spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed dfn = %"PRI_dfn"\n",
-                            dfn_x(dfn));
-            return rc;
-        }
-    }
-
-    if ( iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn) || (pt_mfn[1] == 0) )
+    rc = iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn);
+    if ( rc )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %"PRI_dfn"\n",
-                        dfn_x(dfn));
-        return -EFAULT;
+        return rc;
     }
 
     /* mark PTE as 'page not present' */
-- 
2.11.0

