
[xen master] IOMMU/x86: tighten iommu_alloc_pgtable()'s parameter



commit eba09b9dd78f9e8cbaa78ef0edb301b32def2c7a
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Apr 5 14:16:46 2022 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Apr 5 14:16:46 2022 +0200

    IOMMU/x86: tighten iommu_alloc_pgtable()'s parameter
    
    This is to make it more obvious that nothing outside of domain_iommu(d)
    actually changes or is otherwise needed by the function.
    
    No functional change intended.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Paul Durrant <paul@xxxxxxx>
    Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/include/asm/iommu.h            |  3 ++-
 xen/drivers/passthrough/amd/iommu_map.c     | 10 +++++-----
 xen/drivers/passthrough/amd/pci_amd_iommu.c |  2 +-
 xen/drivers/passthrough/vtd/iommu.c         |  8 ++++----
 xen/drivers/passthrough/x86/iommu.c         |  3 +--
 5 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/include/asm/iommu.h b/xen/arch/x86/include/asm/iommu.h
index e3484ca023..5060f97124 100644
--- a/xen/arch/x86/include/asm/iommu.h
+++ b/xen/arch/x86/include/asm/iommu.h
@@ -141,7 +141,8 @@ static inline void iommu_sync_cache(const void *addr, unsigned int size)
 }
 
 int __must_check iommu_free_pgtables(struct domain *d);
-struct page_info *__must_check iommu_alloc_pgtable(struct domain *d);
+struct domain_iommu;
+struct page_info *__must_check iommu_alloc_pgtable(struct domain_iommu *hd);
 
 #endif /* !__ARCH_X86_IOMMU_H__ */
 /*
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index bf5df5fe5d..70089db76d 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -184,7 +184,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
     unsigned long  next_table_mfn;
     unsigned int level;
     struct page_info *table;
-    const struct domain_iommu *hd = dom_iommu(d);
+    struct domain_iommu *hd = dom_iommu(d);
 
     table = hd->arch.amd.root_table;
     level = hd->arch.amd.paging_mode;
@@ -219,7 +219,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
             mfn = next_table_mfn;
 
             /* allocate lower level page table */
-            table = iommu_alloc_pgtable(d);
+            table = iommu_alloc_pgtable(hd);
             if ( table == NULL )
             {
                 AMD_IOMMU_ERROR("cannot allocate I/O page table\n");
@@ -249,7 +249,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
 
             if ( next_table_mfn == 0 )
             {
-                table = iommu_alloc_pgtable(d);
+                table = iommu_alloc_pgtable(hd);
                 if ( table == NULL )
                 {
                     AMD_IOMMU_ERROR("cannot allocate I/O page table\n");
@@ -555,7 +555,7 @@ int __init cf_check amd_iommu_quarantine_init(struct domain *d)
 
     spin_lock(&hd->arch.mapping_lock);
 
-    hd->arch.amd.root_table = iommu_alloc_pgtable(d);
+    hd->arch.amd.root_table = iommu_alloc_pgtable(hd);
     if ( !hd->arch.amd.root_table )
         goto out;
 
@@ -570,7 +570,7 @@ int __init cf_check amd_iommu_quarantine_init(struct domain *d)
          * page table pages, and the resulting allocations are always
          * zeroed.
          */
-        pg = iommu_alloc_pgtable(d);
+        pg = iommu_alloc_pgtable(hd);
         if ( !pg )
             break;
 
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 4b59a4efe9..7b21380a46 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -242,7 +242,7 @@ int amd_iommu_alloc_root(struct domain *d)
 
     if ( unlikely(!hd->arch.amd.root_table) )
     {
-        hd->arch.amd.root_table = iommu_alloc_pgtable(d);
+        hd->arch.amd.root_table = iommu_alloc_pgtable(hd);
         if ( !hd->arch.amd.root_table )
             return -ENOMEM;
     }
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 06ba21aad8..0e90089c9b 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -318,7 +318,7 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
     {
         struct page_info *pg;
 
-        if ( !alloc || !(pg = iommu_alloc_pgtable(domain)) )
+        if ( !alloc || !(pg = iommu_alloc_pgtable(hd)) )
             goto out;
 
         hd->arch.vtd.pgd_maddr = page_to_maddr(pg);
@@ -338,7 +338,7 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
             if ( !alloc )
                 break;
 
-            pg = iommu_alloc_pgtable(domain);
+            pg = iommu_alloc_pgtable(hd);
             if ( !pg )
                 break;
 
@@ -2758,7 +2758,7 @@ static int __init cf_check intel_iommu_quarantine_init(struct domain *d)
         goto out;
     }
 
-    pg = iommu_alloc_pgtable(d);
+    pg = iommu_alloc_pgtable(hd);
 
     rc = -ENOMEM;
     if ( !pg )
@@ -2777,7 +2777,7 @@ static int __init cf_check intel_iommu_quarantine_init(struct domain *d)
          * page table pages, and the resulting allocations are always
          * zeroed.
          */
-        pg = iommu_alloc_pgtable(d);
+        pg = iommu_alloc_pgtable(hd);
 
         if ( !pg )
             goto out;
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 65a622f26d..9c5fb6fa46 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -420,9 +420,8 @@ int iommu_free_pgtables(struct domain *d)
     return 0;
 }
 
-struct page_info *iommu_alloc_pgtable(struct domain *d)
+struct page_info *iommu_alloc_pgtable(struct domain_iommu *hd)
 {
-    struct domain_iommu *hd = dom_iommu(d);
     unsigned int memflags = 0;
     struct page_info *pg;
     void *p;
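
For illustration only, not part of the patch above: after this change, call
sites resolve the per-domain IOMMU context once via dom_iommu() and pass that
narrower handle to the allocator, while the header forward-declares
struct domain_iommu so the new prototype compiles without its full definition.
The sketch below uses abbreviated stand-in types and a stub allocator, not
Xen's real definitions:

/* Illustrative sketch: stand-in types, not Xen's real definitions. */
#include <stdlib.h>

struct page_info { unsigned int order; };       /* stand-in */
struct domain_iommu { unsigned int dummy; };    /* stand-in */
struct domain { struct domain_iommu iommu; };   /* stand-in */

/* Stand-in for Xen's dom_iommu(d) accessor. */
#define dom_iommu(d) (&(d)->iommu)

/*
 * Tightened interface: the allocator takes only the IOMMU context it
 * actually uses.  Stubbed here with calloc(); the real function also
 * tracks the page on the context's page-table list.
 */
static struct page_info *iommu_alloc_pgtable(struct domain_iommu *hd)
{
    (void)hd;
    return calloc(1, sizeof(struct page_info));
}

/* A caller looks the context up once and reuses it. */
static int example_caller(struct domain *d)
{
    struct domain_iommu *hd = dom_iommu(d);
    struct page_info *pg = iommu_alloc_pgtable(hd);

    if ( !pg )
        return -1;  /* -ENOMEM in the real code */

    free(pg);
    return 0;
}

int main(void)
{
    struct domain d = { { 0 } };

    return example_caller(&d) ? EXIT_FAILURE : EXIT_SUCCESS;
}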
--
generated by git-patchbot for /home/xen/git/xen.git#master