
[Xen-changelog] [xen staging] amd-iommu: get rid of pointless IOMMU_PAGING_MODE_LEVEL_X definitions



commit 1ecb1ee4d8475475c3ccf72f6654644b242ce856
Author:     Paul Durrant <paul.durrant@xxxxxxxxxx>
AuthorDate: Mon Oct 29 13:47:24 2018 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Oct 29 13:47:24 2018 +0100

    amd-iommu: get rid of pointless IOMMU_PAGING_MODE_LEVEL_X definitions
    
    The levels are absolute numbers such that IOMMU_PAGING_MODE_LEVEL_X
    simply evaluates to X (over the valid range 0 - 7), so use plain
    numbers in the code instead.
    
    No functional change.
    
    NOTE: This patch also adds emacs boilerplate to amd-iommu-defs.h
    
    Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    Acked-by: Brian Woods <brian.woods@xxxxxxx>
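    
    As a minimal sketch (not part of the original patch), the identity the
    change relies on could have been checked at build time with Xen's
    compile-time assertion macro, before the definitions were deleted:
    
        /* Each IOMMU_PAGING_MODE_LEVEL_X macro is defined as the literal
         * value X, so replacing the macro with the number is an identity
         * transformation over the whole valid range 0-7.
         */
        BUILD_BUG_ON(IOMMU_PAGING_MODE_LEVEL_0 != 0);
        BUILD_BUG_ON(IOMMU_PAGING_MODE_LEVEL_4 != 4);
        BUILD_BUG_ON(IOMMU_PAGING_MODE_LEVEL_7 != 7);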
---
 xen/drivers/passthrough/amd/iommu_map.c      | 26 +++++++++++---------------
 xen/drivers/passthrough/amd/pci_amd_iommu.c  |  4 +---
 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h | 21 +++++++++++----------
 3 files changed, 23 insertions(+), 28 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index d03a6d72b9..6a2c877d34 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -40,7 +40,7 @@ void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long dfn)
     u64 *table, *pte;
 
     table = map_domain_page(_mfn(l1_mfn));
-    pte = table + pfn_to_pde_idx(dfn, IOMMU_PAGING_MODE_LEVEL_1);
+    pte = table + pfn_to_pde_idx(dfn, 1);
     *pte = 0;
     unmap_domain_page(table);
 }
@@ -84,7 +84,7 @@ static bool_t set_iommu_pde_present(u32 *pde, unsigned long next_mfn,
     /* FC bit should be enabled in PTE, this helps to solve potential
      * issues with ATS devices
      */
-    if ( next_level == IOMMU_PAGING_MODE_LEVEL_0 )
+    if ( next_level == 0 )
         set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                              IOMMU_PTE_FC_MASK, IOMMU_PTE_FC_SHIFT, &entry);
     pde[1] = entry;
@@ -116,8 +116,7 @@ static bool_t set_iommu_pte_present(unsigned long pt_mfn, unsigned long dfn,
 
     pde = (u32*)(table + pfn_to_pde_idx(dfn, pde_level));
 
-    need_flush = set_iommu_pde_present(pde, next_mfn, 
-                                       IOMMU_PAGING_MODE_LEVEL_0, iw, ir);
+    need_flush = set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
     unmap_domain_page(table);
     return need_flush;
 }
@@ -419,8 +418,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
     }
 
     /* setup super page mapping, next level = 0 */
-    set_iommu_pde_present((u32*)pde, first_mfn,
-                          IOMMU_PAGING_MODE_LEVEL_0,
+    set_iommu_pde_present((u32*)pde, first_mfn, 0,
                           !!(flags & IOMMUF_writable),
                           !!(flags & IOMMUF_readable));
 
@@ -447,18 +445,17 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
     table = hd->arch.root_table;
     level = hd->arch.paging_mode;
 
-    BUG_ON( table == NULL || level < IOMMU_PAGING_MODE_LEVEL_1 || 
-            level > IOMMU_PAGING_MODE_LEVEL_6 );
+    BUG_ON( table == NULL || level < 1 || level > 6 );
 
     next_table_mfn = mfn_x(page_to_mfn(table));
 
-    if ( level == IOMMU_PAGING_MODE_LEVEL_1 )
+    if ( level == 1 )
     {
         pt_mfn[level] = next_table_mfn;
         return 0;
     }
 
-    while ( level > IOMMU_PAGING_MODE_LEVEL_1 )
+    while ( level > 1 )
     {
         unsigned int next_level = level - 1;
         pt_mfn[level] = next_table_mfn;
@@ -679,8 +676,7 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
     }
 
     /* Install 4k mapping first */
-    need_flush = set_iommu_pte_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn),
-                                       IOMMU_PAGING_MODE_LEVEL_1,
+    need_flush = set_iommu_pte_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn), 1,
                                        !!(flags & IOMMUF_writable),
                                        !!(flags & IOMMUF_readable));
 
@@ -693,8 +689,8 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
     if ( is_hvm_domain(d) )
         amd_iommu_flush_pages(d, dfn_x(dfn), 0);
 
-    for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
-          merge_level <= hd->arch.paging_mode; merge_level++ )
+    for ( merge_level = 2; merge_level <= hd->arch.paging_mode;
+          merge_level++ )
     {
         if ( pt_mfn[merge_level] == 0 )
             break;
@@ -816,7 +812,7 @@ void amd_iommu_share_p2m(struct domain *d)
         hd->arch.root_table = p2m_table;
 
         /* When sharing p2m with iommu, paging mode = 4 */
-        hd->arch.paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
+        hd->arch.paging_mode = 4;
         AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n",
                         mfn_x(pgd_mfn));
     }
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 257b83e2db..d3909daa8d 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -240,9 +240,7 @@ static int amd_iommu_domain_init(struct domain *d)
 
     /* For pv and dom0, stick with get_paging_mode(max_page)
      * For HVM dom0, use 2 level page table at first */
-    hd->arch.paging_mode = is_hvm_domain(d) ?
-                      IOMMU_PAGING_MODE_LEVEL_2 :
-                      get_paging_mode(max_page);
+    hd->arch.paging_mode = is_hvm_domain(d) ? 2 : get_paging_mode(max_page);
     return 0;
 }
 
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
index 1f19cd3d27..a217245249 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
@@ -38,8 +38,7 @@
        PAGE_SIZE * (PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT)
 
 #define amd_offset_level_address(offset, level) \
-       ((u64)(offset) << (12 + (PTE_PER_TABLE_SHIFT * \
-                                (level - IOMMU_PAGING_MODE_LEVEL_1))))
+        ((uint64_t)(offset) << (12 + (PTE_PER_TABLE_SHIFT * ((level) - 1))))
 
 #define PCI_MIN_CAP_OFFSET     0x40
 #define PCI_MAX_CAP_BLOCKS     48
@@ -449,14 +448,6 @@
 
 /* Paging modes */
 #define IOMMU_PAGING_MODE_DISABLED     0x0
-#define IOMMU_PAGING_MODE_LEVEL_0      0x0
-#define IOMMU_PAGING_MODE_LEVEL_1      0x1
-#define IOMMU_PAGING_MODE_LEVEL_2      0x2
-#define IOMMU_PAGING_MODE_LEVEL_3      0x3
-#define IOMMU_PAGING_MODE_LEVEL_4      0x4
-#define IOMMU_PAGING_MODE_LEVEL_5      0x5
-#define IOMMU_PAGING_MODE_LEVEL_6      0x6
-#define IOMMU_PAGING_MODE_LEVEL_7      0x7
 
 /* Flags */
 #define IOMMU_CONTROL_DISABLED 0
@@ -497,3 +488,13 @@
 #define IOMMU_REG_BASE_ADDR_HIGH_SHIFT              0
 
 #endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
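
For reference, a self-contained sketch of what the reworked
amd_offset_level_address() macro computes, assuming PTE_PER_TABLE_SHIFT
is 9 (i.e. 512 8-byte PTEs per 4k table, as defined in amd-iommu-defs.h):
level 1 entries cover 4k pages, and each level above widens the shift by
a further 9 bits.

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_PER_TABLE_SHIFT 9

    /* As reworked by this patch: the address bits covered by an entry at
     * 'level' start at bit 12 for level 1 and grow by PTE_PER_TABLE_SHIFT
     * bits per additional level.
     */
    #define amd_offset_level_address(offset, level) \
        ((uint64_t)(offset) << (12 + (PTE_PER_TABLE_SHIFT * ((level) - 1))))

    int main(void)
    {
        /* Offset 1 at level 1 -> 0x1000 (4k); at level 2 -> 0x200000 (2M). */
        printf("%#llx\n", (unsigned long long)amd_offset_level_address(1, 1));
        printf("%#llx\n", (unsigned long long)amd_offset_level_address(1, 2));
        return 0;
    }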
--
generated by git-patchbot for /home/xen/git/xen.git#staging
