[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH] amd-iommu: get rid of pointless IOMMU_PAGING_MODE_LEVEL_X definitions
The levels are absolute numbers such that IOMMU_PAGING_MODE_LEVEL_X evaluates to X (for the valid range of 0 - 7) so simply use numbers in the code. No functional change. NOTE: This patch also adds emacs boilerplate to amd-iommu-defs.h Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx> --- Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx> Cc: Brian Woods <brian.woods@xxxxxxx> --- xen/drivers/passthrough/amd/iommu_map.c | 26 +++++++++++--------------- xen/drivers/passthrough/amd/pci_amd_iommu.c | 4 +--- xen/include/asm-x86/hvm/svm/amd-iommu-defs.h | 21 +++++++++++---------- 3 files changed, 23 insertions(+), 28 deletions(-) diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c index 9fa5cd3bd3..ecd55d0573 100644 --- a/xen/drivers/passthrough/amd/iommu_map.c +++ b/xen/drivers/passthrough/amd/iommu_map.c @@ -40,7 +40,7 @@ void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long gfn) u64 *table, *pte; table = map_domain_page(_mfn(l1_mfn)); - pte = table + pfn_to_pde_idx(gfn, IOMMU_PAGING_MODE_LEVEL_1); + pte = table + pfn_to_pde_idx(gfn, 1); *pte = 0; unmap_domain_page(table); } @@ -84,7 +84,7 @@ static bool_t set_iommu_pde_present(u32 *pde, unsigned long next_mfn, /* FC bit should be enabled in PTE, this helps to solve potential * issues with ATS devices */ - if ( next_level == IOMMU_PAGING_MODE_LEVEL_0 ) + if ( next_level == 0 ) set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, IOMMU_PTE_FC_MASK, IOMMU_PTE_FC_SHIFT, &entry); pde[1] = entry; @@ -116,8 +116,7 @@ static bool_t set_iommu_pte_present(unsigned long pt_mfn, unsigned long gfn, pde = (u32*)(table + pfn_to_pde_idx(gfn, pde_level)); - need_flush = set_iommu_pde_present(pde, next_mfn, - IOMMU_PAGING_MODE_LEVEL_0, iw, ir); + need_flush = set_iommu_pde_present(pde, next_mfn, 0, iw, ir); unmap_domain_page(table); return need_flush; } @@ -419,8 +418,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn, } /* setup super page mapping, 
next level = 0 */ - set_iommu_pde_present((u32*)pde, first_mfn, - IOMMU_PAGING_MODE_LEVEL_0, + set_iommu_pde_present((u32*)pde, first_mfn, 0, !!(flags & IOMMUF_writable), !!(flags & IOMMUF_readable)); @@ -447,18 +445,17 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn, table = hd->arch.root_table; level = hd->arch.paging_mode; - BUG_ON( table == NULL || level < IOMMU_PAGING_MODE_LEVEL_1 || - level > IOMMU_PAGING_MODE_LEVEL_6 ); + BUG_ON( table == NULL || level < 1 || level > 6 ); next_table_mfn = mfn_x(page_to_mfn(table)); - if ( level == IOMMU_PAGING_MODE_LEVEL_1 ) + if ( level == 1 ) { pt_mfn[level] = next_table_mfn; return 0; } - while ( level > IOMMU_PAGING_MODE_LEVEL_1 ) + while ( level > 1 ) { unsigned int next_level = level - 1; pt_mfn[level] = next_table_mfn; @@ -676,8 +673,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn, } /* Install 4k mapping first */ - need_flush = set_iommu_pte_present(pt_mfn[1], gfn, mfn, - IOMMU_PAGING_MODE_LEVEL_1, + need_flush = set_iommu_pte_present(pt_mfn[1], gfn, mfn, 1, !!(flags & IOMMUF_writable), !!(flags & IOMMUF_readable)); @@ -690,8 +686,8 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn, if ( is_hvm_domain(d) ) amd_iommu_flush_pages(d, gfn, 0); - for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2; - merge_level <= hd->arch.paging_mode; merge_level++ ) + for ( merge_level = 2; merge_level <= hd->arch.paging_mode; + merge_level++ ) { if ( pt_mfn[merge_level] == 0 ) break; @@ -808,7 +804,7 @@ void amd_iommu_share_p2m(struct domain *d) hd->arch.root_table = p2m_table; /* When sharing p2m with iommu, paging mode = 4 */ - hd->arch.paging_mode = IOMMU_PAGING_MODE_LEVEL_4; + hd->arch.paging_mode = 4; AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n", mfn_x(pgd_mfn)); } diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c index 80d9ae6561..030debb775 100644 --- 
a/xen/drivers/passthrough/amd/pci_amd_iommu.c +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c @@ -240,9 +240,7 @@ static int amd_iommu_domain_init(struct domain *d) /* For pv and dom0, stick with get_paging_mode(max_page) * For HVM dom0, use 2 level page table at first */ - hd->arch.paging_mode = is_hvm_domain(d) ? - IOMMU_PAGING_MODE_LEVEL_2 : - get_paging_mode(max_page); + hd->arch.paging_mode = is_hvm_domain(d) ? 2 : get_paging_mode(max_page); return 0; } diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h index c479f0bb02..6b35dbfd32 100644 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h @@ -38,8 +38,7 @@ PAGE_SIZE * (PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT) #define amd_offset_level_address(offset, level) \ - ((u64)(offset) << (12 + (PTE_PER_TABLE_SHIFT * \ - (level - IOMMU_PAGING_MODE_LEVEL_1)))) + ((uint64_t)(offset) << (12 + (PTE_PER_TABLE_SHIFT * ((level) - 1)))) #define PCI_MIN_CAP_OFFSET 0x40 #define PCI_MAX_CAP_BLOCKS 48 @@ -450,14 +449,6 @@ /* Paging modes */ #define IOMMU_PAGING_MODE_DISABLED 0x0 -#define IOMMU_PAGING_MODE_LEVEL_0 0x0 -#define IOMMU_PAGING_MODE_LEVEL_1 0x1 -#define IOMMU_PAGING_MODE_LEVEL_2 0x2 -#define IOMMU_PAGING_MODE_LEVEL_3 0x3 -#define IOMMU_PAGING_MODE_LEVEL_4 0x4 -#define IOMMU_PAGING_MODE_LEVEL_5 0x5 -#define IOMMU_PAGING_MODE_LEVEL_6 0x6 -#define IOMMU_PAGING_MODE_LEVEL_7 0x7 /* Flags */ #define IOMMU_CONTROL_DISABLED 0 @@ -498,3 +489,13 @@ #define IOMMU_REG_BASE_ADDR_HIGH_SHIFT 0 #endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ -- 2.11.0 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.