[Xen-devel] [PATCH v3 04/13] iommu: push use of type-safe BFN and MFN into iommu_ops
This patch modifies the methods in struct iommu_ops to use type-safe BFN
and MFN. This follows on from the prior patch that modified the functions
exported in xen/iommu.h.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>

v3:
 - Remove some use of intermediate 'frame' variables.

v2:
 - Addressed comments from Jan.
 - Extend use of intermediate 'frame' variable to avoid directly
   encapsulating gfn values as bfns or vice versa.
---
 xen/drivers/passthrough/amd/iommu_map.c       | 46 ++++++++++++++++-----------
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |  2 +-
 xen/drivers/passthrough/arm/smmu.c            | 16 +++++-----
 xen/drivers/passthrough/iommu.c               |  9 +++---
 xen/drivers/passthrough/vtd/iommu.c           | 26 +++++++--------
 xen/drivers/passthrough/vtd/x86/vtd.c         |  7 ++--
 xen/drivers/passthrough/x86/iommu.c           |  2 +-
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  8 ++---
 xen/include/xen/iommu.h                       | 13 +++++---
 9 files changed, 72 insertions(+), 57 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 4deab9cd2f..5a9a0af320 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -631,7 +631,7 @@ static int update_paging_mode(struct domain *d, unsigned long bfn)
     return 0;
 }
 
-int amd_iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
+int amd_iommu_map_page(struct domain *d, bfn_t bfn, mfn_t mfn,
                        unsigned int flags)
 {
     bool_t need_flush = 0;
@@ -651,7 +651,8 @@ int amd_iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
     if ( rc )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Root table alloc failed, bfn = %lx\n", bfn);
+        AMD_IOMMU_DEBUG("Root table alloc failed, bfn = %"PRI_bfn"\n",
+                        bfn_x(bfn));
         domain_crash(d);
         return rc;
     }
@@ -660,25 +661,27 @@ int amd_iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
      * we might need a deeper page table for wider bfn now */
     if ( is_hvm_domain(d) )
     {
-        if ( update_paging_mode(d, bfn) )
+        if ( update_paging_mode(d, bfn_x(bfn)) )
         {
             spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed bfn = %lx\n", bfn);
+            AMD_IOMMU_DEBUG("Update page mode failed bfn = %"PRI_bfn"\n",
+                            bfn_x(bfn));
             domain_crash(d);
             return -EFAULT;
         }
     }
 
-    if ( iommu_pde_from_bfn(d, bfn, pt_mfn) || (pt_mfn[1] == 0) )
+    if ( iommu_pde_from_bfn(d, bfn_x(bfn), pt_mfn) || (pt_mfn[1] == 0) )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Invalid IO pagetable entry bfn = %lx\n", bfn);
+        AMD_IOMMU_DEBUG("Invalid IO pagetable entry bfn = %"PRI_bfn"\n",
+                        bfn_x(bfn));
         domain_crash(d);
         return -EFAULT;
     }
 
     /* Install 4k mapping first */
-    need_flush = set_iommu_pte_present(pt_mfn[1], bfn, mfn,
+    need_flush = set_iommu_pte_present(pt_mfn[1], bfn_x(bfn), mfn_x(mfn),
                                        IOMMU_PAGING_MODE_LEVEL_1,
                                        !!(flags & IOMMUF_writable),
                                        !!(flags & IOMMUF_readable));
@@ -690,7 +693,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
     /* 4K mapping for PV guests never changes,
      * no need to flush if we trust non-present bits */
     if ( is_hvm_domain(d) )
-        amd_iommu_flush_pages(d, bfn, 0);
+        amd_iommu_flush_pages(d, bfn_x(bfn), 0);
 
     for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
           merge_level <= hd->arch.paging_mode; merge_level++ )
@@ -698,15 +701,16 @@ int amd_iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
         if ( pt_mfn[merge_level] == 0 )
             break;
         if ( !iommu_update_pde_count(d, pt_mfn[merge_level],
-                                     bfn, mfn, merge_level) )
+                                     bfn_x(bfn), mfn_x(mfn), merge_level) )
             break;
 
-        if ( iommu_merge_pages(d, pt_mfn[merge_level], bfn,
+        if ( iommu_merge_pages(d, pt_mfn[merge_level], bfn_x(bfn),
                                flags, merge_level) )
         {
             spin_unlock(&hd->arch.mapping_lock);
             AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
-                            "bfn = %lx mfn = %lx\n", merge_level, bfn, mfn);
+                            "bfn = %"PRI_bfn" mfn = %"PRI_mfn"\n",
+                            merge_level, bfn_x(bfn), mfn_x(mfn));
             domain_crash(d);
             return -EFAULT;
         }
@@ -720,7 +724,7 @@ out:
     return 0;
 }
 
-int amd_iommu_unmap_page(struct domain *d, unsigned long bfn)
+int amd_iommu_unmap_page(struct domain *d, bfn_t bfn)
 {
     unsigned long pt_mfn[7];
     struct domain_iommu *hd = dom_iommu(d);
@@ -742,31 +746,33 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long bfn)
      * we might need a deeper page table for lager bfn now */
     if ( is_hvm_domain(d) )
     {
-        int rc = update_paging_mode(d, bfn);
+        int rc = update_paging_mode(d, bfn_x(bfn));
 
         if ( rc )
         {
             spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed bfn = %lx\n", bfn);
+            AMD_IOMMU_DEBUG("Update page mode failed bfn = %"PRI_bfn"\n",
+                            bfn_x(bfn));
             if ( rc != -EADDRNOTAVAIL )
                 domain_crash(d);
             return rc;
         }
     }
 
-    if ( iommu_pde_from_bfn(d, bfn, pt_mfn) || (pt_mfn[1] == 0) )
+    if ( iommu_pde_from_bfn(d, bfn_x(bfn), pt_mfn) || (pt_mfn[1] == 0) )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Invalid IO pagetable entry bfn = %lx\n", bfn);
+        AMD_IOMMU_DEBUG("Invalid IO pagetable entry bfn = %"PRI_bfn"\n",
+                        bfn_x(bfn));
         domain_crash(d);
         return -EFAULT;
     }
 
     /* mark PTE as 'page not present' */
-    clear_iommu_pte_present(pt_mfn[1], bfn);
+    clear_iommu_pte_present(pt_mfn[1], bfn_x(bfn));
     spin_unlock(&hd->arch.mapping_lock);
 
-    amd_iommu_flush_pages(d, bfn, 0);
+    amd_iommu_flush_pages(d, bfn_x(bfn), 0);
 
     return 0;
 }
@@ -787,7 +793,9 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
     gfn = phys_addr >> PAGE_SHIFT;
     for ( i = 0; i < npages; i++ )
     {
-        rt = amd_iommu_map_page(domain, gfn +i, gfn +i, flags);
+        unsigned long frame = gfn + i;
+
+        rt = amd_iommu_map_page(domain, _bfn(frame), _mfn(frame), flags);
         if ( rt != 0 )
             return rt;
     }
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index d608631e6e..eea22c3d0d 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -271,7 +271,7 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
          */
         if ( mfn_valid(_mfn(pfn)) )
         {
-            int ret = amd_iommu_map_page(d, pfn, pfn,
+            int ret = amd_iommu_map_page(d, _bfn(pfn), _mfn(pfn),
                                          IOMMUF_readable|IOMMUF_writable);
 
             if ( !rc )
diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index 1e4d561b47..221b62a59c 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2550,8 +2550,7 @@ static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
 	return 0;
 }
 
-static int __must_check arm_smmu_iotlb_flush(struct domain *d,
-                                             unsigned long bfn,
+static int __must_check arm_smmu_iotlb_flush(struct domain *d, bfn_t bfn,
 					     unsigned int page_count)
 {
 	/* ARM SMMU v1 doesn't have flush by VMA and VMID */
@@ -2737,8 +2736,8 @@ static void arm_smmu_iommu_domain_teardown(struct domain *d)
 	xfree(xen_domain);
 }
 
-static int __must_check arm_smmu_map_page(struct domain *d, unsigned long bfn,
-					  unsigned long mfn, unsigned int flags)
+static int __must_check arm_smmu_map_page(struct domain *d, bfn_t bfn,
+					  mfn_t mfn, unsigned int flags)
 {
 	p2m_type_t t;
 
@@ -2751,7 +2750,7 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long bfn,
 	 * function should only be used by gnttab code with gfn == mfn == bfn.
 	 */
 	BUG_ON(!is_domain_direct_mapped(d));
-	BUG_ON(mfn != bfn);
+	BUG_ON(mfn_x(mfn) != bfn_x(bfn));
 
 	/* We only support readable and writable flags */
 	if (!(flags & (IOMMUF_readable | IOMMUF_writable)))
@@ -2763,10 +2762,11 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long bfn,
 	 * The function guest_physmap_add_entry replaces the current mapping
 	 * if there is already one...
 	 */
-	return guest_physmap_add_entry(d, _gfn(bfn), _mfn(bfn), 0, t);
+	return guest_physmap_add_entry(d, _gfn(bfn_x(bfn)), _mfn(bfn_x(bfn)),
+				       0, t);
 }
 
-static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long bfn)
+static int __must_check arm_smmu_unmap_page(struct domain *d, bfn_t bfn)
 {
 	/*
 	 * This function should only be used by gnttab code when the domain
@@ -2775,7 +2775,7 @@ static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long bfn)
 	if ( !is_domain_direct_mapped(d) )
 		return -EINVAL;
 
-	return guest_physmap_remove_page(d, _gfn(bfn), _mfn(bfn), 0);
+	return guest_physmap_remove_page(d, _gfn(bfn_x(bfn)), _mfn(bfn_x(bfn)), 0);
 }
 
 static const struct iommu_ops arm_smmu_iommu_ops = {
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 44797f9e92..cf70a5c43c 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -194,7 +194,8 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
                   == PGT_writable_page) )
                 mapping |= IOMMUF_writable;
 
-            ret = hd->platform_ops->map_page(d, bfn, mfn, mapping);
+            ret = hd->platform_ops->map_page(d, _bfn(bfn), _mfn(mfn),
+                                             mapping);
             if ( !rc )
                 rc = ret;
 
@@ -264,7 +265,7 @@ int iommu_map_page(struct domain *d, bfn_t bfn, mfn_t mfn,
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->map_page(d, bfn_x(bfn), mfn_x(mfn), flags);
+    rc = hd->platform_ops->map_page(d, bfn, mfn, flags);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
@@ -287,7 +288,7 @@ int iommu_unmap_page(struct domain *d, bfn_t bfn)
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->unmap_page(d, bfn_x(bfn));
+    rc = hd->platform_ops->unmap_page(d, bfn);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
@@ -327,7 +328,7 @@ int iommu_iotlb_flush(struct domain *d, bfn_t bfn, unsigned int page_count)
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
         return 0;
 
-    rc = hd->platform_ops->iotlb_flush(d, bfn_x(bfn), page_count);
+    rc = hd->platform_ops->iotlb_flush(d, bfn, page_count);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 48f62e0e8d..c9f50f04ad 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -584,8 +584,7 @@ static int __must_check iommu_flush_all(void)
     return rc;
 }
 
-static int __must_check iommu_flush_iotlb(struct domain *d,
-                                          unsigned long bfn,
+static int __must_check iommu_flush_iotlb(struct domain *d, bfn_t bfn,
                                           bool_t dma_old_pte_present,
                                           unsigned int page_count)
 {
@@ -612,12 +611,12 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
         if ( iommu_domid == -1 )
             continue;
 
-        if ( page_count != 1 || bfn == bfn_x(INVALID_BFN) )
+        if ( page_count != 1 || bfn_eq(bfn, INVALID_BFN) )
             rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
                                        0, flush_dev_iotlb);
         else
             rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
-                                       __bfn_to_baddr(bfn),
+                                       bfn_to_baddr(bfn),
                                        PAGE_ORDER_4K,
                                        !dma_old_pte_present,
                                        flush_dev_iotlb);
@@ -633,7 +632,7 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
 }
 
 static int __must_check iommu_flush_iotlb_pages(struct domain *d,
-                                                unsigned long bfn,
+                                                bfn_t bfn,
                                                 unsigned int page_count)
 {
     return iommu_flush_iotlb(d, bfn, 1, page_count);
@@ -641,7 +640,7 @@ static int __must_check iommu_flush_iotlb_pages(struct domain *d,
 
 static int __must_check iommu_flush_iotlb_all(struct domain *d)
 {
-    return iommu_flush_iotlb(d, bfn_x(INVALID_BFN), 0, 0);
+    return iommu_flush_iotlb(d, INVALID_BFN, 0, 0);
 }
 
 /* clear one page's page table */
@@ -676,7 +675,7 @@ static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        rc = iommu_flush_iotlb_pages(domain, addr >> PAGE_SHIFT_4K, 1);
+        rc = iommu_flush_iotlb_pages(domain, baddr_to_bfn(addr), 1);
 
     unmap_vtd_domain_page(page);
 
@@ -1767,8 +1766,7 @@ static void iommu_domain_teardown(struct domain *d)
 }
 
 static int __must_check intel_iommu_map_page(struct domain *d,
-                                             unsigned long bfn,
-                                             unsigned long mfn,
+                                             bfn_t bfn, mfn_t mfn,
                                              unsigned int flags)
 {
     struct domain_iommu *hd = dom_iommu(d);
@@ -1786,16 +1784,16 @@ static int __must_check intel_iommu_map_page(struct domain *d,
 
     spin_lock(&hd->arch.mapping_lock);
 
-    pg_maddr = addr_to_dma_page_maddr(d, __bfn_to_baddr(bfn), 1);
+    pg_maddr = addr_to_dma_page_maddr(d, bfn_to_baddr(bfn), 1);
     if ( pg_maddr == 0 )
     {
        spin_unlock(&hd->arch.mapping_lock);
        return -ENOMEM;
     }
     page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
-    pte = page + (bfn & LEVEL_MASK);
+    pte = page + (bfn_x(bfn) & LEVEL_MASK);
     old = *pte;
-    dma_set_pte_addr(new, (paddr_t)mfn << PAGE_SHIFT_4K);
+    dma_set_pte_addr(new, mfn_to_maddr(mfn));
     dma_set_pte_prot(new,
                      ((flags & IOMMUF_readable) ? DMA_PTE_READ  : 0) |
                      ((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
@@ -1823,13 +1821,13 @@ static int __must_check intel_iommu_map_page(struct domain *d,
 }
 
 static int __must_check intel_iommu_unmap_page(struct domain *d,
-                                               unsigned long bfn)
+                                               bfn_t bfn)
 {
     /* Do nothing if hardware domain and iommu supports pass thru. */
     if ( iommu_passthrough && is_hardware_domain(d) )
         return 0;
 
-    return dma_pte_clear_one(d, __bfn_to_baddr(bfn));
+    return dma_pte_clear_one(d, bfn_to_baddr(bfn));
 }
 
 int iommu_pte_flush(struct domain *d, uint64_t bfn, uint64_t *pte,
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index dc37dce4b6..6fed4a92cb 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -121,6 +121,8 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
     {
         unsigned long pfn = pdx_to_pfn(i);
         bool map;
+        bfn_t bfn;
+        mfn_t mfn;
         int rc = 0;
 
         /*
@@ -153,10 +155,11 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
             continue;
 
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
+        bfn = _bfn(pfn * tmp);
+        mfn = _mfn(pfn * tmp);
         for ( j = 0; j < tmp; j++ )
         {
-            int ret = iommu_map_page(d, _bfn(pfn * tmp + j),
-                                     _mfn(pfn * tmp + j),
+            int ret = iommu_map_page(d, bfn_add(bfn, j), mfn_add(mfn, j),
                                      IOMMUF_readable|IOMMUF_writable);
 
             if ( !rc )
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 68182afd91..379882c690 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -65,7 +65,7 @@ int arch_iommu_populate_page_table(struct domain *d)
             {
                 ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
                 BUG_ON(SHARED_M2P(gfn));
-                rc = hd->platform_ops->map_page(d, gfn, mfn,
+                rc = hd->platform_ops->map_page(d, _bfn(gfn), _mfn(mfn),
                                                 IOMMUF_readable | IOMMUF_writable);
             }
 
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 99bc21c7b3..dce9ed6b83 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -52,9 +52,9 @@ int amd_iommu_init(void);
 int amd_iommu_update_ivrs_mapping_acpi(void);
 
 /* mapping functions */
-int __must_check amd_iommu_map_page(struct domain *d, unsigned long gfn,
-                                    unsigned long mfn, unsigned int flags);
-int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+int __must_check amd_iommu_map_page(struct domain *d, bfn_t bfn,
+                                    mfn_t mfn, unsigned int flags);
+int __must_check amd_iommu_unmap_page(struct domain *d, bfn_t bfn);
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
@@ -77,7 +77,7 @@ void iommu_dte_set_guest_cr3(u32 *dte, u16 dom_id, u64 gcr3,
 
 /* send cmd to iommu */
 void amd_iommu_flush_all_pages(struct domain *d);
-void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
+void amd_iommu_flush_pages(struct domain *d, unsigned long bfn,
                            unsigned int order);
 void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
                            uint64_t gaddr, unsigned int order);
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index a3c36c1148..624784fec8 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -48,6 +48,11 @@ static inline bfn_t bfn_add(bfn_t bfn, unsigned long i)
     return _bfn(bfn_x(bfn) + i);
 }
 
+static inline bool_t bfn_eq(bfn_t x, bfn_t y)
+{
+    return bfn_x(x) == bfn_x(y);
+}
+
 #define IOMMU_PAGE_SHIFT 12
 #define IOMMU_PAGE_SIZE  (_AC(1,L) << IOMMU_PAGE_SHIFT)
 #define IOMMU_PAGE_MASK  (~(IOMMU_PAGE_SIZE - 1))
@@ -184,9 +189,9 @@ struct iommu_ops {
 #endif /* HAS_PCI */
 
     void (*teardown)(struct domain *d);
-    int __must_check (*map_page)(struct domain *d, unsigned long bfn,
-                                 unsigned long mfn, unsigned int flags);
-    int __must_check (*unmap_page)(struct domain *d, unsigned long bfn);
+    int __must_check (*map_page)(struct domain *d, bfn_t bfn, mfn_t mfn,
+                                 unsigned int flags);
+    int __must_check (*unmap_page)(struct domain *d, bfn_t bfn);
     void (*free_page_table)(struct page_info *);
 #ifdef CONFIG_X86
     void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
@@ -197,7 +202,7 @@ struct iommu_ops {
     void (*resume)(void);
     void (*share_p2m)(struct domain *d);
     void (*crash_shutdown)(void);
-    int __must_check (*iotlb_flush)(struct domain *d, unsigned long bfn,
+    int __must_check (*iotlb_flush)(struct domain *d, bfn_t bfn,
                                     unsigned int page_count);
    int __must_check (*iotlb_flush_all)(struct domain *d);
    int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
-- 
2.11.0
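[Editorial note, not part of the patch.] The bfn_t and mfn_t handles threaded through iommu_ops above are struct-wrapped integers, so the conversions (_bfn(), bfn_x(), mfn_to_maddr(), bfn_add(), bfn_eq()) compile away entirely while making it a type error to mix bus and machine frame numbers. The bfn_t definition itself comes from an earlier patch in this series; the standalone C sketch below only illustrates the pattern (it uses plain bool instead of Xen's bool_t and is not the exact Xen code).

#include <stdbool.h>

/*
 * Illustrative sketch of a type-safe frame-number wrapper.  Wrapping the
 * raw unsigned long in a one-member struct costs nothing at run time but
 * lets the compiler reject a bus frame number passed where a machine
 * frame number is expected (and vice versa).
 */
typedef struct { unsigned long bfn; } bfn_t;

/* Wrap a raw frame number. */
static inline bfn_t _bfn(unsigned long bfn)
{
    return (bfn_t){ .bfn = bfn };
}

/* Unwrap, e.g. for printing or for callers not yet converted. */
static inline unsigned long bfn_x(bfn_t bfn)
{
    return bfn.bfn;
}

/* Arithmetic and comparison stay inside the wrapper, as in the patch. */
static inline bfn_t bfn_add(bfn_t bfn, unsigned long i)
{
    return _bfn(bfn_x(bfn) + i);
}

static inline bool bfn_eq(bfn_t x, bfn_t y)
{
    return bfn_x(x) == bfn_x(y);
}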