[xen staging] IOMMU: fold flush-all hook into "flush one"
commit 79faa321f2f31d7794604007a290fb6c8fc05035
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Jun 9 10:54:30 2022 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jun 9 10:54:30 2022 +0200

    IOMMU: fold flush-all hook into "flush one"

    Having a separate flush-all hook has always puzzled me somewhat. We
    will want to be able to force a full flush via accumulated flush flags
    from the map/unmap functions. Introduce a respective new flag and fold
    all flush handling into the single remaining hook.

    Note that because of the comments in the SMMU and IPMMU-VMSA code
    noting that the hardware doesn't support selective flushing, I've
    folded the two prior hook functions into one there. For SMMU-v3, which
    has no such comment about incapable hardware, I've left both functions
    in place on the assumption that selective and full flushes will
    eventually want separating.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx> # IPMMU-VMSA, SMMU-V2
    Reviewed-by: Rahul Singh <rahul.singh@xxxxxxx> # SMMUv3
    Acked-by: Julien Grall <jgrall@xxxxxxxxxx> # Arm
    Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/drivers/passthrough/amd/iommu.h         |  1 -
 xen/drivers/passthrough/amd/iommu_map.c     | 18 +++++++-----------
 xen/drivers/passthrough/amd/pci_amd_iommu.c |  1 -
 xen/drivers/passthrough/arm/ipmmu-vmsa.c    | 19 +++++++------------
 xen/drivers/passthrough/arm/smmu-v3.c       |  1 -
 xen/drivers/passthrough/arm/smmu.c          | 19 +++++++------------
 xen/drivers/passthrough/iommu.c             |  9 +++------
 xen/drivers/passthrough/vtd/iommu.c         | 18 ++++++++++--------
 xen/include/xen/iommu.h                     |  3 ++-
 9 files changed, 36 insertions(+), 53 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu.h b/xen/drivers/passthrough/amd/iommu.h
index a05dd112bc..8bc3c35b1b 100644
--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -258,7 +258,6 @@ int cf_check amd_iommu_get_reserved_device_memory(
 int __must_check cf_check amd_iommu_flush_iotlb_pages(
     struct domain *d, dfn_t dfn, unsigned long page_count,
     unsigned int flush_flags);
-int __must_check cf_check amd_iommu_flush_iotlb_all(struct domain *d);
 
 void amd_iommu_print_entries(const struct amd_iommu *iommu, unsigned int dev_id,
                              dfn_t dfn);
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 9dbd44812d..28da5cc85e 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -506,15 +506,18 @@ int cf_check amd_iommu_flush_iotlb_pages(
 {
     unsigned long dfn_l = dfn_x(dfn);
 
-    ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
-    ASSERT(flush_flags);
+    if ( !(flush_flags & IOMMU_FLUSHF_all) )
+    {
+        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+        ASSERT(flush_flags);
+    }
 
     /* Unless a PTE was modified, no flush is required */
     if ( !(flush_flags & IOMMU_FLUSHF_modified) )
         return 0;
 
-    /* If the range wraps then just flush everything */
-    if ( dfn_l + page_count < dfn_l )
+    /* If so requested or if the range wraps then just flush everything. */
+    if ( (flush_flags & IOMMU_FLUSHF_all) || dfn_l + page_count < dfn_l )
     {
         amd_iommu_flush_all_pages(d);
         return 0;
@@ -540,13 +543,6 @@ int cf_check amd_iommu_flush_iotlb_pages(
     return 0;
 }
 
-int cf_check amd_iommu_flush_iotlb_all(struct domain *d)
-{
-    amd_iommu_flush_all_pages(d);
-
-    return 0;
-}
-
 int amd_iommu_reserve_domain_unity_map(struct domain *d,
                                        const struct ivrs_unity_map *map,
                                        unsigned int flag)
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 4bf87e8c03..fd91f1367f 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -759,7 +759,6 @@ static const struct iommu_ops __initconst_cf_clobber _iommu_ops = {
     .map_page = amd_iommu_map_page,
     .unmap_page = amd_iommu_unmap_page,
     .iotlb_flush = amd_iommu_flush_iotlb_pages,
-    .iotlb_flush_all = amd_iommu_flush_iotlb_all,
     .reassign_device = reassign_device,
     .get_device_group_id = amd_iommu_group_id,
     .enable_x2apic = iov_enable_xt,
diff --git a/xen/drivers/passthrough/arm/ipmmu-vmsa.c b/xen/drivers/passthrough/arm/ipmmu-vmsa.c
index 5a7b332bcc..091f09b217 100644
--- a/xen/drivers/passthrough/arm/ipmmu-vmsa.c
+++ b/xen/drivers/passthrough/arm/ipmmu-vmsa.c
@@ -1000,13 +1000,19 @@ out:
 }
 
 /* Xen IOMMU ops */
-static int __must_check ipmmu_iotlb_flush_all(struct domain *d)
+static int __must_check ipmmu_iotlb_flush(struct domain *d, dfn_t dfn,
+                                          unsigned long page_count,
+                                          unsigned int flush_flags)
 {
     struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv;
 
+    ASSERT(flush_flags);
+
     if ( !xen_domain || !xen_domain->root_domain )
         return 0;
 
+    /* The hardware doesn't support selective TLB flush. */
+
     spin_lock(&xen_domain->lock);
     ipmmu_tlb_invalidate(xen_domain->root_domain);
     spin_unlock(&xen_domain->lock);
@@ -1014,16 +1020,6 @@ static int __must_check ipmmu_iotlb_flush_all(struct domain *d)
     return 0;
 }
 
-static int __must_check ipmmu_iotlb_flush(struct domain *d, dfn_t dfn,
-                                          unsigned long page_count,
-                                          unsigned int flush_flags)
-{
-    ASSERT(flush_flags);
-
-    /* The hardware doesn't support selective TLB flush. */
-    return ipmmu_iotlb_flush_all(d);
-}
-
 static struct ipmmu_vmsa_domain *ipmmu_get_cache_domain(struct domain *d,
                                                         struct device *dev)
 {
@@ -1360,7 +1356,6 @@ static const struct iommu_ops ipmmu_iommu_ops =
     .hwdom_init      = arch_iommu_hwdom_init,
     .teardown        = ipmmu_iommu_domain_teardown,
     .iotlb_flush     = ipmmu_iotlb_flush,
-    .iotlb_flush_all = ipmmu_iotlb_flush_all,
     .assign_device   = ipmmu_assign_device,
     .reassign_device = ipmmu_reassign_device,
     .map_page        = arm_iommu_map_page,
diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
index 2822ffe05f..1e857f915a 100644
--- a/xen/drivers/passthrough/arm/smmu-v3.c
+++ b/xen/drivers/passthrough/arm/smmu-v3.c
@@ -3416,7 +3416,6 @@ static const struct iommu_ops arm_smmu_iommu_ops = {
     .hwdom_init = arch_iommu_hwdom_init,
     .teardown = arm_smmu_iommu_xen_domain_teardown,
     .iotlb_flush = arm_smmu_iotlb_flush,
-    .iotlb_flush_all = arm_smmu_iotlb_flush_all,
     .assign_device = arm_smmu_assign_dev,
     .reassign_device = arm_smmu_reassign_dev,
     .map_page = arm_iommu_map_page,
diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index c21c4f3ac0..69511683b4 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2648,11 +2648,17 @@ static int force_stage = 2;
  */
 static u32 platform_features = ARM_SMMU_FEAT_COHERENT_WALK;
 
-static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
+static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
+                                             unsigned long page_count,
+                                             unsigned int flush_flags)
 {
     struct arm_smmu_xen_domain *smmu_domain = dom_iommu(d)->arch.priv;
     struct iommu_domain *cfg;
 
+    ASSERT(flush_flags);
+
+    /* ARM SMMU v1 doesn't have flush by VMA and VMID */
+
     spin_lock(&smmu_domain->lock);
     list_for_each_entry(cfg, &smmu_domain->contexts, list) {
         /*
@@ -2669,16 +2675,6 @@ static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
     return 0;
 }
 
-static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
-                                             unsigned long page_count,
-                                             unsigned int flush_flags)
-{
-    ASSERT(flush_flags);
-
-    /* ARM SMMU v1 doesn't have flush by VMA and VMID */
-    return arm_smmu_iotlb_flush_all(d);
-}
-
 static struct iommu_domain *arm_smmu_get_domain(struct domain *d,
                                                 struct device *dev)
 {
@@ -2863,7 +2859,6 @@ static const struct iommu_ops arm_smmu_iommu_ops = {
     .add_device = arm_smmu_dt_add_device_generic,
     .teardown = arm_smmu_iommu_domain_teardown,
     .iotlb_flush = arm_smmu_iotlb_flush,
-    .iotlb_flush_all = arm_smmu_iotlb_flush_all,
     .assign_device = arm_smmu_assign_dev,
     .reassign_device = arm_smmu_reassign_dev,
     .map_page = arm_iommu_map_page,
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 9393d987c7..75df3aa8dd 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -478,15 +478,12 @@ int iommu_iotlb_flush_all(struct domain *d, unsigned int flush_flags)
     const struct domain_iommu *hd = dom_iommu(d);
     int rc;
 
-    if ( !is_iommu_enabled(d) || !hd->platform_ops->iotlb_flush_all ||
+    if ( !is_iommu_enabled(d) || !hd->platform_ops->iotlb_flush ||
          !flush_flags )
         return 0;
 
-    /*
-     * The operation does a full flush so we don't need to pass the
-     * flush_flags in.
-     */
-    rc = iommu_call(hd->platform_ops, iotlb_flush_all, d);
+    rc = iommu_call(hd->platform_ops, iotlb_flush, d, INVALID_DFN, 0,
+                    flush_flags | IOMMU_FLUSHF_all);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 6d9513ef41..2acacc3679 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -772,18 +772,21 @@ static int __must_check cf_check iommu_flush_iotlb_pages(
     struct domain *d, dfn_t dfn, unsigned long page_count,
     unsigned int flush_flags)
 {
-    ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
-    ASSERT(flush_flags);
+    if ( flush_flags & IOMMU_FLUSHF_all )
+    {
+        dfn = INVALID_DFN;
+        page_count = 0;
+    }
+    else
+    {
+        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+        ASSERT(flush_flags);
+    }
 
     return iommu_flush_iotlb(d, dfn, flush_flags & IOMMU_FLUSHF_modified,
                              page_count);
 }
 
-static int __must_check cf_check iommu_flush_iotlb_all(struct domain *d)
-{
-    return iommu_flush_iotlb(d, INVALID_DFN, 0, 0);
-}
-
 /* clear one page's page table */
 static int dma_pte_clear_one(struct domain *domain, daddr_t addr,
                              unsigned int order,
@@ -3145,7 +3148,6 @@ static const struct iommu_ops __initconst_cf_clobber vtd_ops = {
     .resume = vtd_resume,
     .crash_shutdown = vtd_crash_shutdown,
     .iotlb_flush = iommu_flush_iotlb_pages,
-    .iotlb_flush_all = iommu_flush_iotlb_all,
     .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
     .dump_page_tables = vtd_dump_page_tables,
 };
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index e0f82712ed..79529adf1f 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -147,9 +147,11 @@ enum
 {
     _IOMMU_FLUSHF_added,
     _IOMMU_FLUSHF_modified,
+    _IOMMU_FLUSHF_all,
 };
 #define IOMMU_FLUSHF_added (1u << _IOMMU_FLUSHF_added)
 #define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
+#define IOMMU_FLUSHF_all (1u << _IOMMU_FLUSHF_all)
 
 int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                            unsigned long page_count, unsigned int flags,
@@ -281,7 +283,6 @@ struct iommu_ops {
     int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
                                     unsigned long page_count,
                                     unsigned int flush_flags);
-    int __must_check (*iotlb_flush_all)(struct domain *d);
     int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
     void (*dump_page_tables)(struct domain *d);
 
-- 
generated by git-patchbot for /home/xen/git/xen.git#staging
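
The net effect is that a full flush becomes just another flag passed through
the one remaining hook: iommu_iotlb_flush_all() now calls iotlb_flush with
INVALID_DFN, a zero page count, and the accumulated flags ORed with
IOMMU_FLUSHF_all, and each driver short-circuits to its flush-all path when
it sees that flag. The minimal, self-contained C sketch below models that
calling convention only; the plain iotlb_flush() function and the use of
unsigned long for device frame numbers are simplifications assumed for
illustration, not Xen's actual types or hook plumbing.

#include <assert.h>
#include <stdio.h>

enum {
    _IOMMU_FLUSHF_added,
    _IOMMU_FLUSHF_modified,
    _IOMMU_FLUSHF_all,
};
#define IOMMU_FLUSHF_added    (1u << _IOMMU_FLUSHF_added)
#define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
#define IOMMU_FLUSHF_all      (1u << _IOMMU_FLUSHF_all)

#define INVALID_DFN (~0ul)

/* Stand-in for a driver's single "flush one" hook after the fold: a full
 * flush is requested via IOMMU_FLUSHF_all rather than through a separate
 * iotlb_flush_all hook. */
static int iotlb_flush(unsigned long dfn, unsigned long page_count,
                       unsigned int flush_flags)
{
    if (flush_flags & IOMMU_FLUSHF_all) {
        printf("full IOTLB flush\n");
        return 0;
    }

    /* The selective path keeps the old sanity checks. */
    assert(page_count && dfn != INVALID_DFN);

    /* Unless a PTE was modified, no flush is required. */
    if (flush_flags & IOMMU_FLUSHF_modified)
        printf("flush %lu page(s) starting at dfn %#lx\n", page_count, dfn);

    return 0;
}

int main(void)
{
    unsigned int flush_flags = 0;

    /* Unmap paths accumulate flags describing what changed... */
    flush_flags |= IOMMU_FLUSHF_modified;

    /* ...which a selective flush then consumes... */
    iotlb_flush(0x1000, 16, flush_flags);

    /* ...while a flush-all now travels through the same hook. */
    return iotlb_flush(INVALID_DFN, 0, flush_flags | IOMMU_FLUSHF_all);
}

One benefit of the fold, visible in the diff above: drivers whose hardware
cannot flush selectively (SMMU v1/v2, IPMMU-VMSA) are left with a single
function, while capable hardware (AMD, VT-d) keeps its per-range path and
merely gains an early check for IOMMU_FLUSHF_all.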