[Xen-devel] [Patch v6 08/11] IOMMU: propagate IOMMU Device-TLB flush error (leaf ones).
From: Quan Xu <quan.xu@xxxxxxxxx>

Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Feng Wu <feng.wu@xxxxxxxxx>

v6: Add __must_check annotation to intel_iommu_iotlb_flush_all().
---
 xen/drivers/passthrough/arm/smmu.c  | 13 ++++++++-----
 xen/drivers/passthrough/iommu.c     |  8 ++------
 xen/drivers/passthrough/vtd/iommu.c | 32 ++++++++++++++++++++------------
 xen/include/xen/iommu.h             |  5 +++--
 4 files changed, 33 insertions(+), 25 deletions(-)

diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index ee5c89d..1d21568 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2540,7 +2540,7 @@ static int force_stage = 2;
  */
 static u32 platform_features = ARM_SMMU_FEAT_COHERENT_WALK;
 
-static void arm_smmu_iotlb_flush_all(struct domain *d)
+static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
 {
 	struct arm_smmu_xen_domain *smmu_domain = dom_iommu(d)->arch.priv;
 	struct iommu_domain *cfg;
@@ -2557,13 +2557,16 @@ static void arm_smmu_iotlb_flush_all(struct domain *d)
 		arm_smmu_tlb_inv_context(cfg->priv);
 	}
 	spin_unlock(&smmu_domain->lock);
+
+	return 0;
 }
 
-static void arm_smmu_iotlb_flush(struct domain *d, unsigned long gfn,
-                                 unsigned int page_count)
+static int __must_check arm_smmu_iotlb_flush(struct domain *d,
+                                             unsigned long gfn,
+                                             unsigned int page_count)
 {
-	/* ARM SMMU v1 doesn't have flush by VMA and VMID */
-	arm_smmu_iotlb_flush_all(d);
+	/* ARM SMMU v1 doesn't have flush by VMA and VMID */
+	return arm_smmu_iotlb_flush_all(d);
 }
 
 static struct iommu_domain *arm_smmu_get_domain(struct domain *d,
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index e611e72..098b601 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -320,9 +320,7 @@ int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
         return 0;
 
-    hd->platform_ops->iotlb_flush(d, gfn, page_count);
-
-    return 0;
+    return hd->platform_ops->iotlb_flush(d, gfn, page_count);
 }
 
 int iommu_iotlb_flush_all(struct domain *d)
@@ -332,9 +330,7 @@ int iommu_iotlb_flush_all(struct domain *d)
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush_all )
         return 0;
 
-    hd->platform_ops->iotlb_flush_all(d);
-
-    return 0;
+    return hd->platform_ops->iotlb_flush_all(d);
 }
 
 int __init iommu_setup(void)
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index c2bf1e2..0788a59 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -559,8 +559,10 @@ static int __must_check iommu_flush_all(void)
     return 0;
 }
 
-static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
-        int dma_old_pte_present, unsigned int page_count)
+static int __must_check iommu_flush_iotlb(struct domain *d,
+                                          unsigned long gfn,
+                                          bool_t dma_old_pte_present,
+                                          unsigned int page_count)
 {
     struct domain_iommu *hd = dom_iommu(d);
     struct acpi_drhd_unit *drhd;
@@ -598,16 +600,20 @@ static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
                 iommu_flush_write_buffer(iommu);
         }
     }
+
+    return 0;
 }
 
-static void intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
+static int __must_check iommu_flush_iotlb_pages(struct domain *d,
+                                                unsigned long gfn,
+                                                unsigned int page_count)
 {
-    __intel_iommu_iotlb_flush(d, gfn, 1, page_count);
+    return iommu_flush_iotlb(d, gfn, 1, page_count);
 }
 
-static void intel_iommu_iotlb_flush_all(struct domain *d)
+static int __must_check iommu_flush_iotlb_all(struct domain *d)
 {
-    __intel_iommu_iotlb_flush(d, INVALID_GFN, 0, 0);
+    return iommu_flush_iotlb(d, INVALID_GFN, 0, 0);
 }
 
 /* clear one page's page table */
@@ -616,6 +622,7 @@ static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
     struct domain_iommu *hd = dom_iommu(domain);
     struct dma_pte *page = NULL, *pte = NULL;
     u64 pg_maddr;
+    int rc = 0;
 
     spin_lock(&hd->arch.mapping_lock);
     /* get last level pte */
@@ -641,11 +648,11 @@ static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K, 1, 1);
+        rc = iommu_flush_iotlb_pages(domain, addr >> PAGE_SHIFT_4K, 1);
 
     unmap_vtd_domain_page(page);
 
-    return 0;
+    return rc;
 }
 
 static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1699,6 +1706,7 @@ static int __must_check intel_iommu_map_page(struct domain *d,
     struct domain_iommu *hd = dom_iommu(d);
     struct dma_pte *page = NULL, *pte = NULL, old, new = { 0 };
     u64 pg_maddr;
+    int rc = 0;
 
     /* Do nothing if VT-d shares EPT page table */
     if ( iommu_use_hap_pt(d) )
@@ -1741,9 +1749,9 @@ static int __must_check intel_iommu_map_page(struct domain *d,
     unmap_vtd_domain_page(page);
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old), 1);
+        rc = iommu_flush_iotlb(d, gfn, dma_pte_present(old), 1);
 
-    return 0;
+    return rc;
 }
 
 static int __must_check intel_iommu_unmap_page(struct domain *d,
@@ -2574,8 +2582,8 @@ const struct iommu_ops intel_iommu_ops = {
     .resume = vtd_resume,
     .share_p2m = iommu_set_pgd,
     .crash_shutdown = vtd_crash_shutdown,
-    .iotlb_flush = intel_iommu_iotlb_flush,
-    .iotlb_flush_all = intel_iommu_iotlb_flush_all,
+    .iotlb_flush = iommu_flush_iotlb_pages,
+    .iotlb_flush_all = iommu_flush_iotlb_all,
     .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
     .dump_p2m_table = vtd_dump_p2m_table,
 };
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index d197cd0..e917031 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -179,8 +179,9 @@ struct iommu_ops {
     void (*resume)(void);
     void (*share_p2m)(struct domain *d);
     void (*crash_shutdown)(void);
-    void (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
-    void (*iotlb_flush_all)(struct domain *d);
+    int __must_check (*iotlb_flush)(struct domain *d, unsigned long gfn,
+                                    unsigned int page_count);
+    int __must_check (*iotlb_flush_all)(struct domain *d);
     int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
     void (*dump_p2m_table)(struct domain *d);
 };
-- 
1.9.1
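For reference, a minimal sketch of how callers are expected to consume the flush result once the iotlb_flush hooks return int and carry __must_check; this is not part of the patch, and the wrapper name example_unmap_and_flush and its error handling are illustrative assumptions only:

/* Illustrative sketch (not in this patch): with iommu_iotlb_flush()
 * returning int, a caller checks and propagates the flush error
 * instead of silently dropping it. */
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/iommu.h>

static int example_unmap_and_flush(struct domain *d, unsigned long gfn)
{
    int rc = iommu_unmap_page(d, gfn);

    if ( rc )
        return rc;

    /* Flush a single page; the return value must now be consumed. */
    rc = iommu_iotlb_flush(d, gfn, 1);
    if ( rc )
        printk(XENLOG_ERR "d%d: IOTLB flush of gfn %lx failed: %d\n",
               d->domain_id, gfn, rc);

    return rc;
}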