[Xen-devel] [PATCH v7 05/11] IOMMU/MMU: propagate IOMMU Device-TLB flush error up to iommu_iotlb_flush{,_all} (top level ones)
From: Quan Xu <quan.xu@xxxxxxxxx>

Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/arm/p2m.c                  |  4 +++-
 xen/common/memory.c                 | 12 ++++++++++--
 xen/drivers/passthrough/iommu.c     | 13 +++++++++----
 xen/drivers/passthrough/x86/iommu.c |  5 +++--
 xen/include/xen/iommu.h             |  5 +++--
 5 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 6a19c57..65d8f1a 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1178,7 +1178,9 @@ out:
     if ( flush )
     {
         flush_tlb_domain(d);
-        iommu_iotlb_flush(d, sgfn, egfn - sgfn);
+        ret = iommu_iotlb_flush(d, sgfn, egfn - sgfn);
+        if ( !rc )
+            rc = ret;
     }
 
     while ( (pg = page_list_remove_head(&free_pages)) )
diff --git a/xen/common/memory.c b/xen/common/memory.c
index ccc6436..46b1d9f 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -683,9 +683,17 @@ static int xenmem_add_to_physmap(struct domain *d,
 #ifdef CONFIG_HAS_PASSTHROUGH
     if ( need_iommu(d) )
     {
+        int ret;
+
         this_cpu(iommu_dont_flush_iotlb) = 0;
-        iommu_iotlb_flush(d, xatp->idx - done, done);
-        iommu_iotlb_flush(d, xatp->gpfn - done, done);
+
+        ret = iommu_iotlb_flush(d, xatp->idx - done, done);
+        if ( unlikely(ret) && rc >= 0 )
+            rc = ret;
+
+        ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
+        if ( unlikely(ret) && rc >= 0 )
+            rc = ret;
     }
 #endif
 
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index ec85352..3a73fab 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -311,24 +311,29 @@ static void iommu_free_pagetables(unsigned long unused)
                             cpumask_cycle(smp_processor_id(), &cpu_online_map));
 }
 
-void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
+int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
+                      unsigned int page_count)
 {
     const struct domain_iommu *hd = dom_iommu(d);
 
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
-        return;
+        return 0;
 
     hd->platform_ops->iotlb_flush(d, gfn, page_count);
+
+    return 0;
 }
 
-void iommu_iotlb_flush_all(struct domain *d)
+int iommu_iotlb_flush_all(struct domain *d)
 {
     const struct domain_iommu *hd = dom_iommu(d);
 
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush_all )
-        return;
+        return 0;
 
     hd->platform_ops->iotlb_flush_all(d);
+
+    return 0;
 }
 
 int __init iommu_setup(void)
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index b64b98f..a18a608 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -104,8 +104,9 @@ int arch_iommu_populate_page_table(struct domain *d)
     this_cpu(iommu_dont_flush_iotlb) = 0;
 
     if ( !rc )
-        iommu_iotlb_flush_all(d);
-    else if ( rc != -ERESTART )
+        rc = iommu_iotlb_flush_all(d);
+
+    if ( rc && rc != -ERESTART )
         iommu_teardown(d);
 
     return rc;
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 2b86710..57c9fbc 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -200,8 +200,9 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
 int iommu_do_domctl(struct xen_domctl *, struct domain *d,
                     XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
 
-void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
-void iommu_iotlb_flush_all(struct domain *d);
+int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
+                                   unsigned int page_count);
+int __must_check iommu_iotlb_flush_all(struct domain *d);
 
 /*
  * The purpose of the iommu_dont_flush_iotlb optional cpu flag is to
-- 
1.9.1
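For reference, the call sites touched above all follow the same pattern: every flush return value is checked, but only the first error is latched into rc, so a later failure cannot overwrite an earlier one. Below is a minimal standalone sketch of that pattern; flush_range() is a hypothetical stand-in for iommu_iotlb_flush{,_all}(), not Xen code.

#include <stdio.h>

/* Hypothetical flush stub: ranges starting above gfn 1 "fail" with -5. */
static int flush_range(unsigned long gfn, unsigned int count)
{
    (void)count;                       /* count unused in this sketch */
    return (gfn > 1) ? -5 : 0;
}

int main(void)
{
    int rc = 0, ret;

    ret = flush_range(0, 16);          /* succeeds: ret == 0, rc stays 0 */
    if ( ret && !rc )
        rc = ret;

    ret = flush_range(2, 16);          /* fails: first error latched in rc */
    if ( ret && !rc )
        rc = ret;

    ret = flush_range(3, 16);          /* fails again: rc already set, kept */
    if ( ret && !rc )
        rc = ret;

    printf("rc = %d\n", rc);           /* prints "rc = -5" */
    return rc ? 1 : 0;
}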