[Xen-devel] [Patch v6 02/11] IOMMU/MMU: enhance the call trees of IOMMU unmapping and mapping
From: Quan Xu <quan.xu@xxxxxxxxx>

When IOMMU mapping fails, we issue a best-effort rollback: stop further
IOMMU mapping, unmap the previously established IOMMU maps, and then
report the error up the call trees. When rollback is not feasible for the
hardware domain (in the early initialization phase, or as a trade-off
against complexity), we do things on a best-effort basis, only logging an
error message.

IOMMU unmapping should continue despite an error, in an attempt at
best-effort cleanup.

Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>

v6:
  1. Limit the __must_check annotation to IOMMU functions for this patch set.
  2. Replace 'ret' with the more specific name 'iommu_ret' in
     __get_page_type().
  3. Return the first error instead of the last one.
---
 xen/arch/x86/mm.c               | 13 ++++++++-----
 xen/arch/x86/mm/p2m-ept.c       | 33 ++++++++++++++++++++++++++-------
 xen/arch/x86/mm/p2m-pt.c        | 24 +++++++++++++++++++++---
 xen/arch/x86/mm/p2m.c           | 16 ++++++++++++++--
 xen/drivers/passthrough/iommu.c | 14 +++++++++++++-
 5 files changed, 82 insertions(+), 18 deletions(-)
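As an aside for readers, the rollback policy described above reduces to the
pattern below. This is a minimal, self-contained C sketch rather than Xen
code: map_one() and unmap_one() are hypothetical stand-ins for
iommu_map_page() and iommu_unmap_page(), and the failure point is simulated.

#include <stdio.h>

/* Hypothetical stand-in for iommu_map_page(); simulates failure at gfn 3. */
static int map_one(unsigned long gfn)
{
    return (gfn == 3) ? -12 /* -ENOMEM */ : 0;
}

/* Hypothetical stand-in for iommu_unmap_page(). */
static void unmap_one(unsigned long gfn)
{
    printf("rolled back gfn %lu\n", gfn);
}

/* Map 'count' pages starting at 'gfn'; on failure, undo what was mapped. */
static int map_range(unsigned long gfn, unsigned long count)
{
    unsigned long i;
    int rc = 0;

    for ( i = 0; i < count; i++ )
    {
        rc = map_one(gfn + i);
        if ( rc )
        {
            while ( i-- )             /* best-effort rollback */
                unmap_one(gfn + i);
            break;                    /* report the error up the call tree */
        }
    }

    return rc;
}

int main(void)
{
    printf("map_range() returned %d\n", map_range(0, 8));
    return 0;
}

With the simulated failure at gfn 3, this rolls back gfns 2, 1 and 0 and
returns the original error to the caller.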
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8d10a3e..ae7c8ab 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2467,7 +2467,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
                            int preemptible)
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
-    int rc = 0;
+    int rc = 0, iommu_ret = 0;
 
     ASSERT(!(type & ~(PGT_type_mask | PGT_pae_xen_l2)));
 
@@ -2578,11 +2578,11 @@ static int __get_page_type(struct page_info *page, unsigned long type,
         if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
         {
             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+                iommu_ret = iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
             else if ( type == PGT_writable_page )
-                iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
-                               page_to_mfn(page),
-                               IOMMUF_readable|IOMMUF_writable);
+                iommu_ret = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+                                           page_to_mfn(page),
+                                           IOMMUF_readable|IOMMUF_writable);
         }
     }
 
@@ -2599,6 +2599,9 @@ static int __get_page_type(struct page_info *page, unsigned long type,
     if ( (x & PGT_partial) && !(nx & PGT_partial) )
         put_page(page);
 
+    if ( !rc )
+        rc = iommu_ret;
+
     return rc;
 }
 
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 1ed5b47..ebb4343 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -667,6 +667,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     unsigned long gfn_remainder = gfn;
     unsigned int i, target = order / EPT_TABLE_ORDER;
     int ret, rc = 0;
+    bool_t entry_written = 0;
     bool_t direct_mmio = (p2mt == p2m_mmio_direct);
     uint8_t ipat = 0;
     bool_t need_modify_vtd_table = 1;
@@ -812,10 +813,15 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     rc = atomic_write_ept_entry(ept_entry, new_entry, target);
     if ( unlikely(rc) )
         old_entry.epte = 0;
-    else if ( p2mt != p2m_invalid &&
-              (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
-        /* Track the highest gfn for which we have ever had a valid mapping */
-        p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+    else
+    {
+        entry_written = 1;
+
+        if ( p2mt != p2m_invalid &&
+             (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
+            /* Track the highest gfn for which we have ever had a valid mapping */
+            p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+    }
 
 out:
     if ( needs_sync )
@@ -831,10 +837,23 @@ out:
         {
             if ( iommu_flags )
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+                {
+                    rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+                    if ( unlikely(rc) )
+                    {
+                        while ( i-- )
+                            iommu_unmap_page(p2m->domain, gfn + i);
+
+                        break;
+                    }
+                }
             else
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_unmap_page(d, gfn + i);
+                {
+                    ret = iommu_unmap_page(d, gfn + i);
+                    if ( !rc )
+                        rc = ret;
+                }
         }
     }
 
@@ -847,7 +866,7 @@ out:
     if ( is_epte_present(&old_entry) )
         ept_free_entry(p2m, &old_entry, target);
 
-    if ( rc == 0 && p2m_is_hostp2m(p2m) )
+    if ( entry_written && p2m_is_hostp2m(p2m) )
         p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
 
     return rc;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3d80612..262f500 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -673,6 +673,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     if ( iommu_enabled && need_iommu(p2m->domain) &&
          (iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
     {
+        int ret;
+
         if ( iommu_use_hap_pt(p2m->domain) )
         {
             if ( iommu_old_flags )
@@ -680,11 +682,27 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         }
         else if ( iommu_pte_flags )
             for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
-                               iommu_pte_flags);
+            {
+                ret = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
+                                     iommu_pte_flags);
+                if ( unlikely(ret) )
+                {
+                    while ( i-- )
+                        iommu_unmap_page(p2m->domain, gfn + i);
+
+                    if ( !rc )
+                        rc = ret;
+
+                    break;
+                }
+            }
         else
             for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_unmap_page(p2m->domain, gfn + i);
+            {
+                ret = iommu_unmap_page(p2m->domain, gfn + i);
+                if ( !rc )
+                    rc = ret;
+            }
     }
 
     /*
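The unmap paths in the hunks above deliberately keep iterating past
failures and latch the first error for the caller (point 3 in the v6
notes). A minimal, self-contained C sketch of that idiom, again with a
hypothetical unmap_one() and simulated error codes, not Xen code:

#include <stdio.h>

/* Hypothetical stand-in for iommu_unmap_page(); fails at gfns 2 and 5. */
static int unmap_one(unsigned long gfn)
{
    if ( gfn == 2 )
        return -22; /* -EINVAL */
    if ( gfn == 5 )
        return -12; /* -ENOMEM */
    return 0;
}

/* Unmap 'count' pages from 'gfn', continuing past errors. */
static int unmap_range(unsigned long gfn, unsigned long count)
{
    unsigned long i;
    int rc = 0;

    for ( i = 0; i < count; i++ )
    {
        int ret = unmap_one(gfn + i); /* continue despite errors */

        if ( !rc )                    /* keep only the first error */
            rc = ret;
    }

    return rc;
}

int main(void)
{
    printf("unmap_range() returned %d\n", unmap_range(0, 8));
    return 0;
}

Here unmap_range() still attempts every unmap and returns -22 (the first
failure) rather than the later -12.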
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 9b19769..dd2c74c 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -641,10 +641,21 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
 
     if ( !paging_mode_translate(p2m->domain) )
     {
+        int rc = 0;
+
         if ( need_iommu(p2m->domain) )
+        {
             for ( i = 0; i < (1 << page_order); i++ )
-                iommu_unmap_page(p2m->domain, mfn + i);
-        return 0;
+            {
+                int ret;
+
+                ret = iommu_unmap_page(p2m->domain, mfn + i);
+                if ( !rc )
+                    rc = ret;
+            }
+        }
+
+        return rc;
     }
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
@@ -2535,6 +2546,7 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
         if ( mfn_valid(mfn) )
             p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K);
         rc = 0;
+        goto out;
     }
 
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 673e126..25501cd 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -171,20 +171,32 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
     {
         struct page_info *page;
         unsigned int i = 0;
+        int rc = 0;
+
         page_list_for_each ( page, &d->page_list )
         {
             unsigned long mfn = page_to_mfn(page);
             unsigned long gfn = mfn_to_gmfn(d, mfn);
             unsigned int mapping = IOMMUF_readable;
+            int ret;
 
             if ( ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
                  ((page->u.inuse.type_info & PGT_type_mask)
                   == PGT_writable_page) )
                 mapping |= IOMMUF_writable;
-            hd->platform_ops->map_page(d, gfn, mfn, mapping);
+
+            ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+            if ( !rc )
+                rc = ret;
+
             if ( !(i++ & 0xfffff) )
                 process_pending_softirqs();
         }
+
+        if ( rc )
+            printk(XENLOG_WARNING
+                   "d%d: IOMMU mapping failed: %d for hardware domain\n",
+                   d->domain_id, rc);
     }
 
     return hd->platform_ops->hwdom_init(d);
-- 
1.9.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel