[Xen-devel] [Patch v6 09/11] vt-d: fix the IOMMU flush issue
From: Quan Xu <quan.xu@xxxxxxxxx>

The value propagated from the IOMMU flush interfaces may be positive,
which indicates that the caller needs to flush the cache; it is not a
failure. When the propagation value is positive, this patch fixes the
flush issue as follows:
  - call iommu_flush_write_buffer() to flush the cache.
  - return zero.
(A stand-alone sketch of this return-value convention follows the
diffstat below.)

Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>

CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Feng Wu <feng.wu@xxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

v6:
  1. Drop blank lines.
  2. Change '(((u16)bus) << 8) | devfn' to 'PCI_BDF2(bus, devfn)'
     (see the BDF encoding sketch after the diff).
  3. Introduce rc for iommu_flush_context_device() / ret for
     iommu_flush_iotlb_dsi(), with ASSERT() checks after the calls.
  4. Enhance comments.
  5. Fix iommu_flush_context_*().
---
 xen/drivers/passthrough/vtd/iommu.c | 170 +++++++++++++++++++++++++++---------
 1 file changed, 128 insertions(+), 42 deletions(-)
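To make the return-value convention above concrete, here is a minimal,
stand-alone C sketch of the pattern each flush site adopts in the diff.
flush_hw(), flush_write_buffer() and do_flush() are hypothetical
stand-ins, not Xen functions; only the convention itself (negative =
failure, zero = success, positive = the caller must flush the write
buffer and then report success) mirrors the patch.

/*
 * Minimal stand-alone sketch of the return-value convention used by
 * this patch.  flush_hw(), flush_write_buffer() and do_flush() are
 * hypothetical stand-ins, not Xen functions.
 */
#include <assert.h>
#include <stdio.h>

/* Pretend hardware flush: < 0 failure, 0 success, > 0 "flush needed". */
static int flush_hw(int simulated_status)
{
    return simulated_status;
}

static void flush_write_buffer(void)
{
    puts("write buffer flushed");
}

static int do_flush(int simulated_status)
{
    int rc = flush_hw(simulated_status);

    if ( rc > 0 )
    {
        /* Positive is not an error: flush the write buffer instead. */
        flush_write_buffer();
        rc = 0;
    }

    /* Here rc is zero on success or negative on failure. */
    return rc;
}

int main(void)
{
    assert(do_flush(1) == 0);    /* positive collapses to success */
    assert(do_flush(0) == 0);    /* success stays success */
    assert(do_flush(-1) == -1);  /* failure propagates */
    return 0;
}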
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 0788a59..720b867 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -388,17 +388,18 @@ static int flush_context_reg(
     return 0;
 }
 
-static int iommu_flush_context_global(
-    struct iommu *iommu, int flush_non_present_entry)
+static int __must_check iommu_flush_context_global(struct iommu *iommu,
+                                                   int flush_non_present_entry)
 {
     struct iommu_flush *flush = iommu_get_flush(iommu);
     return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
                           flush_non_present_entry);
 }
 
-static int iommu_flush_context_device(
-    struct iommu *iommu, u16 did, u16 source_id,
-    u8 function_mask, int flush_non_present_entry)
+static int __must_check iommu_flush_context_device(struct iommu *iommu,
+                                                   u16 did, u16 source_id,
+                                                   u8 function_mask,
+                                                   int flush_non_present_entry)
 {
     struct iommu_flush *flush = iommu_get_flush(iommu);
     return flush->context(iommu, did, source_id, function_mask,
@@ -473,8 +474,9 @@ static int flush_iotlb_reg(void *_iommu, u16 did,
     return 0;
 }
 
-static int iommu_flush_iotlb_global(struct iommu *iommu,
-    int flush_non_present_entry, int flush_dev_iotlb)
+static int __must_check iommu_flush_iotlb_global(struct iommu *iommu,
+                                                 int flush_non_present_entry,
+                                                 int flush_dev_iotlb)
 {
     struct iommu_flush *flush = iommu_get_flush(iommu);
     int status;
@@ -491,8 +493,9 @@ static int iommu_flush_iotlb_global(struct iommu *iommu,
     return status;
 }
 
-static int iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
-    int flush_non_present_entry, int flush_dev_iotlb)
+static int __must_check iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
+                                              int flush_non_present_entry,
+                                              int flush_dev_iotlb)
 {
     struct iommu_flush *flush = iommu_get_flush(iommu);
     int status;
@@ -509,9 +512,10 @@ static int iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
     return status;
 }
 
-static int iommu_flush_iotlb_psi(
-    struct iommu *iommu, u16 did, u64 addr, unsigned int order,
-    int flush_non_present_entry, int flush_dev_iotlb)
+static int __must_check iommu_flush_iotlb_psi(struct iommu *iommu, u16 did,
+                                              u64 addr, unsigned int order,
+                                              int flush_non_present_entry,
+                                              int flush_dev_iotlb)
 {
     struct iommu_flush *flush = iommu_get_flush(iommu);
     int status;
@@ -546,17 +550,43 @@ static int __must_check iommu_flush_all(void)
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     int flush_dev_iotlb;
+    int rc = 0;
 
     flush_all_cache();
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
-        iommu_flush_context_global(iommu, 0);
-        flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
-        iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb);
+        /*
+         * The current logic for rc returns:
+         *   - positive  invoke iommu_flush_write_buffer to flush cache.
+         *   - zero      on success.
+         *   - negative  on failure. Continue to flush IOMMU IOTLB on a
+         *     best effort basis.
+         *
+         * Moreover, IOMMU flush handlers flush_context_qi and flush_iotlb_qi
+         * (or flush_context_reg and flush_iotlb_reg, deep functions in the
+         * call trees of iommu_flush_context_global and iommu_flush_iotlb_global)
+         * are with the same logic to bubble up positive return value.
+         */
+        rc = iommu_flush_context_global(iommu, 0);
+        if ( rc <= 0 )
+        {
+            int ret;
+
+            flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+            ret = iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb);
+            ASSERT(ret <= 0);
+            if ( !rc )
+                rc = ret;
+        }
+        else
+        {
+            iommu_flush_write_buffer(iommu);
+            rc = 0;
+        }
     }
 
-    return 0;
+    return rc;
 }
 
 static int __must_check iommu_flush_iotlb(struct domain *d,
@@ -569,6 +599,7 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
     struct iommu *iommu;
     int flush_dev_iotlb;
     int iommu_domid;
+    int rc = 0;
 
     /*
      * No need pcideves_lock here because we have flush
@@ -587,21 +618,23 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
             continue;
 
         if ( page_count != 1 || gfn == INVALID_GFN )
-        {
-            if ( iommu_flush_iotlb_dsi(iommu, iommu_domid,
-                        0, flush_dev_iotlb) )
-                iommu_flush_write_buffer(iommu);
-        }
+            rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
+                                       0, flush_dev_iotlb);
         else
+            rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
+                                       (paddr_t)gfn << PAGE_SHIFT_4K,
+                                       PAGE_ORDER_4K,
+                                       !dma_old_pte_present,
+                                       flush_dev_iotlb);
+
+        if ( rc > 0 )
         {
-            if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
-                        (paddr_t)gfn << PAGE_SHIFT_4K, PAGE_ORDER_4K,
-                        !dma_old_pte_present, flush_dev_iotlb) )
-                iommu_flush_write_buffer(iommu);
+            iommu_flush_write_buffer(iommu);
+            rc = 0;
         }
     }
 
-    return 0;
+    return rc;
 }
 
 static int __must_check iommu_flush_iotlb_pages(struct domain *d,
@@ -1291,6 +1324,7 @@ int domain_context_mapping_one(
     u64 maddr, pgd_maddr;
     u16 seg = iommu->intel->drhd->segment;
     int agaw;
+    int rc;
 
     ASSERT(pcidevs_locked());
     spin_lock(&iommu->lock);
@@ -1404,13 +1438,35 @@ int domain_context_mapping_one(
     spin_unlock(&iommu->lock);
 
     /* Context entry was previously non-present (with domid 0). */
-    if ( iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
-                                    DMA_CCMD_MASK_NOBIT, 1) )
-        iommu_flush_write_buffer(iommu);
-    else
+    rc = iommu_flush_context_device(iommu, 0, PCI_BDF2(bus, devfn),
+                                    DMA_CCMD_MASK_NOBIT, 1);
+
+    /*
+     * The current logic for rc returns:
+     *   - positive  invoke iommu_flush_write_buffer to flush cache.
+     *   - zero      on success.
+     *   - negative  on failure. Continue to flush IOMMU IOTLB on a
+     *     best effort basis.
+     *
+     * Moreover, IOMMU flush handlers flush_context_qi or flush_iotlb_qi
+     * (or flush_context_reg and flush_iotlb_reg, deep functions in the
+     * call trees of iommu_flush_context_device and iommu_flush_iotlb_dsi)
+     * are with the same logic to bubble up positive return value.
+     */
+    if ( rc <= 0 )
     {
         int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
-        iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
+        int ret;
+
+        ret = iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
+        ASSERT(ret <= 0);
+        if ( !rc )
+            rc = ret;
+    }
+    else
+    {
+        iommu_flush_write_buffer(iommu);
+        rc = 0;
     }
 
     set_bit(iommu->index, &hd->arch.iommu_bitmap);
@@ -1420,7 +1476,7 @@ int domain_context_mapping_one(
     if ( !seg )
         me_wifi_quirk(domain, bus, devfn, MAP_ME_PHANTOM_FUNC);
 
-    return 0;
+    return rc;
 }
 
 static int domain_context_mapping(
@@ -1515,6 +1571,7 @@ int domain_context_unmap_one(
     struct context_entry *context, *context_entries;
     u64 maddr;
     int iommu_domid;
+    int rc;
 
     ASSERT(pcidevs_locked());
     spin_lock(&iommu->lock);
@@ -1535,6 +1592,7 @@ int domain_context_unmap_one(
     iommu_flush_cache_entry(context, sizeof(struct context_entry));
 
     iommu_domid= domain_iommu_domid(domain, iommu);
+
     if ( iommu_domid == -1 )
     {
         spin_unlock(&iommu->lock);
@@ -1542,14 +1600,36 @@ int domain_context_unmap_one(
         return -EINVAL;
     }
 
-    if ( iommu_flush_context_device(iommu, iommu_domid,
-                                    (((u16)bus) << 8) | devfn,
-                                    DMA_CCMD_MASK_NOBIT, 0) )
-        iommu_flush_write_buffer(iommu);
-    else
+    rc = iommu_flush_context_device(iommu, iommu_domid,
+                                    PCI_BDF2(bus, devfn),
+                                    DMA_CCMD_MASK_NOBIT, 0);
+
+    /*
+     * The current logic for rc returns:
+     *   - positive  invoke iommu_flush_write_buffer to flush cache.
+     *   - zero      on success.
+     *   - negative  on failure. Continue to flush IOMMU IOTLB on a
+     *     best effort basis.
+     *
+     * Moreover, IOMMU flush handlers flush_context_qi or flush_iotlb_qi
+     * (or flush_context_reg and flush_iotlb_reg, deep functions in the
+     * call trees of iommu_flush_context_device and iommu_flush_iotlb_dsi)
+     * are with the same logic to bubble up positive return value.
+     */
+    if ( rc <= 0 )
     {
         int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
-        iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb);
+        int ret;
+
+        ret = iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb);
+        ASSERT(ret <= 0);
+        if ( !rc )
+            rc = ret;
+    }
+    else
+    {
+        iommu_flush_write_buffer(iommu);
+        rc = 0;
     }
 
     spin_unlock(&iommu->lock);
@@ -1558,7 +1638,7 @@ int domain_context_unmap_one(
     if ( !iommu->intel->drhd->segment )
         me_wifi_quirk(domain, bus, devfn, UNMAP_ME_PHANTOM_FUNC);
 
-    return 0;
+    return rc;
 }
 
 static int domain_context_unmap(
@@ -1772,6 +1852,7 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
     struct domain_iommu *hd = dom_iommu(d);
     int flush_dev_iotlb;
     int iommu_domid;
+    int rc = 0;
 
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
@@ -1785,13 +1866,18 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
         iommu_domid= domain_iommu_domid(d, iommu);
         if ( iommu_domid == -1 )
             continue;
-        if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
+
+        rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
                                    (paddr_t)gfn << PAGE_SHIFT_4K,
-                                   order, !present, flush_dev_iotlb) )
+                                   order, !present, flush_dev_iotlb);
+        if ( rc > 0 )
+        {
             iommu_flush_write_buffer(iommu);
+            rc = 0;
+        }
     }
 
-    return 0;
+    return rc;
 }
 
 static int __init vtd_ept_page_compatible(struct iommu *iommu)
-- 
1.9.1
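A note on change 2 in the v6 list above: PCI_BDF2() packs bus and devfn
into the same 16-bit source id as the open-coded expression it
replaces. The following stand-alone sketch illustrates the encoding;
the local PCI_BDF2 definition mirrors the one in xen/include/xen/pci.h
and is repeated here only so the example compiles on its own, and the
sample bus/devfn values are arbitrary.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Local copy of Xen's PCI_BDF2() (xen/include/xen/pci.h) so this
 * example is self-contained.
 */
#define PCI_BDF2(b, df) ((((b) & 0xff) << 8) | ((df) & 0xff))

int main(void)
{
    uint8_t bus = 0x3f, devfn = 0xa8;   /* device 0x15, function 0 */

    /* The macro is equivalent to the open-coded form it replaces. */
    assert(PCI_BDF2(bus, devfn) == ((((uint16_t)bus) << 8) | devfn));

    printf("source_id = %#06x\n", PCI_BDF2(bus, devfn));  /* 0x3fa8 */
    return 0;
}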