[xen staging-4.11] vtd: prune (and rename) cache flush functions
commit 2d11e6d694877620d7784a0bcebf9e869d41a14c
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Tue Jul  7 15:22:16 2020 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Jul  7 15:22:16 2020 +0200

    vtd: prune (and rename) cache flush functions

    Rename __iommu_flush_cache to iommu_sync_cache and remove
    iommu_flush_cache_page. Also remove the iommu_flush_cache_entry wrapper
    and just use iommu_sync_cache instead. Note the _entry suffix was
    meaningless as the wrapper was already taking a size parameter in
    bytes. While there also constify the addr parameter.

    No functional change intended.

    This is part of XSA-321.

    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    master commit: 62298825b9a44f45761acbd758138b5ba059ebd1
    master date: 2020-07-07 14:38:13 +0200
---
 xen/drivers/passthrough/vtd/extern.h   |  3 +--
 xen/drivers/passthrough/vtd/intremap.c |  6 +++---
 xen/drivers/passthrough/vtd/iommu.c    | 33 ++++++++++++---------------------
 3 files changed, 16 insertions(+), 26 deletions(-)

diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
index d698b1d50a..87d5ed78a2 100644
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -37,8 +37,7 @@ void disable_qinval(struct iommu *iommu);
 int enable_intremap(struct iommu *iommu, int eim);
 void disable_intremap(struct iommu *iommu);
 
-void iommu_flush_cache_entry(void *addr, unsigned int size);
-void iommu_flush_cache_page(void *addr, unsigned long npages);
+void iommu_sync_cache(const void *addr, unsigned int size);
 int iommu_alloc(struct acpi_drhd_unit *drhd);
 void iommu_free(struct acpi_drhd_unit *drhd);
 
diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
index 5f620c3202..eac20ede31 100644
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -231,7 +231,7 @@ static void free_remap_entry(struct iommu *iommu, int index)
                      iremap_entries, iremap_entry);
 
     update_irte(iommu, iremap_entry, &new_ire, false);
-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
+    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
     unmap_vtd_domain_page(iremap_entries);
@@ -403,7 +403,7 @@ static int ioapic_rte_to_remap_entry(struct iommu *iommu,
     }
 
     update_irte(iommu, iremap_entry, &new_ire, !init);
-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
+    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
     unmap_vtd_domain_page(iremap_entries);
@@ -694,7 +694,7 @@ static int msi_msg_to_remap_entry(
     update_irte(iommu, iremap_entry, &new_ire, msi_desc->irte_initialized);
     msi_desc->irte_initialized = true;
 
-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
+    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
     unmap_vtd_domain_page(iremap_entries);
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 52bf0d9d23..ff9151a54b 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -158,7 +158,8 @@ static void __init free_intel_iommu(struct intel_iommu *intel)
 }
 
 static int iommus_incoherent;
-static void __iommu_flush_cache(void *addr, unsigned int size)
+
+void iommu_sync_cache(const void *addr, unsigned int size)
 {
     int i;
     static unsigned int clflush_size = 0;
@@ -173,16 +174,6 @@ static void __iommu_flush_cache(void *addr, unsigned int size)
         cacheline_flush((char *)addr + i);
 }
 
-void iommu_flush_cache_entry(void *addr, unsigned int size)
-{
-    __iommu_flush_cache(addr, size);
-}
-
-void iommu_flush_cache_page(void *addr, unsigned long npages)
-{
-    __iommu_flush_cache(addr, PAGE_SIZE * npages);
-}
-
 /* Allocate page table, return its machine address */
 u64 alloc_pgtable_maddr(struct acpi_drhd_unit *drhd, unsigned long npages)
 {
@@ -207,7 +198,7 @@ u64 alloc_pgtable_maddr(struct acpi_drhd_unit *drhd, unsigned long npages)
             vaddr = __map_domain_page(cur_pg);
             memset(vaddr, 0, PAGE_SIZE);
 
-            iommu_flush_cache_page(vaddr, 1);
+            iommu_sync_cache(vaddr, PAGE_SIZE);
             unmap_domain_page(vaddr);
             cur_pg++;
         }
@@ -242,7 +233,7 @@ static u64 bus_to_context_maddr(struct iommu *iommu, u8 bus)
         }
         set_root_value(*root, maddr);
         set_root_present(*root);
-        iommu_flush_cache_entry(root, sizeof(struct root_entry));
+        iommu_sync_cache(root, sizeof(struct root_entry));
     }
     maddr = (u64) get_context_addr(*root);
     unmap_vtd_domain_page(root_entries);
@@ -300,7 +291,7 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
              */
             dma_set_pte_readable(*pte);
             dma_set_pte_writable(*pte);
-            iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+            iommu_sync_cache(pte, sizeof(struct dma_pte));
         }
 
         if ( level == 2 )
@@ -674,7 +665,7 @@ static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
 
     dma_clear_pte(*pte);
     spin_unlock(&hd->arch.mapping_lock);
-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+    iommu_sync_cache(pte, sizeof(struct dma_pte));
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
         rc = iommu_flush_iotlb_pages(domain, addr >> PAGE_SHIFT_4K, 1);
@@ -716,7 +707,7 @@ static void iommu_free_page_table(struct page_info *pg)
             iommu_free_pagetable(dma_pte_addr(*pte), next_level);
 
         dma_clear_pte(*pte);
-        iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+        iommu_sync_cache(pte, sizeof(struct dma_pte));
     }
 
     unmap_vtd_domain_page(pt_vaddr);
@@ -1449,7 +1440,7 @@ int domain_context_mapping_one(
     context_set_address_width(*context, agaw);
     context_set_fault_enable(*context);
     context_set_present(*context);
-    iommu_flush_cache_entry(context, sizeof(struct context_entry));
+    iommu_sync_cache(context, sizeof(struct context_entry));
     spin_unlock(&iommu->lock);
 
     /* Context entry was previously non-present (with domid 0). */
@@ -1602,7 +1593,7 @@ int domain_context_unmap_one(
 
     context_clear_present(*context);
     context_clear_entry(*context);
-    iommu_flush_cache_entry(context, sizeof(struct context_entry));
+    iommu_sync_cache(context, sizeof(struct context_entry));
 
     iommu_domid= domain_iommu_domid(domain, iommu);
     if ( iommu_domid == -1 )
@@ -1828,7 +1819,7 @@ static int __must_check intel_iommu_map_page(struct domain *d,
 
     *pte = new;
 
-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+    iommu_sync_cache(pte, sizeof(struct dma_pte));
     spin_unlock(&hd->arch.mapping_lock);
     unmap_vtd_domain_page(page);
 
@@ -1862,7 +1853,7 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
     int iommu_domid;
     int rc = 0;
 
-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+    iommu_sync_cache(pte, sizeof(struct dma_pte));
 
     for_each_drhd_unit ( drhd )
     {
@@ -2725,7 +2716,7 @@ static int __init intel_iommu_quarantine_init(struct domain *d)
             dma_set_pte_addr(*pte, maddr);
             dma_set_pte_readable(*pte);
         }
-        iommu_flush_cache_page(parent, 1);
+        iommu_sync_cache(parent, PAGE_SIZE);
 
         unmap_vtd_domain_page(parent);
         parent = map_vtd_domain_page(maddr);
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.11
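
For context: the renamed helper flushes every CPU cache line covering a buffer so that a non-coherent IOMMU observes the CPU's updates to page-table, context and remapping entries. Below is a minimal illustrative sketch of that idea, not the Xen code: CACHE_LINE_SIZE is an assumed constant (the real iommu_sync_cache() derives the CLFLUSH line size from CPUID at run time), and per the iommus_incoherent flag visible in the diff the real helper can skip the flush entirely when all IOMMUs are cache coherent.

    /*
     * Illustrative sketch only -- not the Xen implementation.
     * CACHE_LINE_SIZE is an assumption for this sketch.
     */
    #include <stdint.h>

    #define CACHE_LINE_SIZE 64  /* assumed; Xen reads the CLFLUSH size from CPUID */

    static inline void cacheline_flush(const void *p)
    {
        /* Flush (write back and invalidate) the cache line containing p. */
        asm volatile ( "clflush %0" :: "m" (*(const volatile char *)p) : "memory" );
    }

    /* Flush every cache line covering [addr, addr + size). */
    static void sync_cache(const void *addr, unsigned int size)
    {
        uintptr_t start = (uintptr_t)addr & ~(uintptr_t)(CACHE_LINE_SIZE - 1);
        uintptr_t end = (uintptr_t)addr + size;
        uintptr_t p;

        for ( p = start; p < end; p += CACHE_LINE_SIZE )
            cacheline_flush((const void *)p);
    }

With a single size-in-bytes interface of this shape, former callers such as iommu_flush_cache_page(vaddr, 1) map directly onto iommu_sync_cache(vaddr, PAGE_SIZE), which is the substitution the patch performs throughout.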