[PATCH] AMD/IOMMU: correct shattering of super pages
Fill the new page table _before_ installing it into a live page table
hierarchy, as installing a blank page first risks I/O faults on
sub-ranges of the original super page which aren't part of the range
for which mappings are being updated.

While at it, also do away with mapping and unmapping the same fresh
intermediate page table page once per entry to be written.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Afaict this corrects presently dead code: I don't think there are ways
for super pages to be created in the first place, i.e. none could ever
need shattering.

--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -81,19 +81,34 @@ static unsigned int set_iommu_pde_presen
     return flush_flags;
 }
 
-static unsigned int set_iommu_pte_present(unsigned long pt_mfn,
-                                          unsigned long dfn,
-                                          unsigned long next_mfn,
-                                          int pde_level,
-                                          bool iw, bool ir)
+static unsigned int set_iommu_ptes_present(unsigned long pt_mfn,
+                                           unsigned long dfn,
+                                           unsigned long next_mfn,
+                                           unsigned int nr_ptes,
+                                           unsigned int pde_level,
+                                           bool iw, bool ir)
 {
     union amd_iommu_pte *table, *pde;
-    unsigned int flush_flags;
+    unsigned int page_sz, flush_flags = 0;
 
     table = map_domain_page(_mfn(pt_mfn));
     pde = &table[pfn_to_pde_idx(dfn, pde_level)];
+    page_sz = 1U << (PTE_PER_TABLE_SHIFT * (pde_level - 1));
+
+    if ( (void *)(pde + nr_ptes) > (void *)table + PAGE_SIZE )
+    {
+        ASSERT_UNREACHABLE();
+        return 0;
+    }
+
+    while ( nr_ptes-- )
+    {
+        flush_flags |= set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
+
+        ++pde;
+        next_mfn += page_sz;
+    }
 
-    flush_flags = set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
     unmap_domain_page(table);
 
     return flush_flags;
@@ -220,11 +235,8 @@ static int iommu_pde_from_dfn(struct dom
         /* Split super page frame into smaller pieces.*/
         if ( pde->pr && !pde->next_level && next_table_mfn )
         {
-            int i;
             unsigned long mfn, pfn;
-            unsigned int page_sz;
 
-            page_sz = 1 << (PTE_PER_TABLE_SHIFT * (next_level - 1));
             pfn = dfn & ~((1 << (PTE_PER_TABLE_SHIFT * next_level)) - 1);
             mfn = next_table_mfn;
 
@@ -238,17 +250,13 @@ static int iommu_pde_from_dfn(struct dom
             }
 
             next_table_mfn = mfn_x(page_to_mfn(table));
+
+            set_iommu_ptes_present(next_table_mfn, pfn, mfn, PTE_PER_TABLE_SIZE,
+                                   next_level, true, true);
+            smp_wmb();
             set_iommu_pde_present(pde, next_table_mfn, next_level, true,
                                   true);
 
-            for ( i = 0; i < PTE_PER_TABLE_SIZE; i++ )
-            {
-                set_iommu_pte_present(next_table_mfn, pfn, mfn, next_level,
-                                      true, true);
-                mfn += page_sz;
-                pfn += page_sz;
-            }
-
             amd_iommu_flush_all_pages(d);
         }
 
@@ -318,9 +326,9 @@ int amd_iommu_map_page(struct domain *d,
     }
 
     /* Install 4k mapping */
-    *flush_flags |= set_iommu_pte_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn),
-                                          1, (flags & IOMMUF_writable),
-                                          (flags & IOMMUF_readable));
+    *flush_flags |= set_iommu_ptes_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn),
+                                           1, 1, (flags & IOMMUF_writable),
+                                           (flags & IOMMUF_readable));
 
     spin_unlock(&hd->arch.mapping_lock);
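To make the ordering argument concrete, below is a minimal,
self-contained sketch of the fill-then-fence-then-install sequence the
patch establishes when shattering a live super page. It is not Xen
code: pte_t, ENTRIES, PTE_PR, wmb() and shatter() are simplified,
hypothetical stand-ins for the real AMD IOMMU definitions and helpers.

/* sketch.c - illustrative only, not Xen code.  Build: cc -c sketch.c */
#include <stdint.h>

#define ENTRIES 512            /* PTEs per page table at one level */
#define PTE_PR  0x1ULL         /* simplified "present" bit */

typedef struct { uint64_t raw; } pte_t;

/* Stand-in for Xen's smp_wmb(): order earlier stores before later ones. */
static inline void wmb(void)
{
    __atomic_thread_fence(__ATOMIC_RELEASE);
}

/*
 * Shatter a super page: populate the replacement table in full, fence,
 * and only then make it reachable through the (live) parent entry.  If
 * the parent were pointed at the still-blank table first, concurrent
 * DMA to sub-ranges not being remapped could hit not-present PTEs and
 * take I/O page faults - the hazard the patch removes.
 */
void shatter(pte_t *parent, pte_t table[ENTRIES], uint64_t table_mfn,
             uint64_t first_mfn, uint64_t page_sz)
{
    unsigned int i;

    for ( i = 0; i < ENTRIES; i++ )               /* 1: fill every PTE */
        table[i].raw = ((first_mfn + i * page_sz) << 12) | PTE_PR;

    wmb();                                        /* 2: fence the fill */

    parent->raw = (table_mfn << 12) | PTE_PR;     /* 3: go live */
}

The same three-step shape is visible in the second and third hunks
above: set_iommu_ptes_present() does the fill, smp_wmb() the fence,
and set_iommu_pde_present() the install.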