[PATCH v2 11/18] AMD/IOMMU: return old PTE from {set,clear}_iommu_pte_present()

In order to free intermediate page tables when replacing smaller
mappings by a single larger one, callers will need to know the full
PTE. Flush indicators can be derived from this in the callers (and
outside the locked regions).

First split set_iommu_pte_present() from set_iommu_ptes_present(): only
the former needs to return the old PTE, while the latter (like also
set_iommu_pde_present()) doesn't even need to return flush indicators.
Then change return types/values and callers accordingly.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -31,30 +31,28 @@ static unsigned int pfn_to_pde_idx(unsig
     return idx;
 }

-static unsigned int clear_iommu_pte_present(unsigned long l1_mfn,
-                                            unsigned long dfn)
+static union amd_iommu_pte clear_iommu_pte_present(unsigned long l1_mfn,
+                                                   unsigned long dfn)
 {
-    union amd_iommu_pte *table, *pte;
-    unsigned int flush_flags;
+    union amd_iommu_pte *table, *pte, old;

     table = map_domain_page(_mfn(l1_mfn));
     pte = &table[pfn_to_pde_idx(dfn, 1)];
+    old = *pte;

-    flush_flags = pte->pr ? IOMMU_FLUSHF_modified : 0;
     write_atomic(&pte->raw, 0);

     unmap_domain_page(table);

-    return flush_flags;
+    return old;
 }

-static unsigned int set_iommu_pde_present(union amd_iommu_pte *pte,
-                                          unsigned long next_mfn,
-                                          unsigned int next_level, bool iw,
-                                          bool ir)
+static void set_iommu_pde_present(union amd_iommu_pte *pte,
+                                  unsigned long next_mfn,
+                                  unsigned int next_level,
+                                  bool iw, bool ir)
 {
-    union amd_iommu_pte new = {}, old;
-    unsigned int flush_flags = IOMMU_FLUSHF_added;
+    union amd_iommu_pte new = {};

     /*
      * FC bit should be enabled in PTE, this helps to solve potential
@@ -68,28 +66,42 @@ static unsigned int set_iommu_pde_presen
     new.next_level = next_level;
     new.pr = true;

-    old.raw = read_atomic(&pte->raw);
-    old.ign0 = 0;
-    old.ign1 = 0;
-    old.ign2 = 0;
+    write_atomic(&pte->raw, new.raw);
+}

-    if ( old.pr && old.raw != new.raw )
-        flush_flags |= IOMMU_FLUSHF_modified;
+static union amd_iommu_pte set_iommu_pte_present(unsigned long pt_mfn,
+                                                 unsigned long dfn,
+                                                 unsigned long next_mfn,
+                                                 unsigned int level,
+                                                 bool iw, bool ir)
+{
+    union amd_iommu_pte *table, *pde, old;

-    write_atomic(&pte->raw, new.raw);
+    table = map_domain_page(_mfn(pt_mfn));
+    pde = &table[pfn_to_pde_idx(dfn, level)];
+
+    old = *pde;
+    if ( !old.pr || old.next_level ||
+         old.mfn != next_mfn ||
+         old.iw != iw || old.ir != ir )
+        set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
+    else
+        old.pr = false; /* signal "no change" to the caller */

-    return flush_flags;
+    unmap_domain_page(table);
+
+    return old;
 }

-static unsigned int set_iommu_ptes_present(unsigned long pt_mfn,
-                                           unsigned long dfn,
-                                           unsigned long next_mfn,
-                                           unsigned int nr_ptes,
-                                           unsigned int pde_level,
-                                           bool iw, bool ir)
+static void set_iommu_ptes_present(unsigned long pt_mfn,
+                                   unsigned long dfn,
+                                   unsigned long next_mfn,
+                                   unsigned int nr_ptes,
+                                   unsigned int pde_level,
+                                   bool iw, bool ir)
 {
     union amd_iommu_pte *table, *pde;
-    unsigned int page_sz, flush_flags = 0;
+    unsigned int page_sz;

     table = map_domain_page(_mfn(pt_mfn));
     pde = &table[pfn_to_pde_idx(dfn, pde_level)];
@@ -98,20 +110,18 @@ static unsigned int set_iommu_ptes_prese
     if ( (void *)(pde + nr_ptes) > (void *)table + PAGE_SIZE )
     {
         ASSERT_UNREACHABLE();
-        return 0;
+        return;
     }

     while ( nr_ptes-- )
     {
-        flush_flags |= set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
+        set_iommu_pde_present(pde, next_mfn, 0, iw, ir);

         ++pde;
         next_mfn += page_sz;
     }

     unmap_domain_page(table);
-
-    return flush_flags;
 }

 void amd_iommu_set_root_page_table(struct amd_iommu_dte *dte,
@@ -284,6 +294,7 @@ int amd_iommu_map_page(struct domain *d,
     struct domain_iommu *hd = dom_iommu(d);
     int rc;
     unsigned long pt_mfn = 0;
+    union amd_iommu_pte old;

     spin_lock(&hd->arch.mapping_lock);

@@ -320,12 +331,16 @@ int amd_iommu_map_page(struct domain *d,
     }

     /* Install 4k mapping */
-    *flush_flags |= set_iommu_ptes_present(pt_mfn, dfn_x(dfn), mfn_x(mfn),
-                                           1, 1, (flags & IOMMUF_writable),
-                                           (flags & IOMMUF_readable));
+    old = set_iommu_pte_present(pt_mfn, dfn_x(dfn), mfn_x(mfn), 1,
+                                (flags & IOMMUF_writable),
+                                (flags & IOMMUF_readable));

     spin_unlock(&hd->arch.mapping_lock);

+    *flush_flags |= IOMMU_FLUSHF_added;
+    if ( old.pr )
+        *flush_flags |= IOMMU_FLUSHF_modified;
+
     return 0;
 }

@@ -334,6 +349,7 @@ int amd_iommu_unmap_page(struct domain *
 {
     unsigned long pt_mfn = 0;
     struct domain_iommu *hd = dom_iommu(d);
+    union amd_iommu_pte old = {};

     spin_lock(&hd->arch.mapping_lock);

@@ -355,11 +371,14 @@ int amd_iommu_unmap_page(struct domain *
     if ( pt_mfn )
     {
         /* Mark PTE as 'page not present'. */
-        *flush_flags |= clear_iommu_pte_present(pt_mfn, dfn_x(dfn));
+        old = clear_iommu_pte_present(pt_mfn, dfn_x(dfn));
     }

     spin_unlock(&hd->arch.mapping_lock);

+    if ( old.pr )
+        *flush_flags |= IOMMU_FLUSHF_modified;
+
     return 0;
 }
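
To see the caller-side pattern in isolation: below is a minimal,
self-contained sketch of "update the PTE under the lock, return the
full old entry, derive flush indicators afterwards". All identifiers
and the bitfield layout are simplified stand-ins, not the real Xen
definitions (union pte rather than union amd_iommu_pte, and a GCC
__atomic_store_n() in place of Xen's write_atomic()).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLUSHF_added    (1u << 0)
#define FLUSHF_modified (1u << 1)

/* Simplified stand-in for union amd_iommu_pte; not the real layout. */
union pte {
    uint64_t raw;
    struct {
        bool pr:1;               /* present */
        unsigned int next:3;     /* next-level field (0 = leaf) */
        uint64_t mfn:40;         /* frame number */
        bool ir:1, iw:1;         /* read / write permission */
    };
};

/* Install a leaf entry; hand the full prior contents back to the caller. */
static union pte set_pte_present(union pte *pte, uint64_t mfn, bool iw, bool ir)
{
    union pte old = *pte;
    union pte new = { .pr = true, .mfn = mfn, .iw = iw, .ir = ir };

    if ( old.pr && !old.next && old.mfn == mfn &&
         old.iw == iw && old.ir == ir )
        old.pr = false;          /* no change - tell the caller not to flush */
    else
        __atomic_store_n(&pte->raw, new.raw, __ATOMIC_RELAXED);

    return old;
}

int main(void)
{
    union pte slot = { .raw = 0 };
    unsigned int flush_flags;

    /* First mapping: in Xen the call happens under the mapping lock ... */
    union pte old = set_pte_present(&slot, 0x1234, true, true);

    /* ... while the flush decision is made from "old" after unlocking. */
    flush_flags = FLUSHF_added;
    if ( old.pr )
        flush_flags |= FLUSHF_modified;
    printf("first map:  flush_flags=%#x\n", flush_flags);  /* 0x1 */

    /* Remapping the same slot to another frame also reports "modified". */
    old = set_pte_present(&slot, 0x5678, true, true);
    flush_flags = FLUSHF_added;
    if ( old.pr )
        flush_flags |= FLUSHF_modified;
    printf("second map: flush_flags=%#x\n", flush_flags);  /* 0x3 */

    return 0;
}

The point the patch exploits is that the returned old entry is a plain
value: deriving *flush_flags from it no longer has to happen inside the
locked region, and knowing the full old PTE is what will later allow
callers to free intermediate page tables when a larger mapping replaces
smaller ones.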