[PATCH v4 09/21] AMD/IOMMU: return old PTE from {set,clear}_iommu_pte_present()
In order to free intermediate page tables when replacing smaller mappings
by a single larger one, callers will need to know the full PTE. Flush
indicators can be derived from this in the callers (and outside the locked
regions). First split set_iommu_pte_present() from set_iommu_ptes_present():
Only the former needs to return the old PTE, while the latter (like also
set_iommu_pde_present()) doesn't even need to return flush indicators.
Then change return types/values and callers accordingly.

Note that for subsequent changes returning merely a boolean (old.pr) is not
going to be sufficient; the next_level field will also be required.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
v4: Re-base over changes earlier in the series.

--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -31,30 +31,28 @@ static unsigned int pfn_to_pde_idx(unsig
     return idx;
 }
 
-static unsigned int clear_iommu_pte_present(unsigned long l1_mfn,
-                                            unsigned long dfn)
+static union amd_iommu_pte clear_iommu_pte_present(unsigned long l1_mfn,
+                                                   unsigned long dfn)
 {
-    union amd_iommu_pte *table, *pte;
-    unsigned int flush_flags;
+    union amd_iommu_pte *table, *pte, old;
 
     table = map_domain_page(_mfn(l1_mfn));
     pte = &table[pfn_to_pde_idx(dfn, 1)];
+    old = *pte;
 
-    flush_flags = pte->pr ? IOMMU_FLUSHF_modified : 0;
     write_atomic(&pte->raw, 0);
 
     unmap_domain_page(table);
 
-    return flush_flags;
+    return old;
 }
 
-static unsigned int set_iommu_pde_present(union amd_iommu_pte *pte,
-                                          unsigned long next_mfn,
-                                          unsigned int next_level, bool iw,
-                                          bool ir)
+static void set_iommu_pde_present(union amd_iommu_pte *pte,
+                                  unsigned long next_mfn,
+                                  unsigned int next_level,
+                                  bool iw, bool ir)
 {
-    union amd_iommu_pte new = {}, old;
-    unsigned int flush_flags = IOMMU_FLUSHF_added;
+    union amd_iommu_pte new = {};
 
     /*
      * FC bit should be enabled in PTE, this helps to solve potential
@@ -68,29 +66,42 @@ static unsigned int set_iommu_pde_presen
     new.next_level = next_level;
     new.pr = true;
 
-    old.raw = read_atomic(&pte->raw);
-    old.ign0 = 0;
-    old.ign1 = 0;
-    old.ign2 = 0;
+    write_atomic(&pte->raw, new.raw);
+}
 
-    if ( old.pr && old.raw != new.raw )
-        flush_flags |= IOMMU_FLUSHF_modified;
+static union amd_iommu_pte set_iommu_pte_present(unsigned long pt_mfn,
+                                                 unsigned long dfn,
+                                                 unsigned long next_mfn,
+                                                 unsigned int level,
+                                                 bool iw, bool ir)
+{
+    union amd_iommu_pte *table, *pde, old;
 
-    write_atomic(&pte->raw, new.raw);
+    table = map_domain_page(_mfn(pt_mfn));
+    pde = &table[pfn_to_pde_idx(dfn, level)];
+
+    old = *pde;
+    if ( !old.pr || old.next_level ||
+         old.mfn != next_mfn ||
+         old.iw != iw || old.ir != ir )
+        set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
+    else
+        old.pr = false; /* signal "no change" to the caller */
 
-    return flush_flags;
+    unmap_domain_page(table);
+
+    return old;
 }
 
-static unsigned int set_iommu_ptes_present(unsigned long pt_mfn,
-                                           unsigned long dfn,
-                                           unsigned long next_mfn,
-                                           unsigned int nr_ptes,
-                                           unsigned int pde_level,
-                                           bool iw, bool ir)
+static void set_iommu_ptes_present(unsigned long pt_mfn,
+                                   unsigned long dfn,
+                                   unsigned long next_mfn,
+                                   unsigned int nr_ptes,
+                                   unsigned int pde_level,
+                                   bool iw, bool ir)
 {
     union amd_iommu_pte *table, *pde;
     unsigned long page_sz = 1UL << (PTE_PER_TABLE_SHIFT * (pde_level - 1));
-    unsigned int flush_flags = 0;
 
     table = map_domain_page(_mfn(pt_mfn));
     pde = &table[pfn_to_pde_idx(dfn, pde_level)];
@@ -98,20 +109,18 @@ static unsigned int set_iommu_ptes_prese
     if ( (void *)(pde + nr_ptes) > (void *)table + PAGE_SIZE )
     {
         ASSERT_UNREACHABLE();
-        return 0;
+        return;
     }
 
     while ( nr_ptes-- )
     {
-        flush_flags |= set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
+        set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
 
         ++pde;
         next_mfn += page_sz;
     }
 
     unmap_domain_page(table);
-
-    return flush_flags;
 }
 
 /*
@@ -349,6 +358,7 @@ int cf_check amd_iommu_map_page(
     struct domain_iommu *hd = dom_iommu(d);
     int rc;
     unsigned long pt_mfn = 0;
+    union amd_iommu_pte old;
 
     spin_lock(&hd->arch.mapping_lock);
 
@@ -385,12 +395,16 @@ int cf_check amd_iommu_map_page(
     }
 
     /* Install 4k mapping */
-    *flush_flags |= set_iommu_ptes_present(pt_mfn, dfn_x(dfn), mfn_x(mfn),
-                                           1, 1, (flags & IOMMUF_writable),
-                                           (flags & IOMMUF_readable));
+    old = set_iommu_pte_present(pt_mfn, dfn_x(dfn), mfn_x(mfn), 1,
+                                (flags & IOMMUF_writable),
+                                (flags & IOMMUF_readable));
 
     spin_unlock(&hd->arch.mapping_lock);
 
+    *flush_flags |= IOMMU_FLUSHF_added;
+    if ( old.pr )
+        *flush_flags |= IOMMU_FLUSHF_modified;
+
     return 0;
 }
 
@@ -399,6 +413,7 @@ int cf_check amd_iommu_unmap_page(
 {
     unsigned long pt_mfn = 0;
     struct domain_iommu *hd = dom_iommu(d);
+    union amd_iommu_pte old = {};
 
     spin_lock(&hd->arch.mapping_lock);
 
@@ -420,11 +435,14 @@ int cf_check amd_iommu_unmap_page(
     if ( pt_mfn )
     {
         /* Mark PTE as 'page not present'. */
-        *flush_flags |= clear_iommu_pte_present(pt_mfn, dfn_x(dfn));
+        old = clear_iommu_pte_present(pt_mfn, dfn_x(dfn));
     }
 
     spin_unlock(&hd->arch.mapping_lock);
 
+    if ( old.pr )
+        *flush_flags |= IOMMU_FLUSHF_modified;
+
    return 0;
 }
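
[Editor's note, not part of the patch] To illustrate the point in the description that flush indicators can now be derived in the callers, outside the locked region: a condensed sketch of a caller consuming the PTE returned by set_iommu_pte_present(). The wrapper name example_install_mapping() is made up for the example; all other identifiers are the ones used in the hunks above.

/*
 * Illustrative sketch only: derive flush indicators from the PTE
 * returned by set_iommu_pte_present(), after dropping the mapping lock.
 * example_install_mapping() is a hypothetical wrapper, mirroring what
 * amd_iommu_map_page() does in the patch above.
 */
static int example_install_mapping(struct domain_iommu *hd,
                                   unsigned long pt_mfn, dfn_t dfn,
                                   mfn_t mfn, unsigned int flags,
                                   unsigned int *flush_flags)
{
    union amd_iommu_pte old;

    spin_lock(&hd->arch.mapping_lock);

    old = set_iommu_pte_present(pt_mfn, dfn_x(dfn), mfn_x(mfn), 1,
                                flags & IOMMUF_writable,
                                flags & IOMMUF_readable);

    spin_unlock(&hd->arch.mapping_lock);

    /* A mapping was requested, so an IOTLB "added" flush is always due ... */
    *flush_flags |= IOMMU_FLUSHF_added;
    /*
     * ... and if a present entry was replaced, its old contents need a
     * "modified" flush as well.  The callee clears old.pr to signal that
     * nothing actually changed.
     */
    if ( old.pr )
        *flush_flags |= IOMMU_FLUSHF_modified;

    return 0;
}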