[PATCH v10 03/13] x86/mm: switch to new APIs in modify_xen_mappings
From: Wei Liu <wei.liu2@xxxxxxxxxx>

Page tables allocated in that function should be mapped and unmapped
now.

Note that pl2e may now be mapped and unmapped in different iterations,
so we need to add clean-ups for that.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Changed in v7:
- use normal unmap in the error path.
---
 xen/arch/x86/mm.c | 57 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8a68da26f45f..832e654294b4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5546,6 +5546,7 @@ int map_pages_to_xen(
 
  out:
     L3T_UNLOCK(current_l3page);
+    unmap_domain_page(pl2e);
     unmap_domain_page(pl3e);
     return rc;
 }
@@ -5572,7 +5573,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
 {
     bool locking = system_state > SYS_STATE_boot;
     l3_pgentry_t *pl3e = NULL;
-    l2_pgentry_t *pl2e;
+    l2_pgentry_t *pl2e = NULL;
     l1_pgentry_t *pl1e;
     unsigned int  i;
     unsigned long v = s;
@@ -5592,6 +5593,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
     {
         /* Clean up the previous iteration. */
         L3T_UNLOCK(current_l3page);
+        UNMAP_DOMAIN_PAGE(pl2e);
         UNMAP_DOMAIN_PAGE(pl3e);
 
         pl3e = virt_to_xen_l3e(v);
@@ -5614,6 +5616,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
         if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
         {
             l2_pgentry_t *l2t;
+            mfn_t l2mfn;
 
             if ( l2_table_offset(v) == 0 &&
                  l1_table_offset(v) == 0 &&
@@ -5630,35 +5633,38 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
             }
 
             /* PAGE1GB: shatter the superpage and fall through. */
-            l2t = alloc_xen_pagetable();
-            if ( !l2t )
+            l2mfn = alloc_xen_pagetable_new();
+            if ( mfn_eq(l2mfn, INVALID_MFN) )
                 goto out;
 
+            l2t = map_domain_page(l2mfn);
             for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
                 l2e_write(l2t + i,
                           l2e_from_pfn(l3e_get_pfn(*pl3e) +
                                        (i << PAGETABLE_ORDER),
                                        l3e_get_flags(*pl3e)));
+            UNMAP_DOMAIN_PAGE(l2t);
+
             if ( locking )
                 spin_lock(&map_pgdir_lock);
             if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
                  (l3e_get_flags(*pl3e) & _PAGE_PSE) )
             {
-                l3e_write_atomic(pl3e, l3e_from_mfn(virt_to_mfn(l2t),
-                                                    __PAGE_HYPERVISOR));
-                l2t = NULL;
+                l3e_write_atomic(pl3e,
+                                 l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
+                l2mfn = INVALID_MFN;
             }
             if ( locking )
                 spin_unlock(&map_pgdir_lock);
-            if ( l2t )
-                free_xen_pagetable(l2t);
+
+            free_xen_pagetable_new(l2mfn);
         }
 
         /*
          * The L3 entry has been verified to be present, and we've dealt with
          * 1G pages as well, so the L2 table cannot require allocation.
          */
-        pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(v);
+        pl2e = map_l2t_from_l3e(*pl3e) + l2_table_offset(v);
 
         if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
         {
@@ -5686,41 +5692,45 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
             else
             {
                 l1_pgentry_t *l1t;
-
                 /* PSE: shatter the superpage and try again. */
-                l1t = alloc_xen_pagetable();
-                if ( !l1t )
+                mfn_t l1mfn = alloc_xen_pagetable_new();
+
+                if ( mfn_eq(l1mfn, INVALID_MFN) )
                     goto out;
 
+                l1t = map_domain_page(l1mfn);
                 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
                     l1e_write(&l1t[i],
                               l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
                                            l2e_get_flags(*pl2e) & ~_PAGE_PSE));
+                UNMAP_DOMAIN_PAGE(l1t);
+
                 if ( locking )
                     spin_lock(&map_pgdir_lock);
                 if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
                      (l2e_get_flags(*pl2e) & _PAGE_PSE) )
                 {
-                    l2e_write_atomic(pl2e, l2e_from_mfn(virt_to_mfn(l1t),
+                    l2e_write_atomic(pl2e, l2e_from_mfn(l1mfn,
                                                         __PAGE_HYPERVISOR));
-                    l1t = NULL;
+                    l1mfn = INVALID_MFN;
                 }
                 if ( locking )
                     spin_unlock(&map_pgdir_lock);
-                if ( l1t )
-                    free_xen_pagetable(l1t);
+
+                free_xen_pagetable_new(l1mfn);
             }
         }
         else
         {
             l1_pgentry_t nl1e, *l1t;
+            mfn_t l1mfn;
 
             /*
              * Ordinary 4kB mapping: The L2 entry has been verified to be
              * present, and we've dealt with 2M pages as well, so the L1 table
              * cannot require allocation.
              */
-            pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
+            pl1e = map_l1t_from_l2e(*pl2e) + l1_table_offset(v);
 
             /* Confirm the caller isn't trying to create new mappings. */
             if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
@@ -5731,6 +5741,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                                 (l1e_get_flags(*pl1e) & ~FLAGS_MASK) | nf);
 
             l1e_write_atomic(pl1e, nl1e);
+            UNMAP_DOMAIN_PAGE(pl1e);
             v += PAGE_SIZE;
 
             /*
@@ -5760,10 +5771,12 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                 continue;
             }
 
-            l1t = l2e_to_l1e(*pl2e);
+            l1mfn = l2e_get_mfn(*pl2e);
+            l1t = map_domain_page(l1mfn);
             for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
                 if ( l1e_get_intpte(l1t[i]) != 0 )
                     break;
+            UNMAP_DOMAIN_PAGE(l1t);
             if ( i == L1_PAGETABLE_ENTRIES )
             {
                 /* Empty: zap the L2E and free the L1 page. */
@@ -5771,7 +5784,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                 if ( locking )
                     spin_unlock(&map_pgdir_lock);
                 flush_area(NULL, FLUSH_TLB_GLOBAL); /* flush before free */
-                free_xen_pagetable(l1t);
+                free_xen_pagetable_new(l1mfn);
             }
             else if ( locking )
                 spin_unlock(&map_pgdir_lock);
@@ -5802,11 +5815,13 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
 
         {
             l2_pgentry_t *l2t;
+            mfn_t l2mfn = l3e_get_mfn(*pl3e);
 
-            l2t = l3e_to_l2e(*pl3e);
+            l2t = map_domain_page(l2mfn);
             for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
                 if ( l2e_get_intpte(l2t[i]) != 0 )
                     break;
+            UNMAP_DOMAIN_PAGE(l2t);
             if ( i == L2_PAGETABLE_ENTRIES )
             {
                 /* Empty: zap the L3E and free the L2 page. */
@@ -5814,7 +5829,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                 if ( locking )
                     spin_unlock(&map_pgdir_lock);
                 flush_area(NULL, FLUSH_TLB_GLOBAL); /* flush before free */
-                free_xen_pagetable(l2t);
+                free_xen_pagetable_new(l2mfn);
             }
             else if ( locking )
                 spin_unlock(&map_pgdir_lock);
-- 
2.23.4
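
The idiom this patch applies in both shatter paths is: allocate the new
page table by MFN (alloc_xen_pagetable_new()), map it only while its
entries are initialised (map_domain_page()), unmap it before taking the
lock, and then install it into the parent entry by MFN. The following is
a minimal sketch of that sequence for the 1G case, using only calls that
appear in the diff above; the function name is illustrative (not part of
the patch), and the real code's locking and race handling (re-checking
the L3E under map_pgdir_lock and freeing the table if another CPU
shattered the superpage first) is omitted:

    /*
     * Illustrative sketch only, not part of the patch: the
     * allocate-by-MFN, map-to-initialise, unmap-then-install idiom
     * used by the 1G shatter path in modify_xen_mappings().
     */
    static int shatter_1g_sketch(l3_pgentry_t *pl3e)
    {
        mfn_t l2mfn = alloc_xen_pagetable_new();  /* an MFN, not a VA */
        l2_pgentry_t *l2t;
        unsigned int i;

        if ( mfn_eq(l2mfn, INVALID_MFN) )
            return -ENOMEM;

        /* Map the fresh L2 table just long enough to fill it in. */
        l2t = map_domain_page(l2mfn);
        for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
            l2e_write(l2t + i,
                      l2e_from_pfn(l3e_get_pfn(*pl3e) +
                                   (i << PAGETABLE_ORDER),
                                   l3e_get_flags(*pl3e)));
        UNMAP_DOMAIN_PAGE(l2t);  /* drop the mapping before installing */

        /* Publish the new table by MFN; the page tables now own it. */
        l3e_write_atomic(pl3e, l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
        return 0;
    }

Two points worth noting from the diff itself: the unconditional
free_xen_pagetable_new(l2mfn) after the locked section works because
l2mfn is set to INVALID_MFN once the table has been installed, which
implies the free routine treats INVALID_MFN as a no-op; and pl2e cleanup
is deferred, since a pl2e mapped in one iteration may only be unmapped at
the top of a later iteration or at the out label, which is why both new
UNMAP_DOMAIN_PAGE(pl2e)/unmap_domain_page(pl2e) sites are added.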