[PATCH 07/16] x86/pv: rewrite how building PV dom0 handles domheap mappings
From: Hongyan Xia <hongyxia@xxxxxxxxxx>

Building a PV dom0 allocates pages from the domheap but then accesses
them as if they were xenheap pages, i.e. through the direct map via
__va(). This is clearly wrong, since domheap pages are not guaranteed
to have a permanent direct-map mapping. Fix by mapping and unmapping
the pages explicitly with map_domain_page().

Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
---
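For reference, the core access-pattern change, shown as a minimal
illustrative sketch (identifiers as in the hunks below; not part of
the applied diff):

    /* Old: assumes the page at mpt_alloc is reachable through the
     * direct map, which is only guaranteed for xenheap pages. */
    l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;

    /* New: take an explicit, temporary mapping of the domheap page
     * and keep its MFN, so PTEs can be built with l4e_from_mfn()
     * etc. instead of l4e_from_paddr(__pa(...)). */
    l4start_mfn = maddr_to_mfn(mpt_alloc); mpt_alloc += PAGE_SIZE;
    l4start = l4tab = map_domain_page(l4start_mfn);
    /* ... fill in the page table through the mapping ... */
    UNMAP_DOMAIN_PAGE(l4start);    /* drop the temporary mapping */

The UNMAP_MAP_AND_ADVANCE() macro introduced below bundles this
unmap/map/advance sequence, so each level's "start" mapping is rotated
to the next page table page as the loops walk mpt_alloc.
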
 xen/arch/x86/pv/dom0_build.c | 58 ++++++++++++++++++++++++++----------
 1 file changed, 43 insertions(+), 15 deletions(-)

diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index b052f13462..adaa6afda2 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -309,6 +309,10 @@ int __init dom0_construct_pv(struct domain *d,
     l3_pgentry_t *l3tab = NULL, *l3start = NULL;
     l2_pgentry_t *l2tab = NULL, *l2start = NULL;
     l1_pgentry_t *l1tab = NULL, *l1start = NULL;
+    mfn_t l4start_mfn = INVALID_MFN;
+    mfn_t l3start_mfn = INVALID_MFN;
+    mfn_t l2start_mfn = INVALID_MFN;
+    mfn_t l1start_mfn = INVALID_MFN;
 
     /*
      * This fully describes the memory layout of the initial domain. All
@@ -535,6 +539,7 @@ int __init dom0_construct_pv(struct domain *d,
         for ( i = 0; i < nr_pages; i++, len -= PAGE_SIZE )
         {
             void *p = __map_domain_page(page + i);
+
             memcpy(p, mfn_to_virt(initrd_mfn + i),
                    min(len, (unsigned long)PAGE_SIZE));
             unmap_domain_page(p);
@@ -610,23 +615,32 @@ int __init dom0_construct_pv(struct domain *d,
         v->arch.pv.event_callback_cs    = FLAT_COMPAT_KERNEL_CS;
     }
 
+#define UNMAP_MAP_AND_ADVANCE(mfn_var, virt_var, maddr) \
+do {                                                    \
+    UNMAP_DOMAIN_PAGE(virt_var);                        \
+    mfn_var = maddr_to_mfn(maddr);                      \
+    maddr += PAGE_SIZE;                                 \
+    virt_var = map_domain_page(mfn_var);                \
+} while ( false )
+
     /* WARNING: The new domain must have its 'processor' field filled in! */
     if ( !is_pv_32bit_domain(d) )
     {
         maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
-        l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+        UNMAP_MAP_AND_ADVANCE(l4start_mfn, l4start, mpt_alloc);
+        l4tab = l4start;
         clear_page(l4tab);
-        init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
-                          d, INVALID_MFN, true);
-        v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
+        init_xen_l4_slots(l4tab, l4start_mfn, d, INVALID_MFN, true);
+        v->arch.guest_table = pagetable_from_mfn(l4start_mfn);
     }
     else
     {
         /* Monitor table already created by switch_compat(). */
-        l4start = l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
+        l4start_mfn = pagetable_get_mfn(v->arch.guest_table);
+        l4start = l4tab = map_domain_page(l4start_mfn);
         /* See public/xen.h on why the following is needed. */
         maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l3_page_table;
-        l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+        UNMAP_MAP_AND_ADVANCE(l3start_mfn, l3start, mpt_alloc);
     }
 
     l4tab += l4_table_offset(v_start);
@@ -636,14 +650,16 @@ int __init dom0_construct_pv(struct domain *d,
         if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
         {
             maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
-            l1start = l1tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+            UNMAP_MAP_AND_ADVANCE(l1start_mfn, l1start, mpt_alloc);
+            l1tab = l1start;
             clear_page(l1tab);
             if ( count == 0 )
                 l1tab += l1_table_offset(v_start);
             if ( !((unsigned long)l2tab & (PAGE_SIZE-1)) )
             {
                 maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
-                l2start = l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+                UNMAP_MAP_AND_ADVANCE(l2start_mfn, l2start, mpt_alloc);
+                l2tab = l2start;
                 clear_page(l2tab);
                 if ( count == 0 )
                     l2tab += l2_table_offset(v_start);
@@ -653,19 +669,19 @@ int __init dom0_construct_pv(struct domain *d,
                     {
                         maddr_to_page(mpt_alloc)->u.inuse.type_info =
                             PGT_l3_page_table;
-                        l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+                        UNMAP_MAP_AND_ADVANCE(l3start_mfn, l3start, mpt_alloc);
                     }
                     l3tab = l3start;
                     clear_page(l3tab);
                     if ( count == 0 )
                         l3tab += l3_table_offset(v_start);
-                    *l4tab = l4e_from_paddr(__pa(l3start), L4_PROT);
+                    *l4tab = l4e_from_mfn(l3start_mfn, L4_PROT);
                     l4tab++;
                 }
-                *l3tab = l3e_from_paddr(__pa(l2start), L3_PROT);
+                *l3tab = l3e_from_mfn(l2start_mfn, L3_PROT);
                 l3tab++;
             }
-            *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
+            *l2tab = l2e_from_mfn(l1start_mfn, L2_PROT);
             l2tab++;
         }
         if ( count < initrd_pfn || count >= initrd_pfn + PFN_UP(initrd_len) )
@@ -692,9 +708,9 @@ int __init dom0_construct_pv(struct domain *d,
             if ( !l3e_get_intpte(*l3tab) )
             {
                 maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
-                l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
-                clear_page(l2tab);
-                *l3tab = l3e_from_paddr(__pa(l2tab), L3_PROT);
+                UNMAP_MAP_AND_ADVANCE(l2start_mfn, l2start, mpt_alloc);
+                clear_page(l2start);
+                *l3tab = l3e_from_mfn(l2start_mfn, L3_PROT);
             }
             if ( i == 3 )
                 l3e_get_page(*l3tab)->u.inuse.type_info |= PGT_pae_xen_l2;
@@ -705,9 +721,17 @@ int __init dom0_construct_pv(struct domain *d,
         unmap_domain_page(l2t);
     }
 
+#undef UNMAP_MAP_AND_ADVANCE
+
+    UNMAP_DOMAIN_PAGE(l1start);
+    UNMAP_DOMAIN_PAGE(l2start);
+    UNMAP_DOMAIN_PAGE(l3start);
+
     /* Pages that are part of page tables must be read only. */
     mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages);
 
+    UNMAP_DOMAIN_PAGE(l4start);
+
     /* Mask all upcalls... */
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
         shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
@@ -869,8 +893,12 @@ int __init dom0_construct_pv(struct domain *d,
      * !CONFIG_VIDEO case so the logic here can be simplified.
      */
     if ( pv_shim )
+    {
+        l4start = map_domain_page(l4start_mfn);
         pv_shim_setup_dom(d, l4start, v_start, vxenstore_start, vconsole_start,
                           vphysmap_start, si);
+        UNMAP_DOMAIN_PAGE(l4start);
+    }
 
     if ( is_pv_32bit_domain(d) )
         xlat_start_info(si, pv_shim ? XLAT_start_info_console_domU
-- 
2.24.1.AMZN