[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH V4 04/15] x86: Initialize mapcache for PV, HVM, and idle domains
From: Wei Liu <wei.liu2@xxxxxxxxxx> To support the transition away from the direct map, the mapcache will now be used by HVM and idle domains as well. This patch lifts the `mapcache` to the arch level and moves its initialization earlier in `arch_domain_create()` to cover PV, HVM, and idle domains. For the idle domain to utilize the mapcache, this patch also populates the mapcache page tables within the `PERDOMAIN` region and adjusts the initialization sequence. With this change, mapcache initialization is now unified across all domain types—PV, HVM, and idle. Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx> Signed-off-by: Wei Wang <wawei@xxxxxxxxx> Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx> Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx> Signed-off-by: Elias El Yandouzi <eliasely@xxxxxxxxxx> ---- Changes in V4: * Reword the commit message * Rebase it on top of staging * The logic for the creation of the domain has been reworked so introduced #ifdef CONFIG_X86 in the common code to initialise the mapcache Changes in V2: * Free resources if mapcache initialisation fails * Remove `is_idle_domain()` check from `create_perdomain_mappings()` diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index 78a13e6812c9..38338f519dea 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -777,6 +777,10 @@ void __init arch_init_idle_domain(struct domain *d) }; d->arch.ctxt_switch = &idle_csw; + + /* Slot 260: Per-domain mappings. 
*/ + idle_pg_table[l4_table_offset(PERDOMAIN_VIRT_START)] = + l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW); } int arch_domain_create(struct domain *d, @@ -870,9 +874,6 @@ int arch_domain_create(struct domain *d, } else if ( is_pv_domain(d) ) { - if ( (rc = mapcache_domain_init(d)) != 0 ) - goto fail; - if ( (rc = pv_domain_initialise(d)) != 0 ) goto fail; } @@ -909,7 +910,6 @@ int arch_domain_create(struct domain *d, XFREE(d->arch.cpu_policy); if ( paging_initialised ) paging_final_teardown(d); - free_perdomain_mappings(d); return rc; } diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c index eac5e3304fb8..55e337aaf703 100644 --- a/xen/arch/x86/domain_page.c +++ b/xen/arch/x86/domain_page.c @@ -82,11 +82,11 @@ void *map_domain_page(mfn_t mfn) #endif v = mapcache_current_vcpu(); - if ( !v || !is_pv_vcpu(v) ) + if ( !v ) return mfn_to_virt(mfn_x(mfn)); - dcache = &v->domain->arch.pv.mapcache; - vcache = &v->arch.pv.mapcache; + dcache = &v->domain->arch.mapcache; + vcache = &v->arch.mapcache; if ( !dcache->inuse ) return mfn_to_virt(mfn_x(mfn)); @@ -187,14 +187,14 @@ void unmap_domain_page(const void *ptr) ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END); v = mapcache_current_vcpu(); - ASSERT(v && is_pv_vcpu(v)); + ASSERT(v); - dcache = &v->domain->arch.pv.mapcache; + dcache = &v->domain->arch.mapcache; ASSERT(dcache->inuse); idx = PFN_DOWN(va - MAPCACHE_VIRT_START); mfn = l1e_get_pfn(MAPCACHE_L1ENT(idx)); - hashent = &v->arch.pv.mapcache.hash[MAPHASH_HASHFN(mfn)]; + hashent = &v->arch.mapcache.hash[MAPHASH_HASHFN(mfn)]; local_irq_save(flags); @@ -233,11 +233,9 @@ void unmap_domain_page(const void *ptr) int mapcache_domain_init(struct domain *d) { - struct mapcache_domain *dcache = &d->arch.pv.mapcache; + struct mapcache_domain *dcache = &d->arch.mapcache; unsigned int bitmap_pages; - ASSERT(is_pv_domain(d)); - #ifdef NDEBUG if ( !mem_hotplug && max_page <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) ) return 0; @@ -261,12 
+259,12 @@ int mapcache_domain_init(struct domain *d) int mapcache_vcpu_init(struct vcpu *v) { struct domain *d = v->domain; - struct mapcache_domain *dcache = &d->arch.pv.mapcache; + struct mapcache_domain *dcache = &d->arch.mapcache; unsigned long i; unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES; unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long)); - if ( !is_pv_vcpu(v) || !dcache->inuse ) + if ( !dcache->inuse ) return 0; if ( ents > dcache->entries ) @@ -293,7 +291,7 @@ int mapcache_vcpu_init(struct vcpu *v) BUILD_BUG_ON(MAPHASHENT_NOTINUSE < MAPCACHE_ENTRIES); for ( i = 0; i < MAPHASH_ENTRIES; i++ ) { - struct vcpu_maphash_entry *hashent = &v->arch.pv.mapcache.hash[i]; + struct vcpu_maphash_entry *hashent = &v->arch.mapcache.hash[i]; hashent->mfn = ~0UL; /* never valid to map */ hashent->idx = MAPHASHENT_NOTINUSE; diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h index 478ce41ad8ca..b0fd477c62e7 100644 --- a/xen/arch/x86/include/asm/domain.h +++ b/xen/arch/x86/include/asm/domain.h @@ -285,9 +285,6 @@ struct pv_domain /* Mitigate L1TF with shadow/crashing? */ bool check_l1tf; - /* map_domain_page() mapping cache. */ - struct mapcache_domain mapcache; - struct cpuidmasks *cpuidmasks; }; @@ -326,6 +323,9 @@ struct arch_domain uint8_t scf; /* See SCF_DOM_MASK */ + /* map_domain_page() mapping cache. */ + struct mapcache_domain mapcache; + union { struct pv_domain pv; struct hvm_domain hvm; @@ -516,9 +516,6 @@ struct arch_domain struct pv_vcpu { - /* map_domain_page() mapping cache. */ - struct mapcache_vcpu mapcache; - unsigned int vgc_flags; struct trap_info *trap_ctxt; @@ -612,6 +609,9 @@ struct arch_vcpu #define async_exception_state(t) async_exception_state[(t)-1] uint8_t async_exception_mask; + /* map_domain_page() mapping cache. 
*/ + struct mapcache_vcpu mapcache; + /* Virtual Machine Extensions */ union { struct pv_vcpu pv; diff --git a/xen/common/domain.c b/xen/common/domain.c index 92263a4fbdc5..a7f4929b5893 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -738,6 +738,11 @@ struct domain *domain_create(domid_t domid, rangeset_domain_initialise(d); +#ifdef CONFIG_X86 + if ( (err = mapcache_domain_init(d)) != 0) + goto fail; +#endif + if ( is_idle_domain(d) ) arch_init_idle_domain(d); @@ -820,6 +825,10 @@ struct domain *domain_create(domid_t domid, ASSERT(err < 0); /* Sanity check paths leading here. */ err = err ?: -EILSEQ; /* Release build safety. */ +#ifdef CONFIG_X86 + free_perdomain_mappings(d); +#endif + d->is_dying = DOMDYING_dead; if ( hardware_domain == d ) hardware_domain = old_hwdom; -- 2.40.1
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.