[Xen-devel] [PATCH v6 12/21] x86/pv: Provide custom cpumasks for PV domains
And use them in preference to cpumask_defaults on context switch.

HVM domains must not be masked (to avoid interfering with cpuid calls within
the guest), so always lazily context switch to the host default.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Jan Beulich <JBeulich@xxxxxxxx>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
v2:
 * s/cpumasks/cpuidmasks/
 * Use structure assignment
 * Fix error path in arch_domain_create()
v3:
 * Indentation fixes.
 * Only allocate PV cpuidmasks if the host has cpumasks to use.
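
As an illustration of the behaviour described above, below is a minimal
standalone sketch (not Xen code) of what the context-switch levelling hooks
do: pick the domain's own cpuidmasks when switching to a PV domain that has
them, fall back to the boot-time defaults otherwise, and only rewrite a
masking MSR when the cached per-CPU value actually differs.  The struct
layout, the wrmsrl() stub and the MSR index used here are simplified
stand-ins for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for Xen's struct cpuidmasks. */
struct cpuidmasks {
    uint64_t _1cd;                      /* one example masking MSR value */
};

/* Boot-time defaults; derived from command line options in the real code. */
static struct cpuidmasks cpuidmask_defaults = { ._1cd = ~0ULL };

/* Per-CPU cache of what is currently loaded in the masking MSR. */
static struct cpuidmasks this_cpu_cpuidmasks;

/* Simplified stand-in for struct domain. */
struct domain {
    bool is_pv;
    struct cpuidmasks *cpuidmasks;      /* NULL for HVM, or no masking MSRs */
};

#define MSR_CPUID_MASK_EXAMPLE 0x130    /* illustrative index only */

/* Stand-in for the real MSR write. */
static void wrmsrl(uint32_t msr, uint64_t val)
{
    printf("wrmsr 0x%x <- 0x%016llx\n", msr, (unsigned long long)val);
}

static void ctxt_switch_levelling(const struct domain *nextd)
{
    struct cpuidmasks *these_masks = &this_cpu_cpuidmasks;

    /*
     * PV domains carrying their own mask set use it; everything else (HVM
     * guests, the idle domain, or a host without masking support) is
     * switched back to the host defaults.
     */
    const struct cpuidmasks *masks =
        (nextd && nextd->is_pv && nextd->cpuidmasks)
        ? nextd->cpuidmasks : &cpuidmask_defaults;

    /* Lazy update: only touch the (slow) MSR if the value must change. */
    if (these_masks->_1cd != masks->_1cd) {
        wrmsrl(MSR_CPUID_MASK_EXAMPLE, masks->_1cd);
        these_masks->_1cd = masks->_1cd;
    }
}

int main(void)
{
    struct cpuidmasks pv_masks = { ._1cd = 0x00000000ffffffffULL };
    struct domain pv  = { .is_pv = true,  .cpuidmasks = &pv_masks };
    struct domain hvm = { .is_pv = false, .cpuidmasks = NULL };

    ctxt_switch_levelling(&pv);   /* differs from the zeroed cache: writes */
    ctxt_switch_levelling(&hvm);  /* lazily restores the host defaults */
    ctxt_switch_levelling(&hvm);  /* value already current: no MSR write */
    return 0;
}

In the patch itself the compare-and-write step is wrapped in the LAZY()
macro so it can be stamped out once per masking MSR, and the Intel path
returns early to prefer CPUID faulting over masking when the hardware
supports it.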
---
 xen/arch/x86/cpu/amd.c       |  4 +++-
 xen/arch/x86/cpu/intel.c     |  5 ++++-
 xen/arch/x86/domain.c        | 14 ++++++++++++++
 xen/include/asm-x86/domain.h |  2 ++
 4 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 3e2f4a8..d5afc3e 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -206,7 +206,9 @@ static void __init noinline probe_masking_msrs(void)
 static void amd_ctxt_switch_levelling(const struct domain *nextd)
 {
         struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
-        const struct cpuidmasks *masks = &cpuidmask_defaults;
+        const struct cpuidmasks *masks =
+                (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+                ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
 
 #define LAZY(cap, msr, field) \
         ({ \
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index e21c32d..fe4736e 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -154,7 +154,7 @@ static void __init probe_masking_msrs(void)
 static void intel_ctxt_switch_levelling(const struct domain *nextd)
 {
         struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
-        const struct cpuidmasks *masks = &cpuidmask_defaults;
+        const struct cpuidmasks *masks;
 
         if (cpu_has_cpuid_faulting) {
                 /*
@@ -178,6 +178,9 @@ static void intel_ctxt_switch_levelling(const struct domain *nextd)
                 return;
         }
 
+        masks = (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+                ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
+
 #define LAZY(msr, field) \
         ({ \
                 if (unlikely(these_masks->field != masks->field) && \
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index cba77a2..a64bfdc 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -577,6 +577,14 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
             goto fail;
         clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
 
+        if ( levelling_caps & ~LCAP_faulting )
+        {
+            d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
+            if ( !d->arch.pv_domain.cpuidmasks )
+                goto fail;
+            *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
+        }
+
         rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
                                       GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
                                       NULL, NULL);
@@ -672,7 +680,10 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
     paging_final_teardown(d);
     free_perdomain_mappings(d);
     if ( is_pv_domain(d) )
+    {
+        xfree(d->arch.pv_domain.cpuidmasks);
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+    }
     psr_domain_free(d);
     return rc;
 }
@@ -692,7 +703,10 @@ void arch_domain_destroy(struct domain *d)
 
     free_perdomain_mappings(d);
     if ( is_pv_domain(d) )
+    {
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+        xfree(d->arch.pv_domain.cpuidmasks);
+    }
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index de60def..90f021f 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -252,6 +252,8 @@ struct pv_domain
 
     /* map_domain_page() mapping cache. */
     struct mapcache_domain mapcache;
+
+    struct cpuidmasks *cpuidmasks;
 };
 
 struct monitor_write_data {
-- 
2.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel