[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 1/2] x86/hvm/viridian: keep APIC assist page mapped...
> -----Original Message----- > From: Konrad Rzeszutek Wilk [mailto:konrad.wilk@xxxxxxxxxx] > Sent: 15 March 2016 23:20 > To: Paul Durrant > Cc: xen-devel@xxxxxxxxxxxxxxxxxxxx; Andrew Cooper; Keir (Xen.org); Jan > Beulich > Subject: Re: [Xen-devel] [PATCH 1/2] x86/hvm/viridian: keep APIC assist > page mapped... > > On Tue, Mar 15, 2016 at 04:14:15PM +0000, Paul Durrant wrote: > > ... for the lifetime of the domain. > > > > If Xen is to make use of the APIC assist enlightenment then a persistent > > mapping needs to be kept, rather than the temporary one which is > currently > > used only to initialize the page content. > > > > Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx> > > Cc: Keir Fraser <keir@xxxxxxx> > > Cc: Jan Beulich <jbeulich@xxxxxxxx> > > Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> > > --- > > xen/arch/x86/hvm/viridian.c | 54 +++++++++++++++++++++++++++-- > --------- > > xen/include/asm-x86/hvm/viridian.h | 6 ++++- > > 2 files changed, 43 insertions(+), 17 deletions(-) > > > > diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c > > index 6bd844b..c779290 100644 > > --- a/xen/arch/x86/hvm/viridian.c > > +++ b/xen/arch/x86/hvm/viridian.c > > @@ -163,7 +163,7 @@ static void dump_apic_assist(const struct vcpu *v) > > { > > const union viridian_apic_assist *aa; > > > > - aa = &v->arch.hvm_vcpu.viridian.apic_assist; > > + aa = &v->arch.hvm_vcpu.viridian.apic_assist.msr; > > > > printk(XENLOG_G_INFO "%pv: VIRIDIAN APIC_ASSIST: enabled: %x pfn: > %lx\n", > > v, aa->fields.enabled, (unsigned long)aa->fields.pfn); > > @@ -217,9 +217,9 @@ static void enable_hypercall_page(struct domain > *d) > > static void initialize_apic_assist(struct vcpu *v) > > { > > struct domain *d = v->domain; > > - unsigned long gmfn = v->arch.hvm_vcpu.viridian.apic_assist.fields.pfn; > > + unsigned long gmfn = v- > >arch.hvm_vcpu.viridian.apic_assist.msr.fields.pfn; > > struct page_info *page = get_page_from_gfn(d, gmfn, NULL, > P2M_ALLOC); > > - uint8_t *p; > 
> + void *va; > > > > /* > > * We don't yet make use of the APIC assist page but by setting > > @@ -231,21 +231,40 @@ static void initialize_apic_assist(struct vcpu *v) > > * details of how Windows uses the page. > > */ > > > > - if ( !page || !get_page_type(page, PGT_writable_page) ) > > + if ( !page ) > > + return; > > + > > + if ( !get_page_type(page, PGT_writable_page) ) > > { > > - if ( page ) > > - put_page(page); > > - gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, > > - page ? page_to_mfn(page) : INVALID_MFN); > > + put_page(page); > > You don't want to report errors in case this goes south? > Well, the guest should keep on running but without the enlightenment. You're right that I should probably keep the warning though. Paul > > return; > > } > > > > - p = __map_domain_page(page); > > + va = __map_domain_page_global(page); > > + if ( !va ) > > + { > > + put_page_and_type(page); > > + return; > > + } > > > > - *(u32 *)p = 0; > > + *(uint32_t *)va = 0; > > > > - unmap_domain_page(p); > > + v->arch.hvm_vcpu.viridian.apic_assist.page = page; > > + v->arch.hvm_vcpu.viridian.apic_assist.va = va; > > +} > > + > > +static void teardown_apic_assist(struct vcpu *v) > > +{ > > + struct page_info *page = v->arch.hvm_vcpu.viridian.apic_assist.page; > > + void *va = v->arch.hvm_vcpu.viridian.apic_assist.va; > > + > > + if ( !va ) > > + return; > > + > > + v->arch.hvm_vcpu.viridian.apic_assist.va = NULL; > > + v->arch.hvm_vcpu.viridian.apic_assist.page = NULL; > > > > + unmap_domain_page_global(va); > > put_page_and_type(page); > > } > > > > @@ -374,9 +393,9 @@ int wrmsr_viridian_regs(uint32_t idx, uint64_t val) > > > > case VIRIDIAN_MSR_APIC_ASSIST: > > perfc_incr(mshv_wrmsr_apic_msr); > > - v->arch.hvm_vcpu.viridian.apic_assist.raw = val; > > + v->arch.hvm_vcpu.viridian.apic_assist.msr.raw = val; > > dump_apic_assist(v); > > - if (v->arch.hvm_vcpu.viridian.apic_assist.fields.enabled) > > + if ( v->arch.hvm_vcpu.viridian.apic_assist.msr.fields.enabled ) > > 
initialize_apic_assist(v); > > break; > > > > @@ -485,7 +504,7 @@ int rdmsr_viridian_regs(uint32_t idx, uint64_t *val) > > > > case VIRIDIAN_MSR_APIC_ASSIST: > > perfc_incr(mshv_rdmsr_apic_msr); > > - *val = v->arch.hvm_vcpu.viridian.apic_assist.raw; > > + *val = v->arch.hvm_vcpu.viridian.apic_assist.msr.raw; > > break; > > > > case VIRIDIAN_MSR_REFERENCE_TSC: > > @@ -530,6 +549,7 @@ int viridian_vcpu_init(struct vcpu *v) > > void viridian_vcpu_deinit(struct vcpu *v) > > { > > free_cpumask_var(v->arch.hvm_vcpu.viridian.flush_cpumask); > > + teardown_apic_assist(v); > > } > > > > int viridian_hypercall(struct cpu_user_regs *regs) > > @@ -728,7 +748,7 @@ static int viridian_save_vcpu_ctxt(struct domain *d, > hvm_domain_context_t *h) > > for_each_vcpu( d, v ) { > > struct hvm_viridian_vcpu_context ctxt; > > > > - ctxt.apic_assist = v->arch.hvm_vcpu.viridian.apic_assist.raw; > > + ctxt.apic_assist = v->arch.hvm_vcpu.viridian.apic_assist.msr.raw; > > > > if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 ) > > return 1; > > @@ -754,7 +774,9 @@ static int viridian_load_vcpu_ctxt(struct domain *d, > hvm_domain_context_t *h) > > if ( hvm_load_entry(VIRIDIAN_VCPU, h, &ctxt) != 0 ) > > return -EINVAL; > > > > - v->arch.hvm_vcpu.viridian.apic_assist.raw = ctxt.apic_assist; > > + v->arch.hvm_vcpu.viridian.apic_assist.msr.raw = ctxt.apic_assist; > > + if ( v->arch.hvm_vcpu.viridian.apic_assist.msr.fields.enabled ) > > + initialize_apic_assist(v); > > > > return 0; > > } > > diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm- > x86/hvm/viridian.h > > index 2eec85e..c60c113 100644 > > --- a/xen/include/asm-x86/hvm/viridian.h > > +++ b/xen/include/asm-x86/hvm/viridian.h > > @@ -21,7 +21,11 @@ union viridian_apic_assist > > > > struct viridian_vcpu > > { > > - union viridian_apic_assist apic_assist; > > + struct { > > + union viridian_apic_assist msr; > > + struct page_info *page; > > + void *va; > > + } apic_assist; > > cpumask_var_t flush_cpumask; > > }; 
> > > > -- > > 2.1.4 > > > > > > _______________________________________________ > > Xen-devel mailing list > > Xen-devel@xxxxxxxxxxxxx > > http://lists.xen.org/xen-devel _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our infrastructure. [footer truncated in archive capture]