Re: [Xen-devel] [PATCH 21/24] PVH xen: HVM support of PVH guest creation/destruction
On Wed, Jul 17, 2013 at 07:33:05PM -0700, Mukesh Rathor wrote:
> This patch implements the HVM portion of guest creation, i.e. vcpu and
> domain initialization, along with some changes to support the destroy path.
>
> Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>

Thanks for splitting this out of the cleanup patch.

> ---
>  xen/arch/x86/hvm/hvm.c |   67 ++++++++++++++++++++++++++++++++++++++++++++++-
>  1 files changed, 65 insertions(+), 2 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 3d930eb..7066d7b 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -510,6 +510,30 @@ static int hvm_print_line(
>      return X86EMUL_OKAY;
>  }
>
> +static int pvh_dom_initialise(struct domain *d)
> +{
> +    int rc;
> +
> +    if ( !d->arch.hvm_domain.hap_enabled )
> +        return -EINVAL;
> +
> +    spin_lock_init(&d->arch.hvm_domain.irq_lock);
> +
> +    hvm_init_cacheattr_region_list(d);
> +
> +    if ( (rc = paging_enable(d, PG_refcounts|PG_translate|PG_external)) != 0 )
> +        goto pvh_dominit_fail;
> +
> +    if ( (rc = hvm_funcs.domain_initialise(d)) != 0 )
> +        goto pvh_dominit_fail;
> +
> +    return 0;
> +
> +pvh_dominit_fail:
> +    hvm_destroy_cacheattr_region_list(d);
> +    return rc;
> +}
> +
>  int hvm_domain_initialise(struct domain *d)
>  {
>      int rc;
> @@ -520,6 +544,8 @@ int hvm_domain_initialise(struct domain *d)
>                   "on a non-VT/AMDV platform.\n");
>          return -EINVAL;
>      }
> +    if ( is_pvh_domain(d) )
> +        return pvh_dom_initialise(d);
>
>      spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
>      spin_lock_init(&d->arch.hvm_domain.irq_lock);
> @@ -584,6 +610,9 @@ int hvm_domain_initialise(struct domain *d)
>
>  void hvm_domain_relinquish_resources(struct domain *d)
>  {
> +    if ( is_pvh_domain(d) )
> +        return;
> +
>      if ( hvm_funcs.nhvm_domain_relinquish_resources )
>          hvm_funcs.nhvm_domain_relinquish_resources(d);
>
> @@ -609,10 +638,14 @@ void hvm_domain_relinquish_resources(struct domain *d)
>  void hvm_domain_destroy(struct domain *d)
>  {
>      hvm_funcs.domain_destroy(d);
> +    hvm_destroy_cacheattr_region_list(d);
> +
> +    if ( is_pvh_domain(d) )
> +        return;
> +
>      rtc_deinit(d);
>      stdvga_deinit(d);
>      vioapic_deinit(d);
> -    hvm_destroy_cacheattr_region_list(d);
>  }
>
>  static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
> @@ -1066,6 +1099,30 @@ static int __init __hvm_register_CPU_XSAVE_save_and_restore(void)
>  }
>  __initcall(__hvm_register_CPU_XSAVE_save_and_restore);
>
> +static int pvh_vcpu_initialise(struct vcpu *v)
> +{
> +    int rc;
> +
> +    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
> +        return rc;
> +
> +    softirq_tasklet_init(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet,
> +                         (void(*)(unsigned long))hvm_assert_evtchn_irq,
> +                         (unsigned long)v);
> +
> +    v->arch.hvm_vcpu.hcall_64bit = 1;    /* PVH 32bitfixme. */
> +    v->arch.user_regs.eflags = 2;
> +    v->arch.hvm_vcpu.inject_trap.vector = -1;
> +
> +    if ( (rc = hvm_vcpu_cacheattr_init(v)) != 0 )
> +    {
> +        hvm_funcs.vcpu_destroy(v);
> +        return rc;
> +    }
> +
> +    return 0;
> +}
> +
>  int hvm_vcpu_initialise(struct vcpu *v)
>  {
>      int rc;
> @@ -1077,6 +1134,9 @@ int hvm_vcpu_initialise(struct vcpu *v)
>      spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
>      INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
>
> +    if ( is_pvh_vcpu(v) )
> +        return pvh_vcpu_initialise(v);
> +
>      if ( (rc = vlapic_init(v)) != 0 )
>          goto fail1;
>
> @@ -1165,7 +1225,10 @@ void hvm_vcpu_destroy(struct vcpu *v)
>
>      tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
>      hvm_vcpu_cacheattr_destroy(v);
> -    vlapic_destroy(v);
> +
> +    if ( !is_pvh_vcpu(v) )
> +        vlapic_destroy(v);
> +
>      hvm_funcs.vcpu_destroy(v);
>
>      /* Event channel is already freed by evtchn_destroy(). */
> --
> 1.7.2.3
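
For readers following along outside the tree: the shape of the patch is a simple
early-return dispatch, where the common HVM entry points test the domain type and
branch into a slimmed-down PVH path that skips the emulated-device setup (vlapic,
vioapic, RTC, stdvga) only a full HVM guest needs. Below is a minimal standalone
sketch of that pattern, not the actual Xen code; the struct fields and the
is_pvh_domain() predicate here are simplified stand-ins for the real
xen/arch/x86/hvm/hvm.c definitions.

/*
 * Standalone sketch of the early-return dispatch pattern (simplified
 * stand-in types; compiles on its own, unlike real hypervisor code).
 */
#include <errno.h>
#include <stdio.h>

struct domain {
    int is_pvh;       /* stand-in for is_pvh_domain(d)                  */
    int hap_enabled;  /* stand-in for d->arch.hvm_domain.hap_enabled    */
};

static int is_pvh_domain(const struct domain *d) { return d->is_pvh; }

static int pvh_dom_initialise(struct domain *d)
{
    /* PVH requires hardware-assisted paging; refuse otherwise. */
    if ( !d->hap_enabled )
        return -EINVAL;

    /* ... initialise only the state shared with HVM: locks,
     *     cache-attribute region list, paging, vendor hooks ... */
    return 0;
}

static int hvm_domain_initialise(struct domain *d)
{
    /* PVH guests take the reduced path and return early. */
    if ( is_pvh_domain(d) )
        return pvh_dom_initialise(d);

    /* ... full HVM path: vioapic, RTC, stdvga, pbuf, etc. ... */
    return 0;
}

int main(void)
{
    struct domain pvh_hap    = { .is_pvh = 1, .hap_enabled = 1 };
    struct domain pvh_shadow = { .is_pvh = 1, .hap_enabled = 0 };

    printf("PVH with HAP:    %d\n", hvm_domain_initialise(&pvh_hap));    /* 0       */
    printf("PVH without HAP: %d\n", hvm_domain_initialise(&pvh_shadow)); /* -EINVAL */
    return 0;
}

The same pattern repeats in the vcpu paths above, with one asymmetry worth noting:
hvm_domain_destroy() moves hvm_destroy_cacheattr_region_list() before the PVH
early return, because the cache-attribute list is allocated on both paths and so
must be freed on both.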