[Xen-changelog] [xen-unstable] x86/hvm: Don't unconditionally set up nested HVM state
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1302171175 -3600
# Node ID e5a750d1bf9bb021713c6721000e655a4054ebea
# Parent 751c6dcec0d4a575bed4bda6f221e0f05d040886
x86/hvm: Don't unconditionally set up nested HVM state for domains
that aren't going to use it.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---

diff -r 751c6dcec0d4 -r e5a750d1bf9b xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Apr 06 16:50:16 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Apr 07 11:12:55 2011 +0100
@@ -967,18 +967,8 @@
     if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
         goto fail2;
 
-    /* When I start the l1 guest with 'xm/xend' then HVM_PARAM_NESTEDHVM
-     * is already evaluated.
-     *
-     * When I start the l1 guest with 'xl' then HVM_PARAM_NESTEDHVM
-     * has not been evaluated yet so we have to initialise nested
-     * virtualization unconditionally here.
-     */
-    rc = nestedhvm_vcpu_initialise(v);
-    if ( rc < 0 ) {
-        printk("%s: nestedhvm_vcpu_initialise returned %i\n", __func__, rc);
+    if ( (rc = nestedhvm_vcpu_initialise(v)) < 0 )
         goto fail3;
-    }
 
     /* Create ioreq event channel. */
     rc = alloc_unbound_xen_event_channel(v, 0);
@@ -1046,11 +1036,7 @@
 
 void hvm_vcpu_destroy(struct vcpu *v)
 {
-    int rc;
-
-    rc = nestedhvm_vcpu_destroy(v);
-    if (rc)
-        gdprintk(XENLOG_ERR, "nestedhvm_vcpu_destroy() failed with %i\n", rc);
+    nestedhvm_vcpu_destroy(v);
 
 #ifdef CONFIG_COMPAT
     free_compat_arg_xlat(v);
@@ -3436,6 +3422,11 @@
              */
             if ( !paging_mode_hap(d) && a.value )
                 rc = -EINVAL;
+            /* Set up NHVM state for any vcpus that are already up */
+            if ( !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+                for_each_vcpu(d, v)
+                    if ( rc == 0 )
+                        rc = nestedhvm_vcpu_initialise(v);
             break;
         }
 
@@ -4035,11 +4026,10 @@
     return -EOPNOTSUPP;
 }
 
-int nhvm_vcpu_destroy(struct vcpu *v)
+void nhvm_vcpu_destroy(struct vcpu *v)
 {
-    if (hvm_funcs.nhvm_vcpu_destroy)
-        return hvm_funcs.nhvm_vcpu_destroy(v);
-    return -EOPNOTSUPP;
+    if ( hvm_funcs.nhvm_vcpu_destroy )
+        hvm_funcs.nhvm_vcpu_destroy(v);
 }
 
 int nhvm_vcpu_reset(struct vcpu *v)
diff -r 751c6dcec0d4 -r e5a750d1bf9b xen/arch/x86/hvm/nestedhvm.c
--- a/xen/arch/x86/hvm/nestedhvm.c      Wed Apr 06 16:50:16 2011 +0100
+++ b/xen/arch/x86/hvm/nestedhvm.c      Thu Apr 07 11:12:55 2011 +0100
@@ -33,12 +33,8 @@
     bool_t enabled;
 
     enabled = !!(d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM]);
-    /* sanity check */
     BUG_ON(enabled && !is_hvm_domain(d));
-
-    if (!is_hvm_domain(d))
-        return 0;
-
+
     return enabled;
 }
 
@@ -78,8 +74,11 @@
 {
     int rc;
 
-    rc = nhvm_vcpu_initialise(v);
-    if (rc) {
+    if ( !nestedhvm_enabled(v->domain) )
+        return 0;
+
+    if ( (rc = nhvm_vcpu_initialise(v)) )
+    {
         nhvm_vcpu_destroy(v);
         return rc;
     }
@@ -88,13 +87,11 @@
     return 0;
 }
 
-int
+void
 nestedhvm_vcpu_destroy(struct vcpu *v)
 {
-    if (!nestedhvm_enabled(v->domain))
-        return 0;
-
-    return nhvm_vcpu_destroy(v);
+    if ( nestedhvm_enabled(v->domain) )
+        nhvm_vcpu_destroy(v);
 }
 
 static void
diff -r 751c6dcec0d4 -r e5a750d1bf9b xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c  Wed Apr 06 16:50:16 2011 +0100
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c  Thu Apr 07 11:12:55 2011 +0100
@@ -112,7 +112,7 @@
     return -ENOMEM;
 }
 
-int nsvm_vcpu_destroy(struct vcpu *v)
+void nsvm_vcpu_destroy(struct vcpu *v)
 {
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
@@ -134,8 +134,6 @@
     }
     if (svm->ns_iomap)
         svm->ns_iomap = NULL;
-
-    return 0;
 }
 
 int nsvm_vcpu_reset(struct vcpu *v)
diff -r 751c6dcec0d4 -r e5a750d1bf9b xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Apr 06 16:50:16 2011 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Thu Apr 07 11:12:55 2011 +0100
@@ -148,7 +148,7 @@
 
     /* Nested HVM */
     int (*nhvm_vcpu_initialise)(struct vcpu *v);
-    int (*nhvm_vcpu_destroy)(struct vcpu *v);
+    void (*nhvm_vcpu_destroy)(struct vcpu *v);
     int (*nhvm_vcpu_reset)(struct vcpu *v);
     int (*nhvm_vcpu_hostrestore)(struct vcpu *v,
                                  struct cpu_user_regs *regs);
@@ -415,7 +415,7 @@
 /* Initialize vcpu's struct nestedhvm */
 int nhvm_vcpu_initialise(struct vcpu *v);
 /* Destroy and free vcpu's struct nestedhvm */
-int nhvm_vcpu_destroy(struct vcpu *v);
+void nhvm_vcpu_destroy(struct vcpu *v);
 /* Reset vcpu's state when l1 guest disables nested virtualization */
 int nhvm_vcpu_reset(struct vcpu *v);
 /* Restores l1 guest state */
diff -r 751c6dcec0d4 -r e5a750d1bf9b xen/include/asm-x86/hvm/nestedhvm.h
--- a/xen/include/asm-x86/hvm/nestedhvm.h       Wed Apr 06 16:50:16 2011 +0100
+++ b/xen/include/asm-x86/hvm/nestedhvm.h       Thu Apr 07 11:12:55 2011 +0100
@@ -38,7 +38,7 @@
 
 /* Nested VCPU */
 int nestedhvm_vcpu_initialise(struct vcpu *v);
-int nestedhvm_vcpu_destroy(struct vcpu *v);
+void nestedhvm_vcpu_destroy(struct vcpu *v);
 void nestedhvm_vcpu_reset(struct vcpu *v);
 bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
 #define nestedhvm_vcpu_enter_guestmode(v) \
diff -r 751c6dcec0d4 -r e5a750d1bf9b xen/include/asm-x86/hvm/svm/nestedsvm.h
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h   Wed Apr 06 16:50:16 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h   Thu Apr 07 11:12:55 2011 +0100
@@ -100,7 +100,7 @@
     uint64_t exitcode);
 
 /* Interface methods */
-int nsvm_vcpu_destroy(struct vcpu *v);
+void nsvm_vcpu_destroy(struct vcpu *v);
 int nsvm_vcpu_initialise(struct vcpu *v);
 int nsvm_vcpu_reset(struct vcpu *v);
 int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs);
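
The net effect of the changeset is that per-vcpu nested-HVM state is no longer set up unconditionally at vcpu creation: nestedhvm_vcpu_initialise() now returns early unless the domain has HVM_PARAM_NESTEDHVM set, and the HVM_PARAM_NESTEDHVM handler initialises the state for any vcpus that were already brought up before the parameter was enabled. Below is a minimal standalone sketch of that gating logic; the struct layouts, the parameter index and the simplified set_nestedhvm_param() helper are illustrative assumptions, not Xen's actual types or control flow.

/* Minimal, self-contained sketch of the gating logic introduced by this
 * changeset.  The struct definitions, the parameter index and the stub
 * bodies below are placeholders for illustration only; they are not
 * Xen's real types or implementation. */
#include <stdio.h>

#define HVM_PARAM_NESTEDHVM 24   /* illustrative index, not Xen's value */
#define MAX_VCPUS 4

struct vcpu { int nested_state_ready; };

struct domain {
    unsigned long params[32];
    struct vcpu vcpu[MAX_VCPUS];
    int nr_vcpus;
};

/* Nested HVM is considered enabled iff the domain parameter is non-zero. */
static int nestedhvm_enabled(const struct domain *d)
{
    return d->params[HVM_PARAM_NESTEDHVM] != 0;
}

/* Post-patch behaviour: do nothing unless nested HVM is enabled for the
 * domain; only then set up the per-vcpu nested state. */
static int nestedhvm_vcpu_initialise(struct domain *d, struct vcpu *v)
{
    if ( !nestedhvm_enabled(d) )
        return 0;                  /* early out: no state allocated */
    v->nested_state_ready = 1;     /* stands in for nhvm_vcpu_initialise() */
    return 0;
}

/* Parameter handler: when HVM_PARAM_NESTEDHVM is first enabled, initialise
 * nested state for vcpus that already exist. */
static int set_nestedhvm_param(struct domain *d, unsigned long value)
{
    int rc = 0, i;
    int was_enabled = (d->params[HVM_PARAM_NESTEDHVM] != 0);

    d->params[HVM_PARAM_NESTEDHVM] = value;

    if ( !was_enabled && value )
        for ( i = 0; i < d->nr_vcpus && rc == 0; i++ )
            rc = nestedhvm_vcpu_initialise(d, &d->vcpu[i]);

    return rc;
}

int main(void)
{
    struct domain d = { .nr_vcpus = 2 };

    /* Vcpu set up before the parameter is enabled: initialisation is a no-op. */
    nestedhvm_vcpu_initialise(&d, &d.vcpu[0]);
    printf("before param: ready=%d\n", d.vcpu[0].nested_state_ready);

    /* Enabling the parameter initialises the vcpus that are already up. */
    set_nestedhvm_param(&d, 1);
    printf("after param:  ready=%d %d\n",
           d.vcpu[0].nested_state_ready, d.vcpu[1].nested_state_ready);
    return 0;
}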