[Xen-devel] [PATCH v2] x86/nhvm: properly clean up after failure to set up all vCPU-s

Otherwise we may leak memory when setting up nHVM fails half way.

This implies that the individual destroy functions will have to remain
capable (in the VMX case they first need to be made so, following
26486:7648ef657fe7 and 26489:83a3fa9c8434) of being called for a vCPU
that the corresponding init function was never run on.

While at it, also clean up some inefficiencies in the corresponding
parameter validation code.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: nVMX fixes required by 26486:7648ef657fe7 and 26489:83a3fa9c8434.

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3916,20 +3916,25 @@ long do_hvm_op(unsigned long op, XEN_GUE
                 rc = -EPERM;
                 break;
             }
+            if ( !a.value )
+                break;
             if ( a.value > 1 )
                 rc = -EINVAL;
-            if ( !is_hvm_domain(d) )
-                rc = -EINVAL;
             /* Remove the check below once we have
              * shadow-on-shadow.
              */
-            if ( cpu_has_svm && !paging_mode_hap(d) && a.value )
+            if ( cpu_has_svm && !paging_mode_hap(d) )
                 rc = -EINVAL;
             /* Set up NHVM state for any vcpus that are already up */
             if ( !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+            {
                 for_each_vcpu(d, v)
                     if ( rc == 0 )
                         rc = nestedhvm_vcpu_initialise(v);
+                if ( rc )
+                    for_each_vcpu(d, v)
+                        nestedhvm_vcpu_destroy(v);
+            }
             break;
         case HVM_PARAM_BUFIOREQ_EVTCHN:
             rc = -EINVAL;
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -87,7 +87,7 @@ nestedhvm_vcpu_initialise(struct vcpu *v
 void
 nestedhvm_vcpu_destroy(struct vcpu *v)
 {
-    if ( nestedhvm_enabled(v->domain) && hvm_funcs.nhvm_vcpu_destroy )
+    if ( hvm_funcs.nhvm_vcpu_destroy )
         hvm_funcs.nhvm_vcpu_destroy(v);
 }
 
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -62,7 +62,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     if ( !nvcpu->nv_n2vmcx )
     {
         gdprintk(XENLOG_ERR, "nest: allocation for shadow vmcs failed\n");
-        goto out;
+        return -ENOMEM;
     }
 
     /* non-root VMREAD/VMWRITE bitmap. */
@@ -75,7 +75,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     if ( !vmread_bitmap )
     {
         gdprintk(XENLOG_ERR, "nest: allocation for vmread bitmap failed\n");
-        goto out1;
+        return -ENOMEM;
     }
     v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
 
@@ -83,7 +83,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     if ( !vmwrite_bitmap )
     {
         gdprintk(XENLOG_ERR, "nest: allocation for vmwrite bitmap failed\n");
-        goto out2;
+        return -ENOMEM;
     }
     v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap;
 
@@ -118,12 +118,6 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     nvmx->msrbitmap = NULL;
     INIT_LIST_HEAD(&nvmx->launched_list);
     return 0;
-out2:
-    free_domheap_page(v->arch.hvm_vmx.vmread_bitmap);
-out1:
-    free_xenheap_page(nvcpu->nv_n2vmcx);
-out:
-    return -ENOMEM;
 }
 
 void nvmx_vcpu_destroy(struct vcpu *v)
@@ -147,16 +141,24 @@ void nvmx_vcpu_destroy(struct vcpu *v)
         nvcpu->nv_n2vmcx = NULL;
     }
 
-    list_for_each_entry_safe(item, n, &nvmx->launched_list, node)
-    {
-        list_del(&item->node);
-        xfree(item);
-    }
+    /* Must also cope with nvmx_vcpu_initialise() not having got called. */
+    if ( nvmx->launched_list.next )
+        list_for_each_entry_safe(item, n, &nvmx->launched_list, node)
+        {
+            list_del(&item->node);
+            xfree(item);
+        }
 
     if ( v->arch.hvm_vmx.vmread_bitmap )
+    {
         free_domheap_page(v->arch.hvm_vmx.vmread_bitmap);
+        v->arch.hvm_vmx.vmread_bitmap = NULL;
+    }
     if ( v->arch.hvm_vmx.vmwrite_bitmap )
+    {
         free_domheap_page(v->arch.hvm_vmx.vmwrite_bitmap);
+        v->arch.hvm_vmx.vmwrite_bitmap = NULL;
+    }
 }
 
 void nvmx_domain_relinquish_resources(struct domain *d)
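[Editor's note: the hvm.c hunk above applies the usual init-all-or-roll-back-all pattern, which is why the per-vCPU destroy hooks must tolerate vCPUs whose init never ran. Below is a minimal standalone C sketch of that pattern; all names (vcpu_nested_init, vcpu_nested_destroy, nested_enable) are hypothetical and are not the Xen internals.]

/*
 * Sketch: initialise nested state for every vCPU; on the first failure,
 * call the destroy hook for *all* vCPUs.  The destroy hook therefore has
 * to be safe to call on a vCPU that was never initialised.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_VCPUS 4

struct vcpu {
    void *nested_state;   /* NULL until init succeeds */
};

static int vcpu_nested_init(struct vcpu *v, int fail)
{
    if (fail)
        return -1;                      /* simulate an allocation failure */
    v->nested_state = calloc(1, 64);
    return v->nested_state ? 0 : -1;
}

/* Must be safe even if vcpu_nested_init() never ran for this vCPU. */
static void vcpu_nested_destroy(struct vcpu *v)
{
    free(v->nested_state);
    v->nested_state = NULL;
}

static int nested_enable(struct vcpu *vcpus, int nr, int fail_at)
{
    int i, rc = 0;

    for (i = 0; i < nr && rc == 0; i++)
        rc = vcpu_nested_init(&vcpus[i], i == fail_at);

    if (rc)                             /* roll back everything on failure */
        for (i = 0; i < nr; i++)
            vcpu_nested_destroy(&vcpus[i]);

    return rc;
}

int main(void)
{
    struct vcpu vcpus[NR_VCPUS];
    int i;

    memset(vcpus, 0, sizeof(vcpus));
    printf("enable (failing at vCPU 2): %d\n", nested_enable(vcpus, NR_VCPUS, 2));
    printf("enable (no failure): %d\n", nested_enable(vcpus, NR_VCPUS, -1));

    for (i = 0; i < NR_VCPUS; i++)
        vcpu_nested_destroy(&vcpus[i]);
    return 0;
}

[The key point mirrored by the patch: destroy keys off state that is only set on successful init, so calling it unconditionally during rollback is harmless.]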
Otherwise we may leak memory when setting up nHVM fails half way. This implies that the individual destroy functions will have to remain capable (in the VMX case they first need to be made so, following 26486:7648ef657fe7 and 26489:83a3fa9c8434) of being called for a vCPU that the corresponding init function was never run on. Once at it, also clean up some inefficiencies in the corresponding parameter validation code. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> --- v2: nVMX fixes required by 26486:7648ef657fe7 and 26489:83a3fa9c8434. --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -3916,20 +3916,25 @@ long do_hvm_op(unsigned long op, XEN_GUE rc = -EPERM; break; } + if ( !a.value ) + break; if ( a.value > 1 ) rc = -EINVAL; - if ( !is_hvm_domain(d) ) - rc = -EINVAL; /* Remove the check below once we have * shadow-on-shadow. */ - if ( cpu_has_svm && !paging_mode_hap(d) && a.value ) + if ( cpu_has_svm && !paging_mode_hap(d) ) rc = -EINVAL; /* Set up NHVM state for any vcpus that are already up */ if ( !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] ) + { for_each_vcpu(d, v) if ( rc == 0 ) rc = nestedhvm_vcpu_initialise(v); + if ( rc ) + for_each_vcpu(d, v) + nestedhvm_vcpu_destroy(v); + } break; case HVM_PARAM_BUFIOREQ_EVTCHN: rc = -EINVAL; --- a/xen/arch/x86/hvm/nestedhvm.c +++ b/xen/arch/x86/hvm/nestedhvm.c @@ -87,7 +87,7 @@ nestedhvm_vcpu_initialise(struct vcpu *v void nestedhvm_vcpu_destroy(struct vcpu *v) { - if ( nestedhvm_enabled(v->domain) && hvm_funcs.nhvm_vcpu_destroy ) + if ( hvm_funcs.nhvm_vcpu_destroy ) hvm_funcs.nhvm_vcpu_destroy(v); } --- a/xen/arch/x86/hvm/vmx/vvmx.c +++ b/xen/arch/x86/hvm/vmx/vvmx.c @@ -62,7 +62,7 @@ int nvmx_vcpu_initialise(struct vcpu *v) if ( !nvcpu->nv_n2vmcx ) { gdprintk(XENLOG_ERR, "nest: allocation for shadow vmcs failed\n"); - goto out; + return -ENOMEM; } /* non-root VMREAD/VMWRITE bitmap. */ @@ -75,7 +75,7 @@ int nvmx_vcpu_initialise(struct vcpu *v) if ( !vmread_bitmap ) { gdprintk(XENLOG_ERR, "nest: allocation for vmread bitmap failed\n"); - goto out1; + return -ENOMEM; } v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap; @@ -83,7 +83,7 @@ int nvmx_vcpu_initialise(struct vcpu *v) if ( !vmwrite_bitmap ) { gdprintk(XENLOG_ERR, "nest: allocation for vmwrite bitmap failed\n"); - goto out2; + return -ENOMEM; } v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap; @@ -118,12 +118,6 @@ int nvmx_vcpu_initialise(struct vcpu *v) nvmx->msrbitmap = NULL; INIT_LIST_HEAD(&nvmx->launched_list); return 0; -out2: - free_domheap_page(v->arch.hvm_vmx.vmread_bitmap); -out1: - free_xenheap_page(nvcpu->nv_n2vmcx); -out: - return -ENOMEM; } void nvmx_vcpu_destroy(struct vcpu *v) @@ -147,16 +141,24 @@ void nvmx_vcpu_destroy(struct vcpu *v) nvcpu->nv_n2vmcx = NULL; } - list_for_each_entry_safe(item, n, &nvmx->launched_list, node) - { - list_del(&item->node); - xfree(item); - } + /* Must also cope with nvmx_vcpu_initialise() not having got called. */ + if ( nvmx->launched_list.next ) + list_for_each_entry_safe(item, n, &nvmx->launched_list, node) + { + list_del(&item->node); + xfree(item); + } if ( v->arch.hvm_vmx.vmread_bitmap ) + { free_domheap_page(v->arch.hvm_vmx.vmread_bitmap); + v->arch.hvm_vmx.vmread_bitmap = NULL; + } if ( v->arch.hvm_vmx.vmwrite_bitmap ) + { free_domheap_page(v->arch.hvm_vmx.vmwrite_bitmap); + v->arch.hvm_vmx.vmwrite_bitmap = NULL; + } } void nvmx_domain_relinquish_resources(struct domain *d) Attachment:
Attachment: x86-nhvm-enable-failure.patch