[Xen-devel] [PATCH 1/4] nested vmx: Use a list to store the launched vvmcs for L1 VMM
Originally we used a virtual VMCS field to store the launch state of a
given vmcs. However, once the VMCS shadowing feature is introduced, this
virtual VMCS is also loaded into real hardware, and the guest's
VMREAD/VMWRITE would then operate on an invalid field. The new approach
is to store the launch state in a list kept for the L1 VMM.

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vvmx.c        |   88 ++++++++++++++++++++++++++++++++----
 xen/include/asm-x86/hvm/vmx/vmcs.h |    2 -
 xen/include/asm-x86/hvm/vmx/vvmx.h |    6 +++
 3 files changed, 85 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index d4e9b02..1c7b1d4 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -51,6 +51,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     nvmx->iobitmap[0] = NULL;
     nvmx->iobitmap[1] = NULL;
     nvmx->msrbitmap = NULL;
+    INIT_LIST_HEAD(&nvmx->launched_list);
     return 0;
 out:
     return -ENOMEM;
@@ -58,7 +59,9 @@ out:
 
 void nvmx_vcpu_destroy(struct vcpu *v)
 {
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct vvmcs_list *item, *n;
 
     /*
      * When destroying the vcpu, it may be running on behalf of L2 guest.
@@ -74,6 +77,11 @@ void nvmx_vcpu_destroy(struct vcpu *v)
         free_xenheap_page(nvcpu->nv_n2vmcx);
         nvcpu->nv_n2vmcx = NULL;
     }
+
+    list_for_each_entry_safe(item, n, &nvmx->launched_list, node) {
+        list_del(&item->node);
+        xfree(item);
+    }
 }
 
 void nvmx_domain_relinquish_resources(struct domain *d)
@@ -1198,6 +1206,59 @@ int nvmx_handle_vmxoff(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
+static int vvmcs_launched(struct list_head *launched_list, paddr_t vvmcs_pa)
+{
+    struct vvmcs_list *vvmcs = NULL;
+    struct list_head *pos;
+    int launched = 0;
+
+    list_for_each(pos, launched_list)
+    {
+        vvmcs = list_entry(pos, struct vvmcs_list, node);
+        if ( vvmcs_pa == vvmcs->vvmcs_pa )
+        {
+            launched = 1;
+            break;
+        }
+    }
+
+    return launched;
+}
+
+static int set_vvmcs_launched(struct list_head *launched_list, paddr_t vvmcs_pa)
+{
+    struct vvmcs_list *vvmcs;
+
+    if ( vvmcs_launched(launched_list, vvmcs_pa) )
+        return 0;
+
+    vvmcs = xzalloc(struct vvmcs_list);
+    if ( !vvmcs )
+        return -ENOMEM;
+
+    vvmcs->vvmcs_pa = vvmcs_pa;
+    list_add(&vvmcs->node, launched_list);
+
+    return 0;
+}
+
+static void clear_vvmcs_launched(struct list_head *launched_list, paddr_t vvmcs_pa)
+{
+    struct vvmcs_list *vvmcs;
+    struct list_head *pos;
+
+    list_for_each(pos, launched_list)
+    {
+        vvmcs = list_entry(pos, struct vvmcs_list, node);
+        if ( vvmcs_pa == vvmcs->vvmcs_pa )
+        {
+            list_del(&vvmcs->node);
+            xfree(vvmcs);
+            break;
+        }
+    }
+}
+
 int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
@@ -1221,8 +1282,10 @@ int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
 
 int nvmx_handle_vmresume(struct cpu_user_regs *regs)
 {
-    int launched;
     struct vcpu *v = current;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    int launched;
 
     if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
     {
@@ -1230,8 +1293,8 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
 
-    launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
-                           NVMX_LAUNCH_STATE);
+    launched = vvmcs_launched(&nvmx->launched_list,
+                              virt_to_maddr(nvcpu->nv_vvmcx));
     if ( !launched ) {
        vmreturn (regs, VMFAIL_VALID);
        return X86EMUL_OKAY;
@@ -1244,6 +1307,8 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
     int launched;
     int rc;
     struct vcpu *v = current;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
     if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
     {
@@ -1251,8 +1316,8 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
 
-    launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
-                           NVMX_LAUNCH_STATE);
+    launched = vvmcs_launched(&nvmx->launched_list,
+                              virt_to_maddr(nvcpu->nv_vvmcx));
     if ( launched ) {
        vmreturn (regs, VMFAIL_VALID);
        return X86EMUL_OKAY;
@@ -1260,8 +1325,12 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
     else {
         rc = nvmx_vmresume(v,regs);
         if ( rc == X86EMUL_OKAY )
-            __set_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
-                        NVMX_LAUNCH_STATE, 1);
+        {
+            if ( set_vvmcs_launched(&nvmx->launched_list,
+                                    virt_to_maddr(nvcpu->nv_vvmcx)) < 0 )
+                return X86EMUL_UNHANDLEABLE;
+        }
+
     }
     return rc;
 }
@@ -1328,6 +1397,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa = 0;
     void *vvmcs;
     int rc;
@@ -1344,7 +1414,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
 
     if ( gpa == nvcpu->nv_vvmcxaddr )
     {
-        __set_vvmcs(nvcpu->nv_vvmcx, NVMX_LAUNCH_STATE, 0);
+        clear_vvmcs_launched(&nvmx->launched_list, virt_to_maddr(nvcpu->nv_vvmcx));
         nvmx_purge_vvmcs(v);
     }
     else
@@ -1352,7 +1422,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
         /* Even if this VMCS isn't the current one, we must clear it. */
         vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT);
         if ( vvmcs )
-            __set_vvmcs(vvmcs, NVMX_LAUNCH_STATE, 0);
+            clear_vvmcs_launched(&nvmx->launched_list, virt_to_maddr(vvmcs));
         hvm_unmap_guest_frame(vvmcs);
     }
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 51df81e..9ff741f 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -421,8 +421,6 @@ enum vmcs_field {
     HOST_SYSENTER_EIP               = 0x00006c12,
     HOST_RSP                        = 0x00006c14,
     HOST_RIP                        = 0x00006c16,
-    /* A virtual VMCS field used for nestedvmx only */
-    NVMX_LAUNCH_STATE               = 0x00006c20,
 };
 
 #define VMCS_VPID_WIDTH 16
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 9e1dc77..1c5313d 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -23,6 +23,11 @@
 #ifndef __ASM_X86_HVM_VVMX_H__
 #define __ASM_X86_HVM_VVMX_H__
 
+struct vvmcs_list {
+    paddr_t vvmcs_pa;
+    struct list_head node;
+};
+
 struct nestedvmx {
     paddr_t vmxon_region_pa;
     void *iobitmap[2];          /* map (va) of L1 guest I/O bitmap */
@@ -38,6 +43,7 @@ struct nestedvmx {
         uint32_t exit_qual;
     } ept;
     uint32_t guest_vpid;
+    struct list_head launched_list;
 };
 
 #define vcpu_2_nvmx(v)   (vcpu_nestedhvm(v).u.nvmx)
-- 
1.7.1
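
For readers who want to see the bookkeeping in isolation, the sketch
below models the launch-state tracking this patch introduces, using a
plain singly-linked list and the C standard library instead of Xen's
list_head, xzalloc() and paddr_t. The helper names mirror the patch,
but everything else here (the vvmcs_entry type, the demo main(), the
example addresses) is illustrative only and not part of the patch:
VMLAUNCH emulation inserts the vVMCS address, VMRESUME emulation checks
membership, and VMCLEAR emulation removes it.

/* Standalone sketch (not Xen code) of the launched-vVMCS bookkeeping.
 * Each node records the address of a vVMCS that has been VMLAUNCHed. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef uint64_t paddr_t;                 /* stand-in for Xen's paddr_t */

struct vvmcs_entry {
    paddr_t vvmcs_pa;                     /* address of a launched vVMCS */
    struct vvmcs_entry *next;
};

/* Return 1 if vvmcs_pa is on the launched list (VMLAUNCH already done). */
static int vvmcs_launched(struct vvmcs_entry *head, paddr_t vvmcs_pa)
{
    for ( ; head; head = head->next )
        if ( head->vvmcs_pa == vvmcs_pa )
            return 1;
    return 0;
}

/* Mark vvmcs_pa as launched; no-op if it is already on the list. */
static int set_vvmcs_launched(struct vvmcs_entry **head, paddr_t vvmcs_pa)
{
    struct vvmcs_entry *e;

    if ( vvmcs_launched(*head, vvmcs_pa) )
        return 0;

    e = calloc(1, sizeof(*e));
    if ( !e )
        return -1;                        /* the patch returns -ENOMEM here */

    e->vvmcs_pa = vvmcs_pa;
    e->next = *head;
    *head = e;
    return 0;
}

/* Drop vvmcs_pa from the list, e.g. on VMCLEAR of that vVMCS. */
static void clear_vvmcs_launched(struct vvmcs_entry **head, paddr_t vvmcs_pa)
{
    for ( ; *head; head = &(*head)->next )
        if ( (*head)->vvmcs_pa == vvmcs_pa )
        {
            struct vvmcs_entry *e = *head;
            *head = e->next;
            free(e);
            return;
        }
}

int main(void)
{
    struct vvmcs_entry *launched = NULL;  /* per-vCPU list in the patch */
    paddr_t vmcs_a = 0x1000, vmcs_b = 0x2000;

    printf("A launched? %d\n", vvmcs_launched(launched, vmcs_a));   /* 0 */

    set_vvmcs_launched(&launched, vmcs_a);                     /* VMLAUNCH A */
    printf("A launched? %d\n", vvmcs_launched(launched, vmcs_a));   /* 1 */
    printf("B launched? %d\n", vvmcs_launched(launched, vmcs_b));   /* 0 */

    clear_vvmcs_launched(&launched, vmcs_a);                   /* VMCLEAR A */
    printf("A launched? %d\n", vvmcs_launched(launched, vmcs_a));   /* 0 */
    return 0;
}

The reason the state moves out of the VMCS entirely is the one given in
the commit message: once VMCS shadowing lets the L1 guest's
VMREAD/VMWRITE reach hardware directly, a made-up field encoding such as
NVMX_LAUNCH_STATE is no longer a safe place to hide hypervisor-private
data, so the launch flag has to live in an ordinary list keyed by the
vVMCS address instead.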