[Xen-changelog] [xen-unstable] nVMX: virtualize VPID capability to nested VMM
# HG changeset patch
# User Zhang Xiantao <xiantao.zhang@xxxxxxxxx>
# Date 1358245850 -3600
# Node ID 6c982d14bc4a5aa9dcc119ac49938269799d414e
# Parent a16d3f55a3d583bc50ad1c7a79fef5eb75140a5a
nVMX: virtualize VPID capability to nested VMM
Virtualize VPID for the nested VMM: use a host VPID to emulate the
guest's VPID. On each virtual vmentry, if the guest's VPID has
changed, allocate a new host VPID for the L2 guest.
Signed-off-by: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Acked-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Acked-by: Eddie Dong <eddie.dong@xxxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
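The scheme described above boils down to: never load L1's VPID into hardware; tag the L2 context with a host-allocated ASID instead, and whenever the VPID in the virtual VMCS changes at virtual vmentry, flush and reallocate that ASID so L2 gets the TLB-isolation semantics L1 expects. A minimal standalone C sketch of that bookkeeping follows; the names are illustrative only and do not correspond to Xen's real types or helpers used in the patch below.

/* Sketch only -- models the guest-VPID-to-host-ASID bookkeeping; the
 * struct and function names are made up for illustration and are not
 * Xen's internal API. */
#include <stdint.h>
#include <stdio.h>

struct asid_state {          /* stands in for the per-vCPU L2 ASID */
    uint32_t asid;
    int      need_flush;
};

struct nested_state {        /* stands in for struct nestedvmx */
    uint32_t guest_vpid;     /* VPID last seen in the virtual VMCS */
    struct asid_state n2asid;
};

static uint32_t next_host_asid = 1;

/* Handing out a fresh host ASID implicitly discards L2's old TLB tag. */
static void flush_and_realloc_asid(struct asid_state *a)
{
    a->asid = next_host_asid++;
    a->need_flush = 1;
}

/* Called on each virtual vmentry with the VPID from the virtual VMCS. */
static void on_virtual_vmentry(struct nested_state *nv, uint32_t new_vpid)
{
    if ( nv->guest_vpid != new_vpid )
    {
        flush_and_realloc_asid(&nv->n2asid);  /* stale L2 mappings gone */
        nv->guest_vpid = new_vpid;
    }
}

int main(void)
{
    struct nested_state nv = { .guest_vpid = 0 };

    on_virtual_vmentry(&nv, 5);  /* first entry with VPID 5: new ASID    */
    on_virtual_vmentry(&nv, 5);  /* same VPID again: ASID kept, no flush */
    on_virtual_vmentry(&nv, 7);  /* VPID changed: flush + fresh ASID     */
    printf("L2 now backed by host ASID %u\n", (unsigned)nv.n2asid.asid);
    return 0;
}

The same over-invalidation shortcut appears in the INVVPID emulation further down: all three INVVPID types are collapsed into a full flush of the L2 ASID, which is conservative but always safe, since flushing more than requested never breaks correctness.
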
diff -r a16d3f55a3d5 -r 6c982d14bc4a xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Tue Jan 15 11:29:41 2013 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Tue Jan 15 11:30:50 2013 +0100
@@ -2585,10 +2585,14 @@ void vmx_vmexit_handler(struct cpu_user_
             update_guest_eip();
         break;
 
+    case EXIT_REASON_INVVPID:
+        if ( nvmx_handle_invvpid(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
+
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
     case EXIT_REASON_GETSEC:
-    case EXIT_REASON_INVVPID:
         /*
          * We should never exit on GETSEC because CR4.SMXE is always 0 when
          * running in guest context, and the CPU checks that before getting
@@ -2706,8 +2710,11 @@ void vmx_vmenter_helper(void)
 
     if ( !cpu_has_vmx_vpid )
         goto out;
-
-    p_asid = &curr->arch.hvm_vcpu.n1asid;
+    if ( nestedhvm_vcpu_in_guestmode(curr) )
+        p_asid = &vcpu_nestedhvm(curr).nv_n2asid;
+    else
+        p_asid = &curr->arch.hvm_vcpu.n1asid;
+
     old_asid = p_asid->asid;
     need_flush = hvm_asid_handle_vmenter(p_asid);
     new_asid = p_asid->asid;
diff -r a16d3f55a3d5 -r 6c982d14bc4a xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c Tue Jan 15 11:29:41 2013 +0100
+++ b/xen/arch/x86/hvm/vmx/vvmx.c Tue Jan 15 11:30:50 2013 +0100
@@ -42,6 +42,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         goto out;
     }
     nvmx->ept.enabled = 0;
+    nvmx->guest_vpid = 0;
     nvmx->vmxon_region_pa = 0;
     nvcpu->nv_vvmcx = NULL;
     nvcpu->nv_vvmcxaddr = VMCX_EADDR;
@@ -904,6 +905,16 @@ uint64_t get_shadow_eptp(struct vcpu *v)
     return ept_get_eptp(ept);
 }
 
+static bool_t nvmx_vpid_enabled(struct nestedvcpu *nvcpu)
+{
+    uint32_t second_cntl;
+
+    second_cntl = __get_vvmcs(nvcpu->nv_vvmcx, SECONDARY_VM_EXEC_CONTROL);
+    if ( second_cntl & SECONDARY_EXEC_ENABLE_VPID )
+        return 1;
+    return 0;
+}
+
 static void virtual_vmentry(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -952,6 +963,19 @@ static void virtual_vmentry(struct cpu_u
     if ( nestedhvm_paging_mode_hap(v) )
         __vmwrite(EPT_POINTER, get_shadow_eptp(v));
 
+    /* nested VPID support! */
+    if ( cpu_has_vmx_vpid && nvmx_vpid_enabled(nvcpu) )
+    {
+        struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+        uint32_t new_vpid = __get_vvmcs(vvmcs, VIRTUAL_PROCESSOR_ID);
+
+        if ( nvmx->guest_vpid != new_vpid )
+        {
+            hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
+            nvmx->guest_vpid = new_vpid;
+        }
+    }
+
 }
 
 static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
@@ -1224,7 +1248,7 @@ int nvmx_handle_vmlaunch(struct cpu_user
     if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
     {
         vmreturn (regs, VMFAIL_INVALID);
-        return X86EMUL_OKAY;
+        return X86EMUL_OKAY;
     }
 
     launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
@@ -1426,6 +1450,31 @@ int nvmx_handle_invept(struct cpu_user_r
     return X86EMUL_OKAY;
 }
 
+int nvmx_handle_invvpid(struct cpu_user_regs *regs)
+{
+    struct vmx_inst_decoded decode;
+    unsigned long vpid;
+    int ret;
+
+    if ( (ret = decode_vmx_inst(regs, &decode, &vpid, 0)) != X86EMUL_OKAY )
+        return ret;
+
+    switch ( reg_read(regs, decode.reg2) )
+    {
+    /* Just invalidate all tlb entries for all types! */
+    case INVVPID_INDIVIDUAL_ADDR:
+    case INVVPID_SINGLE_CONTEXT:
+    case INVVPID_ALL_CONTEXT:
+        hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(current).nv_n2asid);
+        break;
+    default:
+        vmreturn(regs, VMFAIL_INVALID);
+        return X86EMUL_OKAY;
+    }
+
+    vmreturn(regs, VMSUCCEED);
+    return X86EMUL_OKAY;
+}
 
 #define __emul_value(enable1, default1) \
     ((enable1 | default1) << 32 | (default1))
diff -r a16d3f55a3d5 -r 6c982d14bc4a xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h Tue Jan 15 11:29:41 2013 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h Tue Jan 15 11:30:50 2013 +0100
@@ -37,6 +37,7 @@ struct nestedvmx {
         uint32_t exit_reason;
         uint32_t exit_qual;
     } ept;
+    uint32_t guest_vpid;
 };
 
 #define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx)
@@ -192,6 +193,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
 int nvmx_handle_vmresume(struct cpu_user_regs *regs);
 int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
 int nvmx_handle_invept(struct cpu_user_regs *regs);
+int nvmx_handle_invvpid(struct cpu_user_regs *regs);
 int nvmx_msr_read_intercept(unsigned int msr,
                             u64 *msr_content);
 int nvmx_msr_write_intercept(unsigned int msr,
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog