[Xen-devel] [PATCH 8/9] x86/vmx: Support removing MSRs from the host/guest load/save lists
Up until this point, the MSR load/save lists have only ever accumulated
content.  Introduce vmx_del_msr() as a companion to vmx_add_msr().

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c        | 68 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/vmx/vmcs.h |  1 +
 2 files changed, 69 insertions(+)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 7bf19a0..e1a8f95 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1465,6 +1465,74 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
     return rc;
 }
 
+int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
+{
+    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
+    struct vmx_msr_entry *start = NULL, *ent, *end;
+    unsigned int substart, subend, total;
+
+    ASSERT(v == current || !vcpu_runnable(v));
+
+    switch ( type )
+    {
+    case VMX_MSR_HOST:
+        start    = arch_vmx->host_msr_area;
+        substart = 0;
+        subend   = arch_vmx->host_msr_count;
+        total    = subend;
+        break;
+
+    case VMX_MSR_GUEST:
+        start    = arch_vmx->msr_area;
+        substart = 0;
+        subend   = arch_vmx->msr_save_count;
+        total    = arch_vmx->msr_load_count;
+        break;
+
+    case VMX_MSR_GUEST_LOADONLY:
+        start    = arch_vmx->msr_area;
+        substart = arch_vmx->msr_save_count;
+        subend   = arch_vmx->msr_load_count;
+        total    = subend;
+        break;
+
+    default:
+        ASSERT_UNREACHABLE();
+    }
+
+    if ( !start )
+        return -ESRCH;
+
+    end = start + total;
+    ent = locate_msr_entry(start + substart, start + subend, msr);
+
+    if ( (ent == end) || (ent->index != msr) )
+        return -ESRCH;
+
+    memmove(ent, ent + 1, sizeof(*ent) * (end - ent - 1));
+
+    vmx_vmcs_enter(v);
+
+    switch ( type )
+    {
+    case VMX_MSR_HOST:
+        __vmwrite(VM_EXIT_MSR_LOAD_COUNT, --arch_vmx->host_msr_count);
+        break;
+
+    case VMX_MSR_GUEST:
+        __vmwrite(VM_EXIT_MSR_STORE_COUNT, --arch_vmx->msr_save_count);
+
+        /* Fallthrough */
+    case VMX_MSR_GUEST_LOADONLY:
+        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, --arch_vmx->msr_load_count);
+        break;
+    }
+
+    vmx_vmcs_exit(v);
+
+    return 0;
+}
+
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
 {
     if ( !test_and_set_bit(vector, v->arch.hvm_vmx.eoi_exit_bitmap) )
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index b0fccd2..cfd174c 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -548,6 +548,7 @@ enum vmx_msr_list_type {
 
 int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
                 enum vmx_msr_list_type type);
+int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type);
 
 static inline int vmx_add_guest_msr(struct vcpu *v, uint32_t msr, uint64_t val)
 {
-- 
2.1.4
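
For reference, a minimal caller sketch (not part of the posted patch) of how
the new interface pairs with vmx_add_msr()/vmx_add_guest_msr().  The helper
name and the choice to treat -ESRCH as success are illustrative assumptions;
only vmx_del_msr(), VMX_MSR_GUEST and the -ESRCH return value come from the
patch itself:

    /*
     * Hypothetical caller: stop saving/loading an MSR that may previously
     * have been added to the guest list with vmx_add_guest_msr().
     */
    static int example_disable_msr_autoload(struct vcpu *v, uint32_t msr)
    {
        int rc = vmx_del_msr(v, msr, VMX_MSR_GUEST);

        /* The MSR never having been on the list is fine for this caller. */
        return rc == -ESRCH ? 0 : rc;
    }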