[Xen-changelog] [xen staging-4.6] x86/vmx: API improvements for MSR load/save infrastructure
commit ff1d0b6bfe2dfc895d2d8652ff6bb3282ac32ccc
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon May 7 11:57:00 2018 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Aug 14 17:39:56 2018 +0100

    x86/vmx: API improvements for MSR load/save infrastructure

    Collect together related infrastructure in vmcs.h, rather than having it
    spread out.  Turn vmx_{read,write}_guest_msr() into static inlines, as
    they are simple enough.

    Replace 'int type' with 'enum vmx_msr_list_type', and use switch
    statements internally.  Later changes are going to introduce a new type.

    Rename the type identifiers for consistency with the other VMX_MSR_*
    constants.

    No functional change.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    (cherry picked from commit f54b63e8617ada823be43d60467a43c8224b7909)
---
 xen/arch/x86/hvm/vmx/vmcs.c        | 88 +++++++++++++++++++++-----------------
 xen/include/asm-x86/hvm/vmx/vmcs.h | 59 ++++++++++++++++++-------
 2 files changed, 93 insertions(+), 54 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index d419f80be3..fcc9a2cb98 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1287,58 +1287,62 @@ static int construct_vmcs(struct vcpu *v)
     return rc;
 }
 
-int vmx_read_guest_msr(u32 msr, u64 *val)
+struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type)
 {
     struct vcpu *curr = current;
-    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
-    const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+    unsigned int msr_count;
+    struct vmx_msr_entry *msr_area = NULL;
+    unsigned int i;
 
-    for ( i = 0; i < msr_count; i++ )
+    switch ( type )
     {
-        if ( msr_area[i].index == msr )
-        {
-            *val = msr_area[i].data;
-            return 0;
-        }
-    }
+    case VMX_MSR_HOST:
+        msr_count = curr->arch.hvm_vmx.host_msr_count;
+        msr_area = curr->arch.hvm_vmx.host_msr_area;
+        break;
 
-    return -ESRCH;
-}
+    case VMX_MSR_GUEST:
+        msr_count = curr->arch.hvm_vmx.msr_count;
+        msr_area = curr->arch.hvm_vmx.msr_area;
+        break;
 
-int vmx_write_guest_msr(u32 msr, u64 val)
-{
-    struct vcpu *curr = current;
-    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
-    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+    default:
+        ASSERT_UNREACHABLE();
+    }
+
+    if ( msr_area == NULL )
+        return NULL;
 
     for ( i = 0; i < msr_count; i++ )
     {
         if ( msr_area[i].index == msr )
-        {
-            msr_area[i].data = val;
-            return 0;
-        }
+            return &msr_area[i];
     }
 
-    return -ESRCH;
+    return NULL;
 }
 
-int vmx_add_msr(u32 msr, int type)
+int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type)
 {
     struct vcpu *curr = current;
     unsigned int idx, *msr_count;
     struct vmx_msr_entry **msr_area, *msr_area_elem;
 
-    if ( type == VMX_GUEST_MSR )
-    {
-        msr_count = &curr->arch.hvm_vmx.msr_count;
-        msr_area = &curr->arch.hvm_vmx.msr_area;
-    }
-    else
+    switch ( type )
     {
-        ASSERT(type == VMX_HOST_MSR);
+    case VMX_MSR_HOST:
         msr_count = &curr->arch.hvm_vmx.host_msr_count;
         msr_area = &curr->arch.hvm_vmx.host_msr_area;
+        break;
+
+    case VMX_MSR_GUEST:
+        msr_count = &curr->arch.hvm_vmx.msr_count;
+        msr_area = &curr->arch.hvm_vmx.msr_area;
+        break;
+
+    default:
+        ASSERT_UNREACHABLE();
+        return -EINVAL;
     }
 
     if ( *msr_area == NULL )
@@ -1346,13 +1350,17 @@ int vmx_add_msr(u32 msr, int type)
         if ( (*msr_area = alloc_xenheap_page()) == NULL )
             return -ENOMEM;
 
-        if ( type == VMX_GUEST_MSR )
+        switch ( type )
         {
+        case VMX_MSR_HOST:
+            __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+            break;
+
+        case VMX_MSR_GUEST:
             __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(*msr_area));
             __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+            break;
         }
-        else
-            __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
     }
 
     for ( idx = 0; idx < *msr_count; idx++ )
@@ -1368,16 +1376,18 @@ int vmx_add_msr(u32 msr, int type)
 
     ++*msr_count;
 
-    if ( type == VMX_GUEST_MSR )
+    switch ( type )
     {
+    case VMX_MSR_HOST:
+        rdmsrl(msr, msr_area_elem->data);
+        __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
+        break;
+
+    case VMX_MSR_GUEST:
         msr_area_elem->data = 0;
         __vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
         __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
-    }
-    else
-    {
-        rdmsrl(msr, msr_area_elem->data);
-        __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
+        break;
     }
 
     return 0;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index ab33469a87..1dab1f87a4 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -485,18 +485,56 @@ extern const unsigned int vmx_introspection_force_enabled_msrs_size;
 #define MSR_TYPE_R 1
 #define MSR_TYPE_W 2
 
-#define VMX_GUEST_MSR 0
-#define VMX_HOST_MSR  1
-
 /* VM Instruction error numbers. */
 #define VMX_INSN_INVALID_CONTROL_STATE    7
 #define VMX_INSN_INVALID_HOST_STATE       8
 
+/* MSR load/save list infrastructure. */
+enum vmx_msr_list_type {
+    VMX_MSR_HOST,           /* MSRs loaded on VMExit.                   */
+    VMX_MSR_GUEST,          /* MSRs saved on VMExit, loaded on VMEntry. */
+};
+
+int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type);
+
+static inline int vmx_add_host_load_msr(uint32_t msr)
+{
+    return vmx_add_msr(msr, VMX_MSR_HOST);
+}
+
+static inline int vmx_add_guest_msr(uint32_t msr)
+{
+    return vmx_add_msr(msr, VMX_MSR_GUEST);
+}
+
+struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type);
+
+static inline int vmx_read_guest_msr(uint32_t msr, uint64_t *val)
+{
+    const struct vmx_msr_entry *ent = vmx_find_msr(msr, VMX_MSR_GUEST);
+
+    if ( !ent )
+        return -ESRCH;
+
+    *val = ent->data;
+
+    return 0;
+}
+
+static inline int vmx_write_guest_msr(uint32_t msr, uint64_t val)
+{
+    struct vmx_msr_entry *ent = vmx_find_msr(msr, VMX_MSR_GUEST);
+
+    if ( !ent )
+        return -ESRCH;
+
+    ent->data = val;
+
+    return 0;
+}
+
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
 void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
-int vmx_read_guest_msr(u32 msr, u64 *val);
-int vmx_write_guest_msr(u32 msr, u64 val);
-int vmx_add_msr(u32 msr, int type);
 void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to);
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
 void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
@@ -506,15 +544,6 @@ void virtual_vmcs_exit(void *vvmcs);
 u64 virtual_vmcs_vmread(void *vvmcs, u32 vmcs_encoding);
 void virtual_vmcs_vmwrite(void *vvmcs, u32 vmcs_encoding, u64 val);
 
-static inline int vmx_add_guest_msr(u32 msr)
-{
-    return vmx_add_msr(msr, VMX_GUEST_MSR);
-}
-static inline int vmx_add_host_load_msr(u32 msr)
-{
-    return vmx_add_msr(msr, VMX_HOST_MSR);
-}
-
 DECLARE_PER_CPU(bool_t, vmxon);
 
 bool_t vmx_vcpu_pml_enabled(const struct vcpu *v);
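As a usage illustration (not part of the patch): after this change a caller
registers an MSR on the current vCPU's guest load/save list once, and then
reads or writes it through the new static inlines, which are thin wrappers
around vmx_find_msr().  A minimal sketch follows, assuming a hypothetical
MSR_EXAMPLE constant and helper name; the vmx_* calls, the VMX_MSR_GUEST
plumbing, and the error values are those introduced above:

    /*
     * Hypothetical caller (the function name and MSR_EXAMPLE are
     * illustrative only).  Like the patched functions themselves, this
     * must run in the context of the current vCPU, as the lists hang off
     * current->arch.hvm_vmx.
     */
    static int example_track_msr(void)
    {
        /* Equivalent to vmx_add_msr(MSR_EXAMPLE, VMX_MSR_GUEST). */
        int rc = vmx_add_guest_msr(MSR_EXAMPLE);

        if ( rc )  /* e.g. -ENOMEM if the MSR area can't be allocated */
            return rc;

        /* The list entry now exists, so the inline wrappers can find it. */
        return vmx_write_guest_msr(MSR_EXAMPLE, 0);
    }

vmx_read_guest_msr()/vmx_write_guest_msr() keep the pre-patch -ESRCH
behaviour for MSRs which were never added, matching the commit's claim of
no functional change.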
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.6