
Re: [Xen-devel] [PATCH v11 for-xen-4.5 06/20] vmx: Merge MSR management routines



On Mon, Sep 22, 2014 at 07:57:47PM -0400, Boris Ostrovsky wrote:
> vmx_add_host_load_msr() and vmx_add_guest_msr() share a fair amount of
> code. Merge them to simplify code maintenance.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
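
For reference, a minimal sketch of how a call site can use the merged entry
point; MSR_STAR below is just an example index and the error handling is
hypothetical, neither is taken from this patch:

    int rc;

    /* Guest MSR: stored on VM exit, loaded on VM entry; data starts at 0. */
    rc = vmx_add_msr(MSR_STAR, VMX_GUEST_MSR);

    /* Host MSR: reloaded on VM exit, seeded from the current value via rdmsrl(). */
    if ( rc == 0 )
        rc = vmx_add_msr(MSR_STAR, VMX_HOST_MSR);

    /* rc is 0 on success, -ENOMEM if the MSR page cannot be allocated,
       or -ENOSPC once the page is full. */
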
> ---
>  xen/arch/x86/hvm/vmx/vmcs.c        | 84 +++++++++++++++++++-------------------
>  xen/include/asm-x86/hvm/vmx/vmcs.h | 16 +++++++-
>  2 files changed, 55 insertions(+), 45 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index 4a4f4e1..fcae6bd 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1176,64 +1176,62 @@ int vmx_write_guest_msr(u32 msr, u64 val)
>      return -ESRCH;
>  }
>  
> -int vmx_add_guest_msr(u32 msr)
> +int vmx_add_msr(u32 msr, int type)
>  {
>      struct vcpu *curr = current;
> -    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
> -    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
> +    unsigned int idx, *msr_count;
> +    struct vmx_msr_entry **msr_area, *msr_area_elem;
> +
> +    if ( type == VMX_GUEST_MSR )
> +    {
> +        msr_count = &curr->arch.hvm_vmx.msr_count;
> +        msr_area = &curr->arch.hvm_vmx.msr_area;
> +    }
> +    else
> +    {
> +        ASSERT(type == VMX_HOST_MSR);
> +        msr_count = &curr->arch.hvm_vmx.host_msr_count;
> +        msr_area = &curr->arch.hvm_vmx.host_msr_area;
> +    }
>  
> -    if ( msr_area == NULL )
> +    if ( *msr_area == NULL )
>      {
> -        if ( (msr_area = alloc_xenheap_page()) == NULL )
> +        if ( (*msr_area = alloc_xenheap_page()) == NULL )
>              return -ENOMEM;
> -        curr->arch.hvm_vmx.msr_area = msr_area;
> -        __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
> -        __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
> +
> +        if ( type == VMX_GUEST_MSR )
> +        {
> +            __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(*msr_area));
> +            __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
> +        }
> +        else
> +            __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
>      }
>  
> -    for ( i = 0; i < msr_count; i++ )
> -        if ( msr_area[i].index == msr )
> +    for ( idx = 0; idx < *msr_count; idx++ )
> +        if ( (*msr_area)[idx].index == msr )
>              return 0;
>  
> -    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
> +    if ( *msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
>          return -ENOSPC;
>  
> -    msr_area[msr_count].index = msr;
> -    msr_area[msr_count].mbz   = 0;
> -    msr_area[msr_count].data  = 0;
> -    curr->arch.hvm_vmx.msr_count = ++msr_count;
> -    __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
> -    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
> +    msr_area_elem = *msr_area + *msr_count;
> +    msr_area_elem->index = msr;
> +    msr_area_elem->mbz = 0;
>  
> -    return 0;
> -}
> +    ++*msr_count;
>  
> -int vmx_add_host_load_msr(u32 msr)
> -{
> -    struct vcpu *curr = current;
> -    unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
> -    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
> -
> -    if ( msr_area == NULL )
> +    if ( type == VMX_GUEST_MSR )
>      {
> -        if ( (msr_area = alloc_xenheap_page()) == NULL )
> -            return -ENOMEM;
> -        curr->arch.hvm_vmx.host_msr_area = msr_area;
> -        __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
> +        msr_area_elem->data = 0;
> +        __vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
> +        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
> +    }
> +    else
> +    {
> +        rdmsrl(msr, msr_area_elem->data);
> +        __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
>      }
> -
> -    for ( i = 0; i < msr_count; i++ )
> -        if ( msr_area[i].index == msr )
> -            return 0;
> -
> -    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
> -        return -ENOSPC;
> -
> -    msr_area[msr_count].index = msr;
> -    msr_area[msr_count].mbz   = 0;
> -    rdmsrl(msr, msr_area[msr_count].data);
> -    curr->arch.hvm_vmx.host_msr_count = ++msr_count;
> -    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
>  
>      return 0;
>  }
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index 215d93c..f6cf010 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -475,12 +475,15 @@ enum vmcs_field {
>  
>  #define MSR_TYPE_R 1
>  #define MSR_TYPE_W 2
> +
> +#define VMX_GUEST_MSR 0
> +#define VMX_HOST_MSR  1
> +
>  void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
>  void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
>  int vmx_read_guest_msr(u32 msr, u64 *val);
>  int vmx_write_guest_msr(u32 msr, u64 val);
> -int vmx_add_guest_msr(u32 msr);
> -int vmx_add_host_load_msr(u32 msr);
> +int vmx_add_msr(u32 msr, int type);
>  void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to);
>  void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
>  void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
> @@ -490,6 +493,15 @@ void virtual_vmcs_exit(void *vvmcs);
>  u64 virtual_vmcs_vmread(void *vvmcs, u32 vmcs_encoding);
>  void virtual_vmcs_vmwrite(void *vvmcs, u32 vmcs_encoding, u64 val);
>  
> +static inline int vmx_add_guest_msr(u32 msr)
> +{
> +    return vmx_add_msr(msr, VMX_GUEST_MSR);
> +}
> +static inline int vmx_add_host_load_msr(u32 msr)
> +{
> +    return vmx_add_msr(msr, VMX_HOST_MSR);
> +}
> +
>  DECLARE_PER_CPU(bool_t, vmxon);
>  
>  #endif /* ASM_X86_HVM_VMX_VMCS_H__ */
> -- 
> 1.8.1.4
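
Worth noting for context: since the old entry points are kept as static
inline wrappers, existing callers compile unchanged, e.g.:

    rc = vmx_add_guest_msr(msr);      /* same as vmx_add_msr(msr, VMX_GUEST_MSR) */
    rc = vmx_add_host_load_msr(msr);  /* same as vmx_add_msr(msr, VMX_HOST_MSR)  */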

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel