
Re: [Xen-devel] [PATCH 3/3] vVMX: use latched VMCS machine address



Hi Jan,

I found some issues in your patch; see the comments below.



> -----Original Message-----
> From: xen-devel-bounces@xxxxxxxxxxxxx [mailto:xen-devel-
> bounces@xxxxxxxxxxxxx] On Behalf Of Jan Beulich
> Sent: Monday, October 19, 2015 11:23 PM
> To: xen-devel
> Cc: Tian, Kevin; Nakajima, Jun
> Subject: [Xen-devel] [PATCH 3/3] vVMX: use latched VMCS machine address
> 
> Instead of calling domain_page_map_to_mfn() over and over, latch the
> guest VMCS machine address unconditionally (i.e. independent of whether
> VMCS shadowing is supported by the hardware).
> 
> Since this requires altering the parameters of __[gs]et_vmcs{,_real}() (and
> hence all their callers) anyway, take the opportunity to also drop the bogus
> double underscores from their names (and from
> __[gs]et_vmcs_virtual() as well).
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> 
> --- a/xen/arch/x86/hvm/vmx/intr.c
> +++ b/xen/arch/x86/hvm/vmx/intr.c
> @@ -191,13 +191,13 @@ static int nvmx_intr_intercept(struct vc
>          if ( intack.source == hvm_intsrc_pic ||
>                   intack.source == hvm_intsrc_lapic )
>          {
> -            ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, PIN_BASED_VM_EXEC_CONTROL);
> +            ctrl = get_vvmcs(v, PIN_BASED_VM_EXEC_CONTROL);
>              if ( !(ctrl & PIN_BASED_EXT_INTR_MASK) )
>                  return 0;
> 
>              vmx_inject_extint(intack.vector, intack.source);
> 
> -            ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, VM_EXIT_CONTROLS);
> +            ctrl = get_vvmcs(v, VM_EXIT_CONTROLS);
>              if ( ctrl & VM_EXIT_ACK_INTR_ON_EXIT )
>              {
>                  /* for now, duplicate the ack path in vmx_intr_assist */
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -932,37 +932,36 @@ void vmx_vmcs_switch(paddr_t from, paddr
>      spin_unlock(&vmx->vmcs_lock);
>  }
> 
> -void virtual_vmcs_enter(void *vvmcs)
> +void virtual_vmcs_enter(const struct vcpu *v)
>  {
> -    __vmptrld(pfn_to_paddr(domain_page_map_to_mfn(vvmcs)));
> +    __vmptrld(v->arch.hvm_vmx.vmcs_shadow_maddr);

Debugging shows that v->arch.hvm_vmx.vmcs_shadow_maddr is 0 at this point,
which will crash the system.
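
If the address can legitimately still be unset when we reach this point, an
ASSERT() would at least make the failure obvious instead of handing 0 to
VMPTRLD. A minimal sketch (just a suggestion on top of your patch):
=====================================
void virtual_vmcs_enter(const struct vcpu *v)
{
    /* Catch an unlatched shadow VMCS address before VMPTRLD faults on 0. */
    ASSERT(v->arch.hvm_vmx.vmcs_shadow_maddr);

    __vmptrld(v->arch.hvm_vmx.vmcs_shadow_maddr);
}
=====================================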

>  }
> 
> -void virtual_vmcs_exit(void *vvmcs)
> +void virtual_vmcs_exit(const struct vcpu *v)
>  {
>      paddr_t cur = this_cpu(current_vmcs);
> 
> -    __vmpclear(pfn_to_paddr(domain_page_map_to_mfn(vvmcs)));
> +    __vmpclear(v->arch.hvm_vmx.vmcs_shadow_maddr);

Debugging shows that v->arch.hvm_vmx.vmcs_shadow_maddr is 0 at this point as
well, which will crash the system.
Maybe we should use pfn_to_paddr(domain_page_map_to_mfn(vvmcs)) here instead.
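
With the new prototype, that suggestion would look roughly like the sketch
below (interim only, until vmcs_shadow_maddr is reliably latched):
=====================================
void virtual_vmcs_exit(const struct vcpu *v)
{
    paddr_t cur = this_cpu(current_vmcs);

    /* Interim fallback: derive the machine address from the mapped vvmcs
     * instead of relying on the (currently zero) latched value. */
    __vmpclear(pfn_to_paddr(domain_page_map_to_mfn(vcpu_nestedhvm(v).nv_vvmcx)));

    if ( cur )
        __vmptrld(cur);
}
=====================================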

>      if ( cur )
>          __vmptrld(cur);
> -
>  }

> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c

>  static void vvmcs_to_shadow_bulk(struct vcpu *v, unsigned int n,
> @@ -950,15 +926,15 @@ static void vvmcs_to_shadow_bulk(struct
> 
>  fallback:
>      for ( i = 0; i < n; i++ )
> -        vvmcs_to_shadow(vvmcs, field[i]);
> +        vvmcs_to_shadow(v, field[i]);
>  }

You omitted a conversion in vvmcs_to_shadow_bulk():
=====================================
static void vvmcs_to_shadow_bulk(struct vcpu *v, unsigned int n,
                                 const u16 *field)
{
    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
    void *vvmcs = nvcpu->nv_vvmcx;
    u64 *value = this_cpu(vvmcs_buf);
    unsigned int i;
 
    if ( !cpu_has_vmx_vmcs_shadowing )
        goto fallback;
 
    if ( !value || n > VMCS_BUF_SIZE )
    {
        gdprintk(XENLOG_DEBUG, "vmcs sync fall back to non-bulk mode, \
                 buffer: %p, buffer size: %d, fields number: %d.\n",
                 value, VMCS_BUF_SIZE, n);
        goto fallback;
    }
 
    virtual_vmcs_enter(vvmcs);
    for ( i = 0; i < n; i++ )
        __vmread(field[i], &value[i]);
    virtual_vmcs_exit(vvmcs);
 
    for ( i = 0; i < n; i++ )
        __vmwrite(field[i], value[i]);
 
    return;
 
fallback:
    for ( i = 0; i < n; i++ )
        vvmcs_to_shadow(v, field[i]);
}
================================
You should use virtual_vmcs_enter(v) / virtual_vmcs_exit(v) in this function,
not virtual_vmcs_enter(vvmcs) / virtual_vmcs_exit(vvmcs).
The same problem exists in shadow_to_vvmcs_bulk() (see the sketch after the
second listing):
==================================
static void shadow_to_vvmcs_bulk(struct vcpu *v, unsigned int n,
                                 const u16 *field)
{
    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
    void *vvmcs = nvcpu->nv_vvmcx;
    u64 *value = this_cpu(vvmcs_buf);
    unsigned int i;
 
    if ( !cpu_has_vmx_vmcs_shadowing )
        goto fallback;
 
    if ( !value || n > VMCS_BUF_SIZE )
    {
        gdprintk(XENLOG_DEBUG, "vmcs sync fall back to non-bulk mode, \
                 buffer: %p, buffer size: %d, fields number: %d.\n",
                 value, VMCS_BUF_SIZE, n);
        goto fallback;
    }
 
    for ( i = 0; i < n; i++ )
        __vmread(field[i], &value[i]);
 
    virtual_vmcs_enter(vvmcs);
    for ( i = 0; i < n; i++ )
        __vmwrite(field[i], value[i]);
    virtual_vmcs_exit(vvmcs);
 
    return;
 
fallback:
    for ( i = 0; i < n; i++ )
        shadow_to_vvmcs(v, field[i]);
}
=====================================
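
In both bulk helpers the remaining change should simply be to pass the vcpu,
e.g. (sketch against your patched prototypes):
=====================================
    /* vvmcs_to_shadow_bulk(): read from the virtual VMCS via the vcpu. */
    virtual_vmcs_enter(v);
    for ( i = 0; i < n; i++ )
        __vmread(field[i], &value[i]);
    virtual_vmcs_exit(v);

    /* shadow_to_vvmcs_bulk(): likewise around the writes. */
    virtual_vmcs_enter(v);
    for ( i = 0; i < n; i++ )
        __vmwrite(field[i], value[i]);
    virtual_vmcs_exit(v);
=====================================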

> @@ -1694,10 +1657,10 @@ int nvmx_handle_vmclear(struct cpu_user_
>          rc = VMFAIL_INVALID;
>      else if ( gpa == nvcpu->nv_vvmcxaddr )
>      {
> -        if ( cpu_has_vmx_vmcs_shadowing )
> -            nvmx_clear_vmcs_pointer(v, nvcpu->nv_vvmcx);
> -        clear_vvmcs_launched(&nvmx->launched_list,
> -            domain_page_map_to_mfn(nvcpu->nv_vvmcx));
> +        unsigned long mfn = PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr);
> +
> +        nvmx_clear_vmcs_pointer(v, nvcpu->nv_vvmcx);
> +        clear_vvmcs_launched(&nvmx->launched_list, mfn);

v->arch.hvm_vmx.vmcs_shadow_maddr will be set to 0 in nvmx_clear_vmcs_pointer(),
so mfn will be 0 at this point, which is incorrect.
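
If the latched address really cannot be trusted here, one interim option is to
go back to deriving the mfn from the mapped vvmcs before clearing the pointer,
as the pre-patch code did (sketch only):
=====================================
        /* Interim: look up the mfn via the mapping rather than the latch. */
        unsigned long mfn = domain_page_map_to_mfn(nvcpu->nv_vvmcx);

        nvmx_clear_vmcs_pointer(v, nvcpu->nv_vvmcx);
        clear_vvmcs_launched(&nvmx->launched_list, mfn);
=====================================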


Liang

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

