
Re: [Xen-devel] [PATCH v2 4/4] x86/HVM: prefill cache with PDPTEs when possible



> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: Tuesday, September 11, 2018 9:16 PM
> 
> Since strictly speaking it is incorrect for guest_walk_tables() to read
> L3 entries during PAE page walks, try to overcome this where possible by

Can you elaborate? Why is it incorrect to read the L3 entries?

> pre-loading the values from hardware into the cache. Sadly the
> information is available in the EPT case only. On the positive side for
> NPT the spec spells out that L3 entries are actually read on walks, so
> us reading them is consistent with hardware behavior in that case.

I'm a little confused by the description here. You change the VMX
code, but use the NPT spec as the reference?
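My own guess, for the record: on bare metal Intel, PAE paging loads the
four PDPTEs from memory into internal registers on writes to CR3 (and
certain CR0/CR4 changes), and later walks use those latched values
rather than re-reading memory. A software walker which reads the L3
entries from memory can therefore observe values the hardware never
latched. A rough sketch of the latch step (the helper and register
names below are made up for illustration, this is not real code):

    /* Architected PAE behavior on MOV to CR3, sketched in C. */
    static uint64_t pdpte_latch[4];      /* hypothetical internal regs */

    static void pae_load_pdptes(uint64_t cr3)
    {
        /* PAE CR3 holds a 32-byte aligned PDPT base; low 5 bits ignored. */
        uint64_t pdpt_base = cr3 & (PADDR_MASK & ~0x1fULL);
        unsigned int i;

        for ( i = 0; i < 4; ++i )        /* 4 PDPTEs of 8 bytes each */
            pdpte_latch[i] = read_phys_qword(pdpt_base + i * 8);
    }

AMD documents the opposite for NPT, i.e. the L3 entries are read from
memory on each walk, which I take to be why the description contrasts
the two. Is that the intended reading?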

> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> v2: Re-base.
> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -2385,6 +2385,23 @@ static int _hvm_emulate_one(struct hvm_e
> 
>      vio->mmio_retry = 0;
> 
> +    if ( !curr->arch.hvm.data_cache->num_ents &&
> +         curr->arch.paging.mode->guest_levels == 3 )
> +    {
> +        unsigned int i;
> +
> +        for ( i = 0; i < 4; ++i )
> +        {
> +            uint64_t pdpte;
> +
> +            if ( hvm_read_pdpte(curr, i, &pdpte) )
> +                hvmemul_write_cache(curr->arch.hvm.data_cache,
> +                                    (curr->arch.hvm.guest_cr[3] &
> +                                     (PADDR_MASK & ~0x1f)) + i * sizeof(pdpte),
> +                                    3, &pdpte, sizeof(pdpte));
> +        }
> +    }
> +
>      rc = x86_emulate(&hvmemul_ctxt->ctxt, ops);
>      if ( rc == X86EMUL_OKAY && vio->mmio_retry )
>          rc = X86EMUL_RETRY;
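Just to double check my reading of this hunk: the cache is seeded only
while it is still empty and only for 3-level (PAE) guests, and the GPA
of each entry is derived from CR3 the same way hardware derives it. A
worked example with a made-up CR3 value:

    /* PAE CR3 is 32-byte aligned; the low 5 bits are ignored. */
    uint64_t base = 0x12345678UL & (PADDR_MASK & ~0x1fUL); /* 0x12345660 */
    /* Each PDPTE is 8 bytes, so entry i sits at base + i * 8, e.g.: */
    uint64_t gpa2 = base + 2 * sizeof(uint64_t);           /* 0x12345670 */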
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1368,6 +1368,25 @@ static void vmx_set_interrupt_shadow(str
>      __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
>  }
> 
> +static bool read_pdpte(struct vcpu *v, unsigned int idx, uint64_t *pdpte)
> +{
> +    if ( !paging_mode_hap(v->domain) || !hvm_pae_enabled(v) ||
> +         (v->arch.hvm.guest_efer & EFER_LMA) )
> +        return false;
> +
> +    if ( idx >= 4 )
> +    {
> +        ASSERT_UNREACHABLE();
> +        return false;
> +    }
> +
> +    vmx_vmcs_enter(v);
> +    __vmread(GUEST_PDPTE(idx), pdpte);
> +    vmx_vmcs_exit(v);
> +
> +    return true;
> +}
> +
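The guards look right to me: hardware only keeps the VMCS PDPTE fields
in sync when EPT is in use and the guest runs PAE paging outside long
mode, which is exactly what the three checks encode. For illustration,
a hypothetical caller would use the hook along these lines:

    uint64_t pdpte;

    /* Returns false on non-EPT / non-PAE / long-mode guests, so the
     * caller must be prepared to fall back to reading the PDPT from
     * guest memory instead. */
    if ( hvm_read_pdpte(v, 0, &pdpte) && (pdpte & _PAGE_PRESENT) )
        /* ... use the latched value ... */;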
>  static void vmx_load_pdptrs(struct vcpu *v)
>  {
>      unsigned long cr3 = v->arch.hvm.guest_cr[3];
> @@ -2466,6 +2485,8 @@ const struct hvm_function_table * __init
>          if ( cpu_has_vmx_ept_1gb )
>              vmx_function_table.hap_capabilities |= HVM_HAP_SUPERPAGE_1GB;
> 
> +        vmx_function_table.read_pdpte = read_pdpte;
> +
>          setup_ept_dump();
>      }
> 
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -146,6 +146,8 @@ struct hvm_function_table {
> 
>      void (*fpu_leave)(struct vcpu *v);
> 
> +    bool (*read_pdpte)(struct vcpu *v, unsigned int index, uint64_t *pdpte);
> +
>      int  (*get_guest_pat)(struct vcpu *v, u64 *);
>      int  (*set_guest_pat)(struct vcpu *v, u64);
> 
> @@ -440,6 +442,12 @@ static inline unsigned long hvm_get_shad
>      return hvm_funcs.get_shadow_gs_base(v);
>  }
> 
> +static inline bool hvm_read_pdpte(struct vcpu *v, unsigned int index,
> +                                  uint64_t *pdpte)
> +{
> +    return hvm_funcs.read_pdpte &&
> +           alternative_call(hvm_funcs.read_pdpte, v, index, pdpte);
> +}
> +
>  static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
>  {
>      return hvm_funcs.get_guest_bndcfgs &&
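One more observation on the wrapper: the NULL-hook short-circuit
matches the other wrappers here, and on SVM, which never sets the
hook, hvm_read_pdpte() returns false without the alternative_call()
being evaluated. The prefill loop in _hvm_emulate_one() then seeds
nothing and walks keep reading the L3 entries from memory, which is
consistent with the NPT spec statement in the description.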