
Re: [Xen-devel] [PATCH v2 1/3] x86/hvm: Don't raise #GP behind the emulator's back for MSR accesses



> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 20 February 2017 10:29
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant
> <Paul.Durrant@xxxxxxxxxx>; Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>;
> Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> Subject: [PATCH v2 1/3] x86/hvm: Don't raise #GP behind the emulator's back
> for MSR accesses
> 
> The current hvm_msr_{read,write}_intercept() infrastructure calls
> hvm_inject_hw_exception() directly to latch a fault, and returns
> X86EMUL_EXCEPTION to its caller.
> 
> This behaviour is problematic for the hvmemul_{read,write}_msr() paths, as
> the fault is raised behind the back of the x86 emulator.
> 
> Alter the behaviour so hvm_msr_{read,write}_intercept() simply returns
> X86EMUL_EXCEPTION, leaving the callers to actually inject the #GP fault.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
>

hvm/emulate.c changes...

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
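
For illustration, a minimal sketch of the calling convention this patch
establishes (the pattern mirrors the svm_do_msr_access() hunk below; "msr"
and "val" are placeholder variables, not names taken from the patch):

    uint64_t val;
    int rc = hvm_msr_read_intercept(msr, &val);

    if ( rc == X86EMUL_OKAY )
        ;   /* val holds the MSR contents. */
    else if ( rc == X86EMUL_EXCEPTION )
        /* The intercept no longer injects #GP itself; the caller must. */
        hvm_inject_hw_exception(TRAP_gp_fault, 0);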
 
> v2:
>  * Substantial rebase
>  * Introduce __must_check for hvm_msr_{read,write}_intercept()
> ---
>  xen/arch/x86/hvm/emulate.c        | 14 ++++++++++++--
>  xen/arch/x86/hvm/hvm.c            |  7 ++++---
>  xen/arch/x86/hvm/svm/svm.c        |  4 ++--
>  xen/arch/x86/hvm/vmx/vmx.c        | 23 ++++++++++++++++++-----
>  xen/arch/x86/hvm/vmx/vvmx.c       | 19 ++++++++++++++-----
>  xen/include/asm-x86/hvm/support.h | 12 +++++++++---
>  6 files changed, 59 insertions(+), 20 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index 14f9b43..edcae5e 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1544,7 +1544,12 @@ static int hvmemul_read_msr(
>      uint64_t *val,
>      struct x86_emulate_ctxt *ctxt)
>  {
> -    return hvm_msr_read_intercept(reg, val);
> +    int rc = hvm_msr_read_intercept(reg, val);
> +
> +    if ( rc == X86EMUL_EXCEPTION )
> +        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
> +
> +    return rc;
>  }
> 
>  static int hvmemul_write_msr(
> @@ -1552,7 +1557,12 @@ static int hvmemul_write_msr(
>      uint64_t val,
>      struct x86_emulate_ctxt *ctxt)
>  {
> -    return hvm_msr_write_intercept(reg, val, 1);
> +    int rc = hvm_msr_write_intercept(reg, val, 1);
> +
> +    if ( rc == X86EMUL_EXCEPTION )
> +        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
> +
> +    return rc;
>  }
> 
>  static int hvmemul_wbinvd(
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 6621d62..08855c2 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -518,7 +518,10 @@ void hvm_do_resume(struct vcpu *v)
> 
>          if ( w->do_write.msr )
>          {
> -            hvm_msr_write_intercept(w->msr, w->value, 0);
> +            if ( hvm_msr_write_intercept(w->msr, w->value, 0) ==
> +                 X86EMUL_EXCEPTION )
> +                hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +
>              w->do_write.msr = 0;
>          }
> 
> @@ -3455,7 +3458,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>      return ret;
> 
>   gp_fault:
> -    hvm_inject_hw_exception(TRAP_gp_fault, 0);
>      ret = X86EMUL_EXCEPTION;
>      *msr_content = -1ull;
>      goto out;
> @@ -3600,7 +3602,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
>      return ret;
> 
>  gp_fault:
> -    hvm_inject_hw_exception(TRAP_gp_fault, 0);
>      return X86EMUL_EXCEPTION;
>  }
> 
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 894c457..b864535 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1744,7 +1744,6 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>      return X86EMUL_OKAY;
> 
>   gpf:
> -    hvm_inject_hw_exception(TRAP_gp_fault, 0);
>      return X86EMUL_EXCEPTION;
>  }
> 
> @@ -1897,7 +1896,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>      return result;
> 
>   gpf:
> -    hvm_inject_hw_exception(TRAP_gp_fault, 0);
>      return X86EMUL_EXCEPTION;
>  }
> 
> @@ -1924,6 +1922,8 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
> 
>      if ( rc == X86EMUL_OKAY )
>          __update_guest_eip(regs, inst_len);
> +    else if ( rc == X86EMUL_EXCEPTION )
> +        hvm_inject_hw_exception(TRAP_gp_fault, 0);
>  }
> 
>  static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 597d7ac..b5bfa05 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2734,7 +2734,6 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>      return X86EMUL_OKAY;
> 
>  gp_fault:
> -    hvm_inject_hw_exception(TRAP_gp_fault, 0);
>      return X86EMUL_EXCEPTION;
>  }
> 
> @@ -2971,7 +2970,6 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>      return X86EMUL_OKAY;
> 
>  gp_fault:
> -    hvm_inject_hw_exception(TRAP_gp_fault, 0);
>      return X86EMUL_EXCEPTION;
>  }
> 
> @@ -3664,18 +3662,33 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
>          break;
>      case EXIT_REASON_MSR_READ:
>      {
> -        uint64_t msr_content;
> -        if ( hvm_msr_read_intercept(regs->_ecx, &msr_content) == X86EMUL_OKAY )
> +        uint64_t msr_content = 0;
> +
> +        switch ( hvm_msr_read_intercept(regs->_ecx, &msr_content) )
>          {
> +        case X86EMUL_OKAY:
>              msr_split(regs, msr_content);
>              update_guest_eip(); /* Safe: RDMSR */
> +            break;
> +
> +        case X86EMUL_EXCEPTION:
> +            hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +            break;
>          }
>          break;
>      }
> 
>      case EXIT_REASON_MSR_WRITE:
> -        if ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) == X86EMUL_OKAY )
> +        switch ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) )
> +        {
> +        case X86EMUL_OKAY:
>              update_guest_eip(); /* Safe: WRMSR */
> +            break;
> +
> +        case X86EMUL_EXCEPTION:
> +            hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +            break;
> +        }
>          break;
> 
>      case EXIT_REASON_VMXOFF:
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index f6a25a6..c830d16 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -1032,6 +1032,7 @@ static void load_shadow_guest_state(struct vcpu *v)
>      struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
>      u32 control;
>      u64 cr_gh_mask, cr_read_shadow;
> +    int rc;
> 
>      static const u16 vmentry_fields[] = {
>          VM_ENTRY_INTR_INFO,
> @@ -1053,8 +1054,12 @@ static void load_shadow_guest_state(struct vcpu *v)
>      if ( control & VM_ENTRY_LOAD_GUEST_PAT )
>          hvm_set_guest_pat(v, get_vvmcs(v, GUEST_PAT));
>      if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
> -        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
> -                                get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
> +    {
> +        rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
> +                                     get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
> +        if ( rc == X86EMUL_EXCEPTION )
> +            hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +    }
> 
>      hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
> 
> @@ -1222,7 +1227,7 @@ static void sync_vvmcs_ro(struct vcpu *v)
> 
>  static void load_vvmcs_host_state(struct vcpu *v)
>  {
> -    int i;
> +    int i, rc;
>      u64 r;
>      u32 control;
> 
> @@ -1240,8 +1245,12 @@ static void load_vvmcs_host_state(struct vcpu *v)
>      if ( control & VM_EXIT_LOAD_HOST_PAT )
>          hvm_set_guest_pat(v, get_vvmcs(v, HOST_PAT));
>      if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
> -        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
> -                                get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
> +    {
> +        rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
> +                                     get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
> +        if ( rc == X86EMUL_EXCEPTION )
> +            hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +    }
> 
>      hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
> 
> diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
> index 262955d..5e25698 100644
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -121,13 +121,19 @@ int hvm_set_efer(uint64_t value);
>  int hvm_set_cr0(unsigned long value, bool_t may_defer);
>  int hvm_set_cr3(unsigned long value, bool_t may_defer);
>  int hvm_set_cr4(unsigned long value, bool_t may_defer);
> -int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
> -int hvm_msr_write_intercept(
> -    unsigned int msr, uint64_t msr_content, bool_t may_defer);
>  int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
>  int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
>  void hvm_ud_intercept(struct cpu_user_regs *);
> 
> +/*
> + * May return X86EMUL_EXCEPTION, at which point the caller is responsible for
> + * injecting a #GP fault.  Used to support speculative reads.
> + */
> +int __must_check hvm_msr_read_intercept(
> +    unsigned int msr, uint64_t *msr_content);
> +int __must_check hvm_msr_write_intercept(
> +    unsigned int msr, uint64_t msr_content, bool_t may_defer);
> +
>  #endif /* __ASM_X86_HVM_SUPPORT_H__ */
> 
>  /*
> --
> 2.1.4
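
A note on the v2 __must_check addition: in Xen this annotation expands to
gcc's warn_unused_result attribute, so a caller that silently discards the
return value (and with it a pending #GP) now draws a compile-time warning.
A minimal sketch of the effect, assuming the standard definition from
xen/include/xen/compiler.h:

    #define __must_check __attribute__((__warn_unused_result__))

    /* Now warns at compile time: an X86EMUL_EXCEPTION result is lost. */
    hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);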

