[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

RE: [PATCH] x86/hvm: Drop get_shadow_gs_base() hook and use hvm_get_reg()


  • To: "Cooper, Andrew" <andrew.cooper3@xxxxxxxxxx>, Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: "Tian, Kevin" <kevin.tian@xxxxxxxxx>
  • Date: Fri, 18 Feb 2022 05:30:54 +0000
  • Accept-language: en-US
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=intel.com; dmarc=pass action=none header.from=intel.com; dkim=pass header.d=intel.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=prQ3LGXFbhnMdjGsP9kuryjHEr1tIwPjHHynugB9hNk=; b=T4Y6adpmxJFECw3oJmR+V3kM1MYrsYGLmANBOaxZXZryyhvUaqPa3wOLmVf260L16R1VaouigS5UYFfqT8WQzzJ2uaV7KOeDsI//C/4zh4oOP4DNkPNV1OwZNvsIUu24s2qU5Uyw3TZdtWsNwE1Zjx6Bnkg5NE0B+eCvgUWQm8sixB6EJhk0YeXgJVshMCEOoLzFd7r+rGQgoN8WFX6H7N/CR76B/4HzaPS34g6PXQ1tdlQnBO8kbHzr5WVB+IVqFWKycAdInrr4cidwxJc9senMUeZ5bOKMlu/5Kaxt2GRgzqcUDJt0X4oxJ8/sIH19OzsFyh/YVWLxnK35i+3ECQ==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; b=Hna/nYgwzl93fUrZymab5SNdiOGT/3F7oxn+mlr3Nq6EyWCESJ8NhI/fb+W7y8S1F+bqjMLXbdkWeNfeQEXrKiM0yyIF/NH6kQMcaroYsbkcrMeKOPWaAG8wWYGQxBg/p3//16F+COfZka7emZ7MN30qFbOGJodjKO6Slkg5o8mg4J4reepv026ivZPi1PQK1dDFHLF1200zHZlVIdA+/8KcmuxdCXzBM+oyqejS5XmhtlOGqU5jXPwUahV4BYmiXXzO6LXd9Dy4mshKLWq152F/CKM496cFYriYOsVfUY541JyKfjrxc3K+jL5hqMDm9l2xANilsWwci/Tkk7AhHg==
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=intel.com;
  • Cc: "Cooper, Andrew" <andrew.cooper3@xxxxxxxxxx>, "Beulich, Jan" <JBeulich@xxxxxxxx>, Pau Monné, Roger <roger.pau@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, "Nakajima, Jun" <jun.nakajima@xxxxxxxxx>
  • Delivery-date: Fri, 18 Feb 2022 05:31:12 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHYDrlHm/6o22eN0EWvDD5o+20u9KyY89XQ
  • Thread-topic: [PATCH] x86/hvm: Drop get_shadow_gs_base() hook and use hvm_get_reg()

> From: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Sent: Friday, January 21, 2022 7:23 PM
> 
> This is a trivial accessor for an MSR, so use hvm_get_reg() rather than a
> dedicated hook.  In arch_get_info_guest(), rework the logic to read
> GS_SHADOW only once.
> 
> 
> get_hvm_registers() is called on current, meaning that diagnostics print a
> stale GS_SHADOW from the previous vcpu context switch.  Adjust both
> implementations to obtain the correct value.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>

> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> CC: Wei Liu <wl@xxxxxxx>
> CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> CC: Kevin Tian <kevin.tian@xxxxxxxxx>
> 
> If we care to backport the bugfix aspect, a far less invasive option would be
> to read MSR_SHADOW_GS_BASE directly.
> 
> The only case where that goes wrong is when vmcb->kerngsbase has been
> modified and is pending a VMLOAD.  I'm fairly sure this can only occur when
> we need vcpu diagnostics, after an emulated write of MSR_SHADOW_GS_BASE.
> ---
>  xen/arch/x86/domctl.c              |  8 ++++++--
>  xen/arch/x86/hvm/svm/svm.c         | 12 ++++++------
>  xen/arch/x86/hvm/vmx/vmx.c         | 16 ++++++++++------
>  xen/arch/x86/include/asm/hvm/hvm.h |  7 -------
>  xen/arch/x86/x86_64/traps.c        |  2 +-
>  5 files changed, 23 insertions(+), 22 deletions(-)
> 
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 7d102e0647ec..e49f9e91b9fa 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -1447,6 +1447,7 @@ void arch_get_info_guest(struct vcpu *v,
> vcpu_guest_context_u c)
>      if ( is_hvm_domain(d) )
>      {
>          struct segment_register sreg;
> +        unsigned long gs_shadow;
> 
>          c.nat->ctrlreg[0] = v->arch.hvm.guest_cr[0];
>          c.nat->ctrlreg[2] = v->arch.hvm.guest_cr[2];
> @@ -1465,15 +1466,18 @@ void arch_get_info_guest(struct vcpu *v,
> vcpu_guest_context_u c)
>          c.nat->fs_base = sreg.base;
>          hvm_get_segment_register(v, x86_seg_gs, &sreg);
>          c.nat->user_regs.gs = sreg.sel;
> +
> +        gs_shadow = hvm_get_reg(v, MSR_SHADOW_GS_BASE);
> +
>          if ( ring_0(&c.nat->user_regs) )
>          {
>              c.nat->gs_base_kernel = sreg.base;
> -            c.nat->gs_base_user = hvm_get_shadow_gs_base(v);
> +            c.nat->gs_base_user = gs_shadow;
>          }
>          else
>          {
>              c.nat->gs_base_user = sreg.base;
> -            c.nat->gs_base_kernel = hvm_get_shadow_gs_base(v);
> +            c.nat->gs_base_kernel = gs_shadow;
>          }
>      }
>      else
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index bb6b8e560a9f..e3bc88e78058 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -752,11 +752,6 @@ static void svm_set_segment_register(struct vcpu *v,
> enum x86_segment seg,
>      }
>  }
> 
> -static unsigned long svm_get_shadow_gs_base(struct vcpu *v)
> -{
> -    return v->arch.hvm.svm.vmcb->kerngsbase;
> -}
> -
>  static int svm_set_guest_pat(struct vcpu *v, u64 gpat)
>  {
>      struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
> @@ -2471,10 +2466,16 @@ static bool svm_get_pending_event(struct vcpu
> *v, struct x86_event *info)
> 
>  static uint64_t svm_get_reg(struct vcpu *v, unsigned int reg)
>  {
> +    struct vcpu *curr = current;
>      struct domain *d = v->domain;
> 
>      switch ( reg )
>      {
> +    case MSR_SHADOW_GS_BASE:
> +        if ( v == curr )
> +            svm_sync_vmcb(v, vmcb_in_sync);
> +        return v->arch.hvm.svm.vmcb->kerngsbase;
> +
>      default:
>          printk(XENLOG_G_ERR "%s(%pv, 0x%08x) Bad register\n",
>                 __func__, v, reg);
> @@ -2513,7 +2514,6 @@ static struct hvm_function_table __initdata
> svm_function_table = {
>      .get_cpl              = svm_get_cpl,
>      .get_segment_register = svm_get_segment_register,
>      .set_segment_register = svm_set_segment_register,
> -    .get_shadow_gs_base   = svm_get_shadow_gs_base,
>      .update_guest_cr      = svm_update_guest_cr,
>      .update_guest_efer    = svm_update_guest_efer,
>      .cpuid_policy_changed = svm_cpuid_policy_changed,
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index c44cf8f5d425..27c36af6027d 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1183,11 +1183,6 @@ static void vmx_set_segment_register(struct vcpu
> *v, enum x86_segment seg,
>      vmx_vmcs_exit(v);
>  }
> 
> -static unsigned long vmx_get_shadow_gs_base(struct vcpu *v)
> -{
> -    return v->arch.hvm.vmx.shadow_gs;
> -}
> -
>  static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
>  {
>      if ( !paging_mode_hap(v->domain) ||
> @@ -2401,6 +2396,7 @@ static int vmtrace_reset(struct vcpu *v)
> 
>  static uint64_t vmx_get_reg(struct vcpu *v, unsigned int reg)
>  {
> +    struct vcpu *curr = current;
>      struct domain *d = v->domain;
>      uint64_t val = 0;
>      int rc;
> @@ -2417,6 +2413,15 @@ static uint64_t vmx_get_reg(struct vcpu *v,
> unsigned int reg)
>              domain_crash(d);
>          }
>          return val;
> +
> +    case MSR_SHADOW_GS_BASE:
> +        if ( v == curr )
> +        {
> +            rdmsrl(MSR_SHADOW_GS_BASE, val);
> +            return val;
> +        }
> +        else
> +            return v->arch.hvm.vmx.shadow_gs;
>      }
> 
>      /* Logic which maybe requires remote VMCS acquisition. */
> @@ -2489,7 +2494,6 @@ static struct hvm_function_table __initdata
> vmx_function_table = {
>      .get_cpl              = _vmx_get_cpl,
>      .get_segment_register = vmx_get_segment_register,
>      .set_segment_register = vmx_set_segment_register,
> -    .get_shadow_gs_base   = vmx_get_shadow_gs_base,
>      .update_host_cr3      = vmx_update_host_cr3,
>      .update_guest_cr      = vmx_update_guest_cr,
>      .update_guest_efer    = vmx_update_guest_efer,
> diff --git a/xen/arch/x86/include/asm/hvm/hvm.h
> b/xen/arch/x86/include/asm/hvm/hvm.h
> index 842f98763c4b..feb9d221a6a6 100644
> --- a/xen/arch/x86/include/asm/hvm/hvm.h
> +++ b/xen/arch/x86/include/asm/hvm/hvm.h
> @@ -128,7 +128,6 @@ struct hvm_function_table {
>                                   struct segment_register *reg);
>      void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
>                                   struct segment_register *reg);
> -    unsigned long (*get_shadow_gs_base)(struct vcpu *v);
> 
>      /*
>       * Re-set the value of CR3 that Xen runs on when handling VM exits.
> @@ -469,11 +468,6 @@ hvm_get_cpl(struct vcpu *v)
>      return alternative_call(hvm_funcs.get_cpl, v);
>  }
> 
> -static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
> -{
> -    return alternative_call(hvm_funcs.get_shadow_gs_base, v);
> -}
> -
>  #define has_hvm_params(d) \
>      ((d)->arch.hvm.params != NULL)
> 
> @@ -753,7 +747,6 @@ void hvm_set_reg(struct vcpu *v, unsigned int reg,
> uint64_t val);
>   * needed because DCE will kick in.
>   */
>  int hvm_guest_x86_mode(struct vcpu *v);
> -unsigned long hvm_get_shadow_gs_base(struct vcpu *v);
>  void hvm_cpuid_policy_changed(struct vcpu *v);
>  void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc);
> 
> diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
> index d661d7ffcaaf..d97bf07b22bc 100644
> --- a/xen/arch/x86/x86_64/traps.c
> +++ b/xen/arch/x86/x86_64/traps.c
> @@ -79,7 +79,7 @@ static void get_hvm_registers(struct vcpu *v, struct
> cpu_user_regs *regs,
>      hvm_get_segment_register(v, x86_seg_ss, &sreg);
>      regs->ss = sreg.sel;
> 
> -    crs[7] = hvm_get_shadow_gs_base(v);
> +    crs[7] = hvm_get_reg(v, MSR_SHADOW_GS_BASE);
>  }
> 
>  static void _show_registers(
> --
> 2.11.0


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.