
Re: [Xen-devel] [PATCH 2/6] x86/hvm: Handle viridian MSRs via the new guest_{rd,wr}msr() infrastructure



> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 26 February 2018 17:35
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich
> <JBeulich@xxxxxxxx>; Jun Nakajima <jun.nakajima@xxxxxxxxx>; Paul
> Durrant <Paul.Durrant@xxxxxxxxxx>; Kevin Tian <kevin.tian@xxxxxxxxx>; Boris
> Ostrovsky <boris.ostrovsky@xxxxxxxxxx>; Suravee Suthikulpanit
> <suravee.suthikulpanit@xxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>; Roger
> Pau Monne <roger.pau@xxxxxxxxxx>; Sergey Dyasli
> <sergey.dyasli@xxxxxxxxxx>
> Subject: [PATCH 2/6] x86/hvm: Handle viridian MSRs via the new
> guest_{rd,wr}msr() infrastructure
> 
> Dispatch from the guest_{rd,wr}msr() functions, after confirming that the
> domain is configured to use viridian.  This allows for simplification of the
> viridian helpers, as they no longer need to cope with the "not a viridian
> MSR" case.  It also means that viridian MSRs which are unimplemented, or
> excluded because of features, don't fall back into the default handling path.
> 
> Rename {rd,wr}msr_viridian_regs() to guest_{rd,wr}msr_viridian() for
> consistency, and because the _regs suffix isn't very appropriate.
> 
> Update them to take a vcpu pointer rather than presuming that they act on
> current, which is safe for all implemented operations.  Also update them to
> use X86EMUL_* return values.
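
Just to confirm I've read the new convention correctly: callers now pass an
explicit vcpu and get X86EMUL_* back, so, as a sketch only (the exception
injection call below is from memory rather than from this patch), a consumer
ends up doing something like:

    uint64_t val;

    if ( guest_rdmsr_viridian(v, HV_X64_MSR_TIME_REF_COUNT, &val) ==
         X86EMUL_EXCEPTION )
        hvm_inject_hw_exception(TRAP_gp_fault, 0); /* invalid access => #GP */

rather than testing a boolean "handled" result as before.
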
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Kevin Tian <kevin.tian@xxxxxxxxx>
> CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> CC: Wei Liu <wei.liu2@xxxxxxxxxx>
> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> CC: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
> ---
>  xen/arch/x86/hvm/svm/svm.c         |  6 +----
>  xen/arch/x86/hvm/viridian.c        | 49 ++++++++++++++++++--------------------
>  xen/arch/x86/hvm/vmx/vmx.c         |  6 +----
>  xen/arch/x86/msr.c                 | 41 +++++++++++++++++++++++++++----
>  xen/include/asm-x86/hvm/viridian.h | 11 ++-------
>  5 files changed, 64 insertions(+), 49 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 8b4cefd..6d8ed5c 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1967,8 +1967,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>          else if ( ret )
>              break;
> 
> -        if ( rdmsr_viridian_regs(msr, msr_content) ||
> -             rdmsr_hypervisor_regs(msr, msr_content) )
> +        if ( rdmsr_hypervisor_regs(msr, msr_content) )
>              break;
> 
>          if ( rdmsr_safe(msr, *msr_content) == 0 )
> @@ -2123,9 +2122,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>          else if ( ret )
>              break;
> 
> -        if ( wrmsr_viridian_regs(msr, msr_content) )
> -            break;
> -
>          switch ( wrmsr_hypervisor_regs(msr, msr_content) )
>          {
>          case -ERESTART:
> diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
> index 70aab52..23de433 100644
> --- a/xen/arch/x86/hvm/viridian.c
> +++ b/xen/arch/x86/hvm/viridian.c
> @@ -554,13 +554,11 @@ static void update_reference_tsc(struct domain *d, bool_t initialize)
>      put_page_and_type(page);
>  }
> 
> -int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
> +int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
>  {
> -    struct vcpu *v = current;
>      struct domain *d = v->domain;
> 
> -    if ( !is_viridian_domain(d) )
> -        return 0;
> +    ASSERT(is_viridian_domain(d));
> 
>      switch ( idx )
>      {
> @@ -615,7 +613,7 @@ int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
> 
>      case HV_X64_MSR_REFERENCE_TSC:
>          if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
> -            return 0;
> +            goto gp_fault;
> 
>          perfc_incr(mshv_wrmsr_tsc_msr);
>          d->arch.hvm_domain.viridian.reference_tsc.raw = val;
> @@ -655,14 +653,15 @@ int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
>      }
> 
>      default:
> -        if ( idx >= VIRIDIAN_MSR_MIN && idx <= VIRIDIAN_MSR_MAX )
> -            gprintk(XENLOG_WARNING, "write to unimplemented MSR %#x\n",
> -                    idx);
> -
> -        return 0;
> +        gdprintk(XENLOG_WARNING,
> +                 "Write %016"PRIx64" to unimplemented MSR %#x\n", val, idx);
> +        goto gp_fault;
>      }
> 
> -    return 1;
> +    return X86EMUL_OKAY;
> +
> + gp_fault:
> +    return X86EMUL_EXCEPTION;
>  }
> 
>  static int64_t raw_trc_val(struct domain *d)
> @@ -698,13 +697,11 @@ void viridian_time_ref_count_thaw(struct domain *d)
>          trc->off = (int64_t)trc->val - raw_trc_val(d);
>  }
> 
> -int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
> +int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
>  {
> -    struct vcpu *v = current;
>      struct domain *d = v->domain;
> -
> -    if ( !is_viridian_domain(d) )
> -        return 0;
> +
> +    ASSERT(is_viridian_domain(d));
> 
>      switch ( idx )
>      {
> @@ -725,7 +722,7 @@ int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
> 
>      case HV_X64_MSR_TSC_FREQUENCY:
>          if ( viridian_feature_mask(d) & HVMPV_no_freq )
> -            return 0;
> +            goto gp_fault;
> 
>          perfc_incr(mshv_rdmsr_tsc_frequency);
>          *val = (uint64_t)d->arch.tsc_khz * 1000ull;
> @@ -733,7 +730,7 @@ int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
> 
>      case HV_X64_MSR_APIC_FREQUENCY:
>          if ( viridian_feature_mask(d) & HVMPV_no_freq )
> -            return 0;
> +            goto gp_fault;
> 
>          perfc_incr(mshv_rdmsr_apic_frequency);
>          *val = 1000000000ull / APIC_BUS_CYCLE_NS;
> @@ -757,7 +754,7 @@ int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
> 
>      case HV_X64_MSR_REFERENCE_TSC:
>          if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
> -            return 0;

I have a recollection that at least one version of Windows, when debug mode is
enabled, reads the reference TSC MSR regardless of whether the feature is
enabled, so this change may well cause guest boot failures.
In general I would be wary of raising #GP where the current code does not. I
think the current code is almost certainly too liberal, even in the face of
buggy versions of Windows, but the new code may be too conservative. It will
need some testing.
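
To illustrate what I mean (purely a sketch of the more lenient option, not
something taken from this patch), the read side could keep tolerating such
accesses rather than faulting:

    case HV_X64_MSR_REFERENCE_TSC:
        if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
        {
            /*
             * Hypothetical leniency: tolerate debug builds of Windows
             * reading this MSR even when the feature is not advertised,
             * instead of raising #GP as the new code does.
             */
            *val = 0;
            break;
        }

        perfc_incr(mshv_rdmsr_tsc_msr);
        *val = d->arch.hvm_domain.viridian.reference_tsc.raw;
        break;

Whether handing back 0 there is actually safe is exactly the sort of thing
that would need testing against the affected Windows versions.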

In principle though...

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> +            goto gp_fault;
> 
>          perfc_incr(mshv_rdmsr_tsc_msr);
>          *val = d->arch.hvm_domain.viridian.reference_tsc.raw;
> @@ -770,7 +767,7 @@ int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
>          trc = &d->arch.hvm_domain.viridian.time_ref_count;
> 
>          if ( !(viridian_feature_mask(d) & HVMPV_time_ref_count) )
> -            return 0;
> +            goto gp_fault;
> 
>          if ( !test_and_set_bit(_TRC_accessed, &trc->flags) )
>              printk(XENLOG_G_INFO "d%d: VIRIDIAN MSR_TIME_REF_COUNT: accessed\n",
> @@ -804,14 +801,14 @@ int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
>      }
> 
>      default:
> -        if ( idx >= VIRIDIAN_MSR_MIN && idx <= VIRIDIAN_MSR_MAX )
> -            gprintk(XENLOG_WARNING, "read from unimplemented MSR %#x\n",
> -                    idx);
> -
> -        return 0;
> +        gdprintk(XENLOG_WARNING, "Read from unimplemented MSR %#x\n", idx);
> +        goto gp_fault;
>      }
> 
> -    return 1;
> +    return X86EMUL_OKAY;
> +
> + gp_fault:
> +    return X86EMUL_EXCEPTION;
>  }
> 
>  void viridian_vcpu_deinit(struct vcpu *v)
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index e1e4f17..5bf6f62 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2871,8 +2871,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>              break;
>          }
> 
> -        if ( rdmsr_viridian_regs(msr, msr_content) ||
> -             rdmsr_hypervisor_regs(msr, msr_content) )
> +        if ( rdmsr_hypervisor_regs(msr, msr_content) )
>              break;
> 
>          if ( rdmsr_safe(msr, *msr_content) == 0 )
> @@ -3112,9 +3111,6 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>          if ( passive_domain_do_wrmsr(msr, msr_content) )
>              return X86EMUL_OKAY;
> 
> -        if ( wrmsr_viridian_regs(msr, msr_content) )
> -            break;
> -
>          if ( vmx_write_guest_msr(msr, msr_content) == 0 ||
>               is_last_branch_msr(msr) )
>              break;
> diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
> index 7aaa2b0..2ff9361 100644
> --- a/xen/arch/x86/msr.c
> +++ b/xen/arch/x86/msr.c
> @@ -139,9 +139,11 @@ int init_vcpu_msr_policy(struct vcpu *v)
> 
>  int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
>  {
> -    const struct cpuid_policy *cp = v->domain->arch.cpuid;
> -    const struct msr_domain_policy *dp = v->domain->arch.msr;
> +    const struct domain *d = v->domain;
> +    const struct cpuid_policy *cp = d->arch.cpuid;
> +    const struct msr_domain_policy *dp = d->arch.msr;
>      const struct msr_vcpu_policy *vp = v->arch.msr;
> +    int ret = X86EMUL_OKAY;
> 
>      switch ( msr )
>      {
> @@ -173,11 +175,26 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
>                 _MSR_MISC_FEATURES_CPUID_FAULTING;
>          break;
> 
> +    case 0x40000000 ... 0x400001ff:
> +        if ( is_viridian_domain(d) )
> +        {
> +            ret = guest_rdmsr_viridian(v, msr, val);
> +            goto out;
> +        }
> +
> +        /* Fallthrough. */
>      default:
>          return X86EMUL_UNHANDLEABLE;
>      }
> 
> -    return X86EMUL_OKAY;
> + out:
> +    /*
> +     * Check that functions we dispatch to don't end up returning
> +     * X86EMUL_UNHANDLEABLE, as that interferes with the transitionary period
> +     * meaning of "fall back to the legacy MSR handlers".
> +     */
> +    ASSERT(ret != X86EMUL_UNHANDLEABLE);
> +    return ret;
> 
>   gp_fault:
>      return X86EMUL_EXCEPTION;
> @@ -190,6 +207,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
>      const struct cpuid_policy *cp = d->arch.cpuid;
>      struct msr_domain_policy *dp = d->arch.msr;
>      struct msr_vcpu_policy *vp = v->arch.msr;
> +    int ret = X86EMUL_OKAY;
> 
>      switch ( msr )
>      {
> @@ -248,11 +266,26 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
>          break;
>      }
> 
> +    case 0x40000000 ... 0x400001ff:
> +        if ( is_viridian_domain(d) )
> +        {
> +            ret = guest_wrmsr_viridian(v, msr, val);
> +            goto out;
> +        }
> +
> +        /* Fallthrough. */
>      default:
>          return X86EMUL_UNHANDLEABLE;
>      }
> 
> -    return X86EMUL_OKAY;
> + out:
> +    /*
> +     * Check that functions we dispatch to don't end up returning
> +     * X86EMUL_UNHANDLEABLE, as that interferes with the transitionary period
> +     * meaning of "fall back to the legacy MSR handlers".
> +     */
> +    ASSERT(ret != X86EMUL_UNHANDLEABLE);
> +    return ret;
> 
>   gp_fault:
>      return X86EMUL_EXCEPTION;
> diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h
> index 4cbd133..071fb44 100644
> --- a/xen/include/asm-x86/hvm/viridian.h
> +++ b/xen/include/asm-x86/hvm/viridian.h
> @@ -101,15 +101,8 @@ struct viridian_domain
>  void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
>                             uint32_t subleaf, struct cpuid_leaf *res);
> 
> -int
> -wrmsr_viridian_regs(
> -    uint32_t idx,
> -    uint64_t val);
> -
> -int
> -rdmsr_viridian_regs(
> -    uint32_t idx,
> -    uint64_t *val);
> +int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val);
> +int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val);
> 
>  int
>  viridian_hypercall(struct cpu_user_regs *regs);
> --
> 2.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel


