
Re: [Xen-devel] [PATCH 06/13] xen/arm: Add ARCH_WORKAROUND_2 support for guests



On Wed, 23 May 2018, Stefano Stabellini wrote:
> On Tue, 22 May 2018, Julien Grall wrote:
> > In order to offer ARCH_WORKAROUND_2 support to guests, we need to track the
> > state of the workaround per-vCPU. The field 'pad' in cpu_info is now
> > repurposed to store flags easily accessible in assembly.
> > 
> > As the hypervisor will always run with the workaround enabled, we may
> > need to enable (on guest exit) or disable (on guest entry) the
> > workaround.
> > 
> > A follow-up patch will add fastpath for the workaround for arm64 guests.
> > 
> > This is part of XSA-263.
> > 
> > Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
> > ---
> >  xen/arch/arm/domain.c         |  8 ++++++++
> >  xen/arch/arm/traps.c          | 20 ++++++++++++++++++++
> >  xen/arch/arm/vsmc.c           | 37 +++++++++++++++++++++++++++++++++++++
> >  xen/include/asm-arm/current.h |  6 +++++-
> >  4 files changed, 70 insertions(+), 1 deletion(-)
> > 
> > diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> > index e7b33e92fb..9168195a9c 100644
> > --- a/xen/arch/arm/domain.c
> > +++ b/xen/arch/arm/domain.c
> > @@ -21,6 +21,7 @@
> >  #include <xen/wait.h>
> >  
> >  #include <asm/alternative.h>
> > +#include <asm/cpuerrata.h>
> >  #include <asm/cpufeature.h>
> >  #include <asm/current.h>
> >  #include <asm/event.h>
> > @@ -575,6 +576,13 @@ int vcpu_initialise(struct vcpu *v)
> >      if ( (rc = vcpu_vtimer_init(v)) != 0 )
> >          goto fail;
> >  
> > +    /*
> > +     * The workaround 2 (i.e SSBD mitigation) is enabled by default if
> > +     * supported.
> > +     */
> > +    if ( get_ssbd_state() == ARM_SSBD_RUNTIME )
> > +        v->arch.cpu_info->flags |= CPUINFO_WORKAROUND_2_FLAG;
> > +
> >      return rc;
> >  
> >  fail:
> > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> > index 5c18e918b0..020b0b8eef 100644
> > --- a/xen/arch/arm/traps.c
> > +++ b/xen/arch/arm/traps.c
> > @@ -2011,10 +2011,23 @@ inject_abt:
> >          inject_iabt_exception(regs, gva, hsr.len);
> >  }
> >  
> > +static inline bool needs_ssbd_flip(struct vcpu *v)
> > +{
> > +    if ( !check_workaround_ssbd() )
> > +        return false;
> 
> Why not check get_ssbd_state() == ARM_SSBD_RUNTIME instead?  I am confused
> about when it is right to use the cpu capability check
> (check_workaround_ssbd), when to call get_ssbd_state(), and when to call
> cpu_require_ssbd_mitigation().
> 
> 
> > +    return !((v->arch.cpu_info->flags & CPUINFO_WORKAROUND_2_FLAG) &&
> > +             cpu_require_ssbd_mitigation());
> 
> It looks like this won't do as intended when v->arch.cpu_info->flags = 0
> and cpu_require_ssbd_mitigation() returns false: in that case the whole
> expression evaluates to true, so an SMC would be issued even though this
> CPU does not require the mitigation. Am I right?
> 
> Maybe needs_ssbd_flip() should be implemented as follows:
> 
>   return get_ssbd_state() == ARM_SSBD_RUNTIME &&
>     !(v->arch.cpu_info->flags & CPUINFO_WORKAROUND_2_FLAG)

If the intention is to support systems where not all CPUs need/have the
workaround, then it should be:

   return cpu_require_ssbd_mitigation() &&
     !(v->arch.cpu_info->flags & CPUINFO_WORKAROUND_2_FLAG)
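
To make the difference concrete, here is a small stand-alone sketch (not Xen
code: the two predicates are stubbed as plain booleans, and the helper names
from the patch only appear in comments) enumerating both expressions over
every combination of the per-vCPU flag and the per-CPU requirement:

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for (v->arch.cpu_info->flags & CPUINFO_WORKAROUND_2_FLAG) and
 * cpu_require_ssbd_mitigation(); both reduced to plain booleans so the
 * truth table of each expression can be printed.
 */
static bool original(bool flag_set, bool cpu_requires)
{
    /* return !((flags & CPUINFO_WORKAROUND_2_FLAG) && cpu_require_ssbd_mitigation()); */
    return !(flag_set && cpu_requires);
}

static bool suggested(bool flag_set, bool cpu_requires)
{
    /* return cpu_require_ssbd_mitigation() && !(flags & CPUINFO_WORKAROUND_2_FLAG); */
    return cpu_requires && !flag_set;
}

int main(void)
{
    printf("flag_set cpu_requires original suggested\n");
    for ( int flag = 0; flag <= 1; flag++ )
        for ( int req = 0; req <= 1; req++ )
            printf("%8d %12d %8d %9d\n", flag, req,
                   original(flag, req), suggested(flag, req));
    return 0;
}

The two expressions only disagree when the CPU does not require the
mitigation: the original still reports a flip there (so an SMC would be
issued on every guest entry/exit), while the suggested one only flips when
the CPU requires the mitigation and the guest has the workaround disabled.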
 

> > +}
> > +
> >  static void enter_hypervisor_head(struct cpu_user_regs *regs)
> >  {
> >      if ( guest_mode(regs) )
> >      {
> > +        /* If the guest has disabled the workaround, bring it back on. */
> > +        if ( needs_ssbd_flip(current) )
> > +            arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
> > +
> >          /*
> >           * If we pended a virtual abort, preserve it until it gets cleared.
> >           * See ARM ARM DDI 0487A.j D1.14.3 (Virtual Interrupts) for details,
> > @@ -2260,6 +2273,13 @@ void leave_hypervisor_tail(void)
> >               */
> >              SYNCHRONIZE_SERROR(SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT);
> >  
> > +            /*
> > +             * The hypervisor runs with the workaround always present.
> > +             * If the guest wants it disabled, so be it...
> > +             */
> > +            if ( needs_ssbd_flip(current) )
> > +                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 0, NULL);
> > +
> >              return;
> >          }
> >          local_irq_enable();
> > diff --git a/xen/arch/arm/vsmc.c b/xen/arch/arm/vsmc.c
> > index 40a80d5760..c4ccae6030 100644
> > --- a/xen/arch/arm/vsmc.c
> > +++ b/xen/arch/arm/vsmc.c
> > @@ -18,6 +18,7 @@
> >  #include <xen/lib.h>
> >  #include <xen/types.h>
> >  #include <public/arch-arm/smccc.h>
> > +#include <asm/cpuerrata.h>
> >  #include <asm/cpufeature.h>
> >  #include <asm/monitor.h>
> >  #include <asm/regs.h>
> > @@ -104,6 +105,23 @@ static bool handle_arch(struct cpu_user_regs *regs)
> >              if ( cpus_have_cap(ARM_HARDEN_BRANCH_PREDICTOR) )
> >                  ret = 0;
> >              break;
> > +        case ARM_SMCCC_ARCH_WORKAROUND_2_FID:
> > +            switch ( get_ssbd_state() )
> > +            {
> > +            case ARM_SSBD_UNKNOWN:
> > +            case ARM_SSBD_FORCE_DISABLE:
> > +                break;
> > +
> > +            case ARM_SSBD_RUNTIME:
> > +                ret = ARM_SMCCC_SUCCESS;
> > +                break;
> > +
> > +            case ARM_SSBD_FORCE_ENABLE:
> > +            case ARM_SSBD_MITIGATED:
> > +                ret = ARM_SMCCC_NOT_REQUIRED;
> > +                break;
> > +            }
> > +            break;
> >          }
> >  
> >          set_user_reg(regs, 0, ret);
> > @@ -114,6 +132,25 @@ static bool handle_arch(struct cpu_user_regs *regs)
> >      case ARM_SMCCC_ARCH_WORKAROUND_1_FID:
> >          /* No return value */
> >          return true;
> > +
> > +    case ARM_SMCCC_ARCH_WORKAROUND_2_FID:
> > +    {
> > +        bool enable = (uint32_t)get_user_reg(regs, 1);
> > +
> > +        /*
> > +         * ARM_WORKAROUND_2_FID should only be called when mitigation
> > +         * state can be changed at runtime.
> > +         */
> > +        if ( unlikely(get_ssbd_state() != ARM_SSBD_RUNTIME) )
> > +            return true;
> > +
> > +        if ( enable )
> > +            get_cpu_info()->flags |= CPUINFO_WORKAROUND_2_FLAG;
> > +        else
> > +            get_cpu_info()->flags &= ~CPUINFO_WORKAROUND_2_FLAG;
> > +
> > +        return true;
> > +    }
> >      }
> >  
> >      return false;
> > diff --git a/xen/include/asm-arm/current.h b/xen/include/asm-arm/current.h
> > index 7a0971fdea..f9819b34fc 100644
> > --- a/xen/include/asm-arm/current.h
> > +++ b/xen/include/asm-arm/current.h
> > @@ -7,6 +7,10 @@
> >  #include <asm/percpu.h>
> >  #include <asm/processor.h>
> >  
> > +/* Tell whether the guest vCPU enabled Workaround 2 (i.e variant 4) */
> > +#define CPUINFO_WORKAROUND_2_FLAG_SHIFT   0
> > +#define CPUINFO_WORKAROUND_2_FLAG (_AC(1, U) << CPUINFO_WORKAROUND_2_FLAG_SHIFT)
> > +
> >  #ifndef __ASSEMBLY__
> >  
> >  struct vcpu;
> > @@ -21,7 +25,7 @@ DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
> >  struct cpu_info {
> >      struct cpu_user_regs guest_cpu_user_regs;
> >      unsigned long elr;
> > -    unsigned int pad;
> > +    uint32_t flags;
> >  };
> >  
> >  static inline struct cpu_info *get_cpu_info(void)
> > -- 
> > 2.11.0
> > 
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

