
Re: [Xen-devel] [PATCH v4 07/20] x86: move and rename XSTATE_*



> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: 28 February 2018 13:02
> To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant
> <Paul.Durrant@xxxxxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxx>
> Subject: [PATCH v4 07/20] x86: move and rename XSTATE_*
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
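
As an aside for anyone picking up the new names: the rename is purely
mechanical. XSTATE_<comp> becomes X86_XCR0_<comp> and _XSTATE_<comp>
becomes X86_XCR0_<comp>_POS, with the architectural bit positions
unchanged. A standalone sketch, illustration only and not part of the
patch (the table is transcribed from the x86-defns.h hunk further down):

    /* Dump the XCR0 component layout introduced in x86-defns.h.
     * Build standalone, e.g. "gcc -O2 -o xcr0-bits xcr0-bits.c". */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static const struct { const char *name; unsigned int pos; } comp[] = {
        { "X86_XCR0_FP",      0 }, { "X86_XCR0_SSE",     1 },
        { "X86_XCR0_YMM",     2 }, { "X86_XCR0_BNDREGS", 3 },
        { "X86_XCR0_BNDCSR",  4 }, { "X86_XCR0_OPMASK",  5 },
        { "X86_XCR0_ZMM",     6 }, { "X86_XCR0_HI_ZMM",  7 },
        { "X86_XCR0_PKRU",    9 }, { "X86_XCR0_LWP",    62 },
    };

    int main(void)
    {
        for ( unsigned int i = 0; i < sizeof(comp) / sizeof(comp[0]); i++ )
            printf("%-17s bit %2u mask %#018" PRIx64 "\n",
                   comp[i].name, comp[i].pos, (uint64_t)1 << comp[i].pos);
        return 0;
    }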

> ---
> v4: New, split off from later patch.
> 
> --- a/xen/arch/x86/cpuid.c
> +++ b/xen/arch/x86/cpuid.c
> @@ -122,42 +122,42 @@ static void recalculate_xstate(struct cp
> 
>      if ( p->basic.avx )
>      {
> -        xstates |= XSTATE_YMM;
> +        xstates |= X86_XCR0_YMM;
>          xstate_size = max(xstate_size,
> -                          xstate_offsets[_XSTATE_YMM] +
> -                          xstate_sizes[_XSTATE_YMM]);
> +                          xstate_offsets[X86_XCR0_YMM_POS] +
> +                          xstate_sizes[X86_XCR0_YMM_POS]);
>      }
> 
>      if ( p->feat.mpx )
>      {
> -        xstates |= XSTATE_BNDREGS | XSTATE_BNDCSR;
> +        xstates |= X86_XCR0_BNDREGS | X86_XCR0_BNDCSR;
>          xstate_size = max(xstate_size,
> -                          xstate_offsets[_XSTATE_BNDCSR] +
> -                          xstate_sizes[_XSTATE_BNDCSR]);
> +                          xstate_offsets[X86_XCR0_BNDCSR_POS] +
> +                          xstate_sizes[X86_XCR0_BNDCSR_POS]);
>      }
> 
>      if ( p->feat.avx512f )
>      {
> -        xstates |= XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM;
> +        xstates |= X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM;
>          xstate_size = max(xstate_size,
> -                          xstate_offsets[_XSTATE_HI_ZMM] +
> -                          xstate_sizes[_XSTATE_HI_ZMM]);
> +                          xstate_offsets[X86_XCR0_HI_ZMM_POS] +
> +                          xstate_sizes[X86_XCR0_HI_ZMM_POS]);
>      }
> 
>      if ( p->feat.pku )
>      {
> -        xstates |= XSTATE_PKRU;
> +        xstates |= X86_XCR0_PKRU;
>          xstate_size = max(xstate_size,
> -                          xstate_offsets[_XSTATE_PKRU] +
> -                          xstate_sizes[_XSTATE_PKRU]);
> +                          xstate_offsets[X86_XCR0_PKRU_POS] +
> +                          xstate_sizes[X86_XCR0_PKRU_POS]);
>      }
> 
>      if ( p->extd.lwp )
>      {
> -        xstates |= XSTATE_LWP;
> +        xstates |= X86_XCR0_LWP;
>          xstate_size = max(xstate_size,
> -                          xstate_offsets[_XSTATE_LWP] +
> -                          xstate_sizes[_XSTATE_LWP]);
> +                          xstate_offsets[X86_XCR0_LWP_POS] +
> +                          xstate_sizes[X86_XCR0_LWP_POS]);
>      }
> 
>      p->xstate.max_size  =  xstate_size;
> @@ -1016,7 +1016,7 @@ void guest_cpuid(const struct vcpu *v, u
>          break;
> 
>      case 0x8000001c:
> -        if ( (v->arch.xcr0 & XSTATE_LWP) && cpu_has_svm )
> +        if ( (v->arch.xcr0 & X86_XCR0_LWP) && cpu_has_svm )
>              /* Turn on available bit and other features specified in lwp_cfg. */
>              res->a = (res->d & v->arch.hvm_svm.guest_lwp_cfg) | 1;
>          break;
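
(No functional concern, just an observation: every hunk in
recalculate_xstate() above repeats one pattern, OR the component's XCR0
bit into xstates, then raise xstate_size to the end of that component's
save area. Written as a loop, and assuming offsets[]/sizes[] are indexed
by the *_POS values like xstate_offsets[]/xstate_sizes[] in
xen/arch/x86/xstate.c, the computation would look like this sketch:)

    #include <stdint.h>

    /* Sketch only: size of a non-compacted XSAVE area holding "xstates".
     * 512 + 64 is FXSAVE_SIZE + XSAVE_HDR_SIZE, the minimum area size. */
    static unsigned int xstate_area_size(uint64_t xstates,
                                         const unsigned int *offsets,
                                         const unsigned int *sizes)
    {
        unsigned int size = 512 + 64;

        /* Components 0 and 1 (FP/SSE) live in the legacy FXSAVE image. */
        for ( unsigned int pos = 2; pos < 63; pos++ )
            if ( (xstates & (1ULL << pos)) &&
                 size < offsets[pos] + sizes[pos] )
                size = offsets[pos] + sizes[pos];

        return size;
    }
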
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1882,8 +1882,8 @@ static int hvmemul_get_fpu(
>      case X86EMUL_FPU_xmm:
>          break;
>      case X86EMUL_FPU_ymm:
> -        if ( !(curr->arch.xcr0 & XSTATE_SSE) ||
> -             !(curr->arch.xcr0 & XSTATE_YMM) )
> +        if ( !(curr->arch.xcr0 & X86_XCR0_SSE) ||
> +             !(curr->arch.xcr0 & X86_XCR0_YMM) )
>              return X86EMUL_UNHANDLEABLE;
>          break;
>      default:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -318,7 +318,7 @@ bool hvm_set_guest_bndcfgs(struct vcpu *
>       * enabled in BNDCFGS.
>       */
>      if ( (val & IA32_BNDCFGS_ENABLE) &&
> -         !(v->arch.xcr0_accum & (XSTATE_BNDREGS | XSTATE_BNDCSR)) )
> +         !(v->arch.xcr0_accum & (X86_XCR0_BNDREGS | X86_XCR0_BNDCSR)) )
>      {
>          uint64_t xcr0 = get_xcr0();
>          int rc;
> @@ -327,7 +327,7 @@ bool hvm_set_guest_bndcfgs(struct vcpu *
>              return false;
> 
>          rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
> -                           xcr0 | XSTATE_BNDREGS | XSTATE_BNDCSR);
> +                           xcr0 | X86_XCR0_BNDREGS | X86_XCR0_BNDCSR);
> 
>          if ( rc )
>          {
> @@ -2409,10 +2409,10 @@ int hvm_set_cr4(unsigned long value, boo
>       * guest may enable the feature in CR4 without enabling it in XCR0. We
>       * need to context switch / migrate PKRU nevertheless.
>       */
> -    if ( (value & X86_CR4_PKE) && !(v->arch.xcr0_accum & XSTATE_PKRU) )
> +    if ( (value & X86_CR4_PKE) && !(v->arch.xcr0_accum & X86_XCR0_PKRU) )
>      {
>          int rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
> -                               get_xcr0() | XSTATE_PKRU);
> +                               get_xcr0() | X86_XCR0_PKRU);
> 
>          if ( rc )
>          {
> @@ -2421,7 +2421,7 @@ int hvm_set_cr4(unsigned long value, boo
>          }
> 
>          if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
> -                           get_xcr0() & ~XSTATE_PKRU) )
> +                           get_xcr0() & ~X86_XCR0_PKRU) )
>              /* nothing, best effort only */;
>      }
> 
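
(The set-then-clear sequence above works because xcr0_accum only ever
accumulates bits, as the handle_xsetbv() hunk further down shows with
"curr->arch.xcr0_accum |= new_bv". A toy model of that latching
behaviour, illustration only:)

    #include <assert.h>
    #include <stdint.h>

    struct vxcr0 { uint64_t xcr0, xcr0_accum; };

    static void model_xsetbv(struct vxcr0 *v, uint64_t new_bv)
    {
        v->xcr0 = new_bv;
        v->xcr0_accum |= new_bv;   /* monotonic: never loses bits */
    }

    int main(void)
    {
        struct vxcr0 v = { .xcr0 = 3, .xcr0_accum = 3 }; /* FP | SSE */
        const uint64_t PKRU = 1ULL << 9;

        model_xsetbv(&v, v.xcr0 | PKRU);  /* turn PKRU on ... */
        model_xsetbv(&v, v.xcr0 & ~PKRU); /* ... and straight back off */

        /* PKRU is off in XCR0 but latched for context switch / migration. */
        assert(!(v.xcr0 & PKRU) && (v.xcr0_accum & PKRU));
        return 0;
    }
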
> @@ -3890,7 +3890,7 @@ void hvm_vcpu_reset_state(struct vcpu *v
>      fpu_ctxt->mxcsr = MXCSR_DEFAULT;
>      if ( v->arch.xsave_area )
>      {
> -        v->arch.xsave_area->xsave_hdr.xstate_bv = XSTATE_FP;
> +        v->arch.xsave_area->xsave_hdr.xstate_bv = X86_XCR0_FP;
>          v->arch.xsave_area->xsave_hdr.xcomp_bv = 0;
>      }
> 
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -2157,7 +2157,7 @@ static void adjust_bnd(struct x86_emulat
>           * (in which case executing any suitable non-prefixed branch
>           * instruction would do), or use XRSTOR.
>           */
> -        xstate_set_init(XSTATE_BNDREGS);
> +        xstate_set_init(X86_XCR0_BNDREGS);
>      }
>   done:;
>  }
> --- a/xen/arch/x86/xstate.c
> +++ b/xen/arch/x86/xstate.c
> @@ -304,7 +304,7 @@ void xsave(struct vcpu *v, uint64_t mask
>                             "=m" (*ptr), \
>                             "a" (lmask), "d" (hmask), "D" (ptr))
> 
> -    if ( fip_width == 8 || !(mask & XSTATE_FP) )
> +    if ( fip_width == 8 || !(mask & X86_XCR0_FP) )
>      {
>          XSAVE("0x48,");
>      }
> @@ -357,7 +357,7 @@ void xsave(struct vcpu *v, uint64_t mask
>              fip_width = 8;
>      }
>  #undef XSAVE
> -    if ( mask & XSTATE_FP )
> +    if ( mask & X86_XCR0_FP )
>          ptr->fpu_sse.x[FPU_WORD_SIZE_OFFSET] = fip_width;
>  }
> 
> @@ -375,7 +375,7 @@ void xrstor(struct vcpu *v, uint64_t mas
>       * sometimes new user value. Both should be ok. Use the FPU saved
>       * data block as a safe address because it should be in L1.
>       */
> -    if ( (mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) &&
> +    if ( (mask & ptr->xsave_hdr.xstate_bv & X86_XCR0_FP) &&
>           !(ptr->fpu_sse.fsw & ~ptr->fpu_sse.fcw & 0x003f) &&
>           boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
>          asm volatile ( "fnclex\n\t"        /* clear exceptions */
> @@ -451,8 +451,8 @@ void xrstor(struct vcpu *v, uint64_t mas
>               * Also try to eliminate fault reasons, even if this shouldn't be
>               * needed here (other code should ensure the sanity of the data).
>               */
> -            if ( ((mask & XSTATE_SSE) ||
> -                  ((mask & XSTATE_YMM) &&
> +            if ( ((mask & X86_XCR0_SSE) ||
> +                  ((mask & X86_XCR0_YMM) &&
>                     !(ptr->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED))) )
>                  ptr->fpu_sse.mxcsr &= mxcsr_mask;
>              if ( v->arch.xcr0_accum & XSTATE_XSAVES_ONLY )
> @@ -595,7 +595,7 @@ void xstate_init(struct cpuinfo_x86 *c)
>      cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
> 
>      BUG_ON((eax & XSTATE_FP_SSE) != XSTATE_FP_SSE);
> -    BUG_ON((eax & XSTATE_YMM) && !(eax & XSTATE_SSE));
> +    BUG_ON((eax & X86_XCR0_YMM) && !(eax & X86_XCR0_SSE));
>      feature_mask = (((u64)edx << 32) | eax) & XCNTXT_MASK;
> 
>      /*
> @@ -648,26 +648,26 @@ void xstate_init(struct cpuinfo_x86 *c)
>  static bool valid_xcr0(u64 xcr0)
>  {
>      /* FP must be unconditionally set. */
> -    if ( !(xcr0 & XSTATE_FP) )
> +    if ( !(xcr0 & X86_XCR0_FP) )
>          return false;
> 
>      /* YMM depends on SSE. */
> -    if ( (xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE) )
> +    if ( (xcr0 & X86_XCR0_YMM) && !(xcr0 & X86_XCR0_SSE) )
>          return false;
> 
> -    if ( xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
> +    if ( xcr0 & (X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM) )
>      {
>          /* OPMASK, ZMM, and HI_ZMM require YMM. */
> -        if ( !(xcr0 & XSTATE_YMM) )
> +        if ( !(xcr0 & X86_XCR0_YMM) )
>              return false;
> 
>          /* OPMASK, ZMM, and HI_ZMM must be the same. */
> -        if ( ~xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
> +        if ( ~xcr0 & (X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM) )
>              return false;
>      }
> 
>      /* BNDREGS and BNDCSR must be the same. */
> -    return !(xcr0 & XSTATE_BNDREGS) == !(xcr0 & XSTATE_BNDCSR);
> +    return !(xcr0 & X86_XCR0_BNDREGS) == !(xcr0 & X86_XCR0_BNDCSR);
>  }
> 
>  int validate_xstate(u64 xcr0, u64 xcr0_accum, const struct xsave_hdr *hdr)
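
(The checks in valid_xcr0() above encode the architectural dependency
rules: FP is mandatory, YMM requires SSE, the three AVX-512 components
require YMM and must be enabled together, and the two MPX components
must match. A standalone restatement with a few self-checks,
illustration only, using local macros rather than the Xen ones:)

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define XCR0_FP      (1ULL << 0)
    #define XCR0_SSE     (1ULL << 1)
    #define XCR0_YMM     (1ULL << 2)
    #define XCR0_BNDREGS (1ULL << 3)
    #define XCR0_BNDCSR  (1ULL << 4)
    #define XCR0_AVX512  (7ULL << 5)  /* OPMASK | ZMM | HI_ZMM */

    static bool valid_xcr0(uint64_t xcr0)
    {
        if ( !(xcr0 & XCR0_FP) )
            return false;                 /* FP must always be set */
        if ( (xcr0 & XCR0_YMM) && !(xcr0 & XCR0_SSE) )
            return false;                 /* YMM depends on SSE */
        if ( (xcr0 & XCR0_AVX512) &&
             (!(xcr0 & XCR0_YMM) || (~xcr0 & XCR0_AVX512)) )
            return false;                 /* AVX-512 needs YMM, all 3 bits */
        return !(xcr0 & XCR0_BNDREGS) == !(xcr0 & XCR0_BNDCSR);
    }

    int main(void)
    {
        assert(valid_xcr0(XCR0_FP));                   /* x87 alone is fine */
        assert(!valid_xcr0(XCR0_SSE));                 /* FP missing */
        assert(!valid_xcr0(XCR0_FP | XCR0_YMM));       /* YMM without SSE */
        assert(valid_xcr0(XCR0_FP | XCR0_SSE | XCR0_YMM | XCR0_AVX512));
        assert(!valid_xcr0(XCR0_FP | XCR0_BNDREGS));   /* MPX bits split */
        return 0;
    }
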
> @@ -703,7 +703,7 @@ int handle_xsetbv(u32 index, u64 new_bv)
>          return -EINVAL;
> 
>      /* XCR0.PKRU is disabled on PV mode. */
> -    if ( is_pv_vcpu(curr) && (new_bv & XSTATE_PKRU) )
> +    if ( is_pv_vcpu(curr) && (new_bv & X86_XCR0_PKRU) )
>          return -EOPNOTSUPP;
> 
>      if ( !set_xcr0(new_bv) )
> @@ -714,7 +714,7 @@ int handle_xsetbv(u32 index, u64 new_bv)
>      curr->arch.xcr0_accum |= new_bv;
> 
>      /* LWP sets nonlazy_xstate_used independently. */
> -    if ( new_bv & (XSTATE_NONLAZY & ~XSTATE_LWP) )
> +    if ( new_bv & (XSTATE_NONLAZY & ~X86_XCR0_LWP) )
>          curr->arch.nonlazy_xstate_used = 1;
> 
>      mask &= curr->fpu_dirtied ? ~XSTATE_FP_SSE : XSTATE_NONLAZY;
> @@ -755,7 +755,7 @@ uint64_t read_bndcfgu(void)
>      {
>          asm ( ".byte 0x0f,0xc7,0x27\n" /* xsavec */
>                : "=m" (*xstate)
> -              : "a" (XSTATE_BNDCSR), "d" (0), "D" (xstate) );
> +              : "a" (X86_XCR0_BNDCSR), "d" (0), "D" (xstate) );
> 
>          bndcsr = (void *)(xstate + 1);
>      }
> @@ -763,15 +763,15 @@ uint64_t read_bndcfgu(void)
>      {
>          asm ( ".byte 0x0f,0xae,0x27\n" /* xsave */
>                : "=m" (*xstate)
> -              : "a" (XSTATE_BNDCSR), "d" (0), "D" (xstate) );
> +              : "a" (X86_XCR0_BNDCSR), "d" (0), "D" (xstate) );
> 
> -        bndcsr = (void *)xstate + xstate_offsets[_XSTATE_BNDCSR];
> +        bndcsr = (void *)xstate + xstate_offsets[X86_XCR0_BNDCSR_POS];
>      }
> 
>      if ( cr0 & X86_CR0_TS )
>          write_cr0(cr0);
> 
> -    return xstate->xsave_hdr.xstate_bv & XSTATE_BNDCSR ? bndcsr->bndcfgu : 0;
> +    return xstate->xsave_hdr.xstate_bv & X86_XCR0_BNDCSR ? bndcsr->bndcfgu : 0;
>  }
> 
>  void xstate_set_init(uint64_t mask)
> --- a/xen/include/asm-x86/x86-defns.h
> +++ b/xen/include/asm-x86/x86-defns.h
> @@ -66,4 +66,28 @@
>  #define X86_CR4_SMAP       0x00200000 /* enable SMAP */
>  #define X86_CR4_PKE        0x00400000 /* enable PKE */
> 
> +/*
> + * XSTATE component flags in XCR0
> + */
> +#define X86_XCR0_FP_POS           0
> +#define X86_XCR0_FP               (1ULL << X86_XCR0_FP_POS)
> +#define X86_XCR0_SSE_POS          1
> +#define X86_XCR0_SSE              (1ULL << X86_XCR0_SSE_POS)
> +#define X86_XCR0_YMM_POS          2
> +#define X86_XCR0_YMM              (1ULL << X86_XCR0_YMM_POS)
> +#define X86_XCR0_BNDREGS_POS      3
> +#define X86_XCR0_BNDREGS          (1ULL << X86_XCR0_BNDREGS_POS)
> +#define X86_XCR0_BNDCSR_POS       4
> +#define X86_XCR0_BNDCSR           (1ULL << X86_XCR0_BNDCSR_POS)
> +#define X86_XCR0_OPMASK_POS       5
> +#define X86_XCR0_OPMASK           (1ULL << X86_XCR0_OPMASK_POS)
> +#define X86_XCR0_ZMM_POS          6
> +#define X86_XCR0_ZMM              (1ULL << X86_XCR0_ZMM_POS)
> +#define X86_XCR0_HI_ZMM_POS       7
> +#define X86_XCR0_HI_ZMM           (1ULL << X86_XCR0_HI_ZMM_POS)
> +#define X86_XCR0_PKRU_POS         9
> +#define X86_XCR0_PKRU             (1ULL << X86_XCR0_PKRU_POS)
> +#define X86_XCR0_LWP_POS          62
> +#define X86_XCR0_LWP              (1ULL << X86_XCR0_LWP_POS)
> +
>  #endif       /* __XEN_X86_DEFNS_H__ */
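
(Also as an aside: the new *_POS values are what index xstate_offsets[]
and xstate_sizes[]; both arrays come from CPUID leaf 0xD, where subleaf
i reports component i's size in EAX and its non-compacted offset in EBX.
A user-space sketch using the __cpuid_count() macro from GCC/clang
<cpuid.h>, assuming an XSAVE-capable CPU:)

    /* Sketch: dump per-component XSAVE sizes/offsets from CPUID leaf 0xD. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx, pos;
        unsigned long long xcr0_mask;

        /* Subleaf 0: EDX:EAX = XCR0 bits the CPU supports. */
        __cpuid_count(0xd, 0, eax, ebx, ecx, edx);
        xcr0_mask = ((unsigned long long)edx << 32) | eax;

        for ( pos = 2; pos < 63; pos++ ) /* 0/1 are the legacy area */
        {
            if ( !(xcr0_mask & (1ULL << pos)) )
                continue;
            __cpuid_count(0xd, pos, eax, ebx, ecx, edx);
            printf("component %2u: size %4u offset %4u\n", pos, eax, ebx);
        }
        return 0;
    }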
> --- a/xen/include/asm-x86/xstate.h
> +++ b/xen/include/asm-x86/xstate.h
> @@ -10,6 +10,7 @@
> 
>  #include <xen/sched.h>
>  #include <asm/cpufeature.h>
> +#include <asm/x86-defns.h>
> 
>  #define FCW_DEFAULT               0x037f
>  #define FCW_RESET                 0x0040
> @@ -28,34 +29,14 @@ extern uint32_t mxcsr_mask;
>  #define XSAVE_HDR_OFFSET          FXSAVE_SIZE
>  #define XSTATE_AREA_MIN_SIZE      (FXSAVE_SIZE + XSAVE_HDR_SIZE)
> 
> -#define _XSTATE_FP                0
> -#define XSTATE_FP                 (1ULL << _XSTATE_FP)
> -#define _XSTATE_SSE               1
> -#define XSTATE_SSE                (1ULL << _XSTATE_SSE)
> -#define _XSTATE_YMM               2
> -#define XSTATE_YMM                (1ULL << _XSTATE_YMM)
> -#define _XSTATE_BNDREGS           3
> -#define XSTATE_BNDREGS            (1ULL << _XSTATE_BNDREGS)
> -#define _XSTATE_BNDCSR            4
> -#define XSTATE_BNDCSR             (1ULL << _XSTATE_BNDCSR)
> -#define _XSTATE_OPMASK            5
> -#define XSTATE_OPMASK             (1ULL << _XSTATE_OPMASK)
> -#define _XSTATE_ZMM               6
> -#define XSTATE_ZMM                (1ULL << _XSTATE_ZMM)
> -#define _XSTATE_HI_ZMM            7
> -#define XSTATE_HI_ZMM             (1ULL << _XSTATE_HI_ZMM)
> -#define _XSTATE_PKRU              9
> -#define XSTATE_PKRU               (1ULL << _XSTATE_PKRU)
> -#define _XSTATE_LWP               62
> -#define XSTATE_LWP                (1ULL << _XSTATE_LWP)
> -
> -#define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
> -#define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | \
> -                        XSTATE_ZMM | XSTATE_HI_ZMM | XSTATE_NONLAZY)
> +#define XSTATE_FP_SSE  (X86_XCR0_FP | X86_XCR0_SSE)
> +#define XCNTXT_MASK    (X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM | \
> +                        X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM | \
> +                        XSTATE_NONLAZY)
> 
>  #define XSTATE_ALL     (~(1ULL << 63))
> -#define XSTATE_NONLAZY (XSTATE_LWP | XSTATE_BNDREGS | XSTATE_BNDCSR | \
> -                        XSTATE_PKRU)
> +#define XSTATE_NONLAZY (X86_XCR0_LWP | X86_XCR0_BNDREGS | X86_XCR0_BNDCSR | \
> +                        X86_XCR0_PKRU)
>  #define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)
>  #define XSTATE_XSAVES_ONLY         0
>  #define XSTATE_COMPACTION_ENABLED  (1ULL << 63)
> 

