
Re: [Xen-devel] [PATCH v3 22/25] x86/HVM: do actual CMPXCHG in hvmemul_cmpxchg()



> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: 07 December 2017 14:17
> To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant
> <Paul.Durrant@xxxxxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxx>
> Subject: [PATCH v3 22/25] x86/HVM: do actual CMPXCHG in
> hvmemul_cmpxchg()
> 
> ..., at least as far as currently possible, i.e. when a mapping can be
> obtained.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
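
(For anyone skimming the archive: the key semantic here is the
emulator's cmpxchg hook contract -- on a comparison mismatch the hook
hands the value it actually observed back through p_old and returns
X86EMUL_CMPXCHG_FAILED, rather than silently completing a write. A
minimal userspace sketch of that contract, using the GCC/Clang
__atomic builtin and made-up names/values, might look like:

    #include <stdbool.h>
    #include <stdint.h>

    #define X86EMUL_OKAY            0
    #define X86EMUL_CMPXCHG_FAILED  7   /* value illustrative only */

    static int sketch_cmpxchg(uint64_t *mem, uint64_t *p_old, uint64_t new)
    {
        uint64_t expected = *p_old;

        /* On failure the builtin stores the observed value in 'expected'. */
        if ( !__atomic_compare_exchange_n(mem, &expected, new, false,
                                          __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) )
        {
            *p_old = expected;          /* report what was really there */
            return X86EMUL_CMPXCHG_FAILED;
        }
        return X86EMUL_OKAY;
    }

That is exactly the shape of the size-switched code below.)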

> ---
> v3: New.
> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1296,8 +1296,83 @@ static int hvmemul_cmpxchg(
>      bool lock,
>      struct x86_emulate_ctxt *ctxt)
>  {
> -    /* Fix this in case the guest is really relying on r-m-w atomicity. */
> -    return hvmemul_write(seg, offset, p_new, bytes, ctxt);
> +    struct hvm_emulate_ctxt *hvmemul_ctxt =
> +        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
> +    struct vcpu *curr = current;
> +    unsigned long addr, reps = 1;
> +    uint32_t pfec = PFEC_page_present | PFEC_write_access;
> +    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
> +    int rc;
> +    void *mapping = NULL;
> +
> +    rc = hvmemul_virtual_to_linear(
> +        seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
> +    if ( rc != X86EMUL_OKAY )
> +        return rc;
> +
> +    if ( is_x86_system_segment(seg) )
> +        pfec |= PFEC_implicit;
> +    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
> +        pfec |= PFEC_user_mode;
> +
> +    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
> +    if ( IS_ERR(mapping) )
> +        return ~PTR_ERR(mapping);
> +
> +    if ( !mapping )
> +    {
> +        /* Fix this in case the guest is really relying on r-m-w atomicity. */
> +        return hvmemul_linear_mmio_write(addr, bytes, p_new, pfec,
> +                                         hvmemul_ctxt,
> +                                         vio->mmio_access.write_access &&
> +                                         vio->mmio_gla == (addr & PAGE_MASK));
> +    }
> +
> +    switch ( bytes )
> +    {
> +    case 1: case 2: case 4: case 8:
> +    {
> +        unsigned long old = 0, new = 0, cur;
> +
> +        memcpy(&old, p_old, bytes);
> +        memcpy(&new, p_new, bytes);
> +        if ( lock )
> +            cur = __cmpxchg(mapping, old, new, bytes);
> +        else
> +            cur = cmpxchg_local_(mapping, old, new, bytes);
> +        if ( cur != old )
> +        {
> +            memcpy(p_old, &cur, bytes);
> +            rc = X86EMUL_CMPXCHG_FAILED;
> +        }
> +        break;
> +    }
> +
> +    case 16:
> +        if ( cpu_has_cx16 )
> +        {
> +            __uint128_t *old = p_old, cur;
> +
> +            if ( lock )
> +                cur = __cmpxchg16b(mapping, old, p_new);
> +            else
> +                cur = cmpxchg16b_local_(mapping, old, p_new);
> +            if ( cur != *old )
> +            {
> +                *old = cur;
> +                rc = X86EMUL_CMPXCHG_FAILED;
> +            }
> +            break;
> +        }
> +        /* fall through */
> +    default:
> +        rc = X86EMUL_UNHANDLEABLE;
> +        break;
> +    }
> +
> +    hvmemul_unmap_linear_addr(mapping, addr, bytes, hvmemul_ctxt);
> +
> +    return rc;
>  }
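
Worth spelling out what this closes (my reading of the code, not
Jan's words): previously x86_emulate() compared against a snapshot
taken by an earlier read, and the hook then finished the instruction
with a plain hvmemul_write(), so the read/compare/write sequence was
not atomic -- another vCPU's store in the window was silently
overwritten. Roughly, as a hypothetical paraphrase rather than actual
Xen code:

    #include <stdint.h>

    /* Old behaviour, paraphrased. */
    static void old_cmpxchg(uint64_t *mem, uint64_t expected, uint64_t new)
    {
        uint64_t snap = *mem;        /* emulator's earlier read         */

        if ( snap == expected )      /* compare on the stale snapshot   */
            *mem = new;              /* plain write: a concurrent store */
                                     /* from another vCPU is lost       */
    }

The new code only retains that behaviour for the !mapping (MMIO)
path, which the retained comment still flags as needing a fix, and
the 'lock' argument selects between the LOCK-prefixed __cmpxchg()
and the new unprefixed cmpxchg_local_().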
> 
>  static int hvmemul_validate(
> --- a/xen/include/asm-x86/system.h
> +++ b/xen/include/asm-x86/system.h
> @@ -110,6 +110,38 @@ static always_inline unsigned long __cmp
>      return old;
>  }
> 
> +static always_inline unsigned long cmpxchg_local_(
> +    void *ptr, unsigned long old, unsigned long new, unsigned int size)
> +{
> +    unsigned long prev = ~old;
> +
> +    switch ( size )
> +    {
> +    case 1:
> +        asm volatile ( "cmpxchgb %b2, %1"
> +                       : "=a" (prev), "+m" (*(uint8_t *)ptr)
> +                       : "q" (new), "0" (old) );
> +        break;
> +    case 2:
> +        asm volatile ( "cmpxchgw %w2, %1"
> +                       : "=a" (prev), "+m" (*(uint16_t *)ptr)
> +                       : "r" (new), "0" (old) );
> +        break;
> +    case 4:
> +        asm volatile ( "cmpxchgl %k2, %1"
> +                       : "=a" (prev), "+m" (*(uint32_t *)ptr)
> +                       : "r" (new), "0" (old) );
> +        break;
> +    case 8:
> +        asm volatile ( "cmpxchgq %2, %1"
> +                       : "=a" (prev), "+m" (*(uint64_t *)ptr)
> +                       : "r" (new), "0" (old) );
> +        break;
> +    }
> +
> +    return prev;
> +}
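
The body here is identical to __cmpxchg() above except for the absent
lock prefix, so it is atomic with respect to the executing CPU only,
matching a guest CMPXCHG without a LOCK prefix. Note also the
'prev = ~old' initialiser: an unexpected size falls out of the switch
with prev != old, which callers read as a failed comparison. A
hypothetical caller, just to show the calling convention:

    uint32_t val = 1;
    unsigned long seen = cmpxchg_local_(&val, 1, 2, sizeof(val));

    /* seen == 1: the swap happened and val is now 2; otherwise seen
     * holds the value actually found and val is unchanged. */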
> +
>  #define cmpxchgptr(ptr,o,n) ({                                          \
>      const __typeof__(**(ptr)) *__o = (o);                               \
>      __typeof__(**(ptr)) *__n = (n);                                     \
> --- a/xen/include/asm-x86/x86_64/system.h
> +++ b/xen/include/asm-x86/x86_64/system.h
> @@ -31,6 +31,24 @@ static always_inline __uint128_t __cmpxc
>      return prev.raw;
>  }
> 
> +static always_inline __uint128_t cmpxchg16b_local_(
> +    void *ptr, const __uint128_t *oldp, const __uint128_t *newp)
> +{
> +    union {
> +        struct { uint64_t lo, hi; };
> +        __uint128_t raw;
> +    } new = { .raw = *newp }, old = { .raw = *oldp }, prev;
> +
> +    ASSERT(cpu_has_cx16);
> +
> +    /* Don't use "=A" here - clang can't deal with that. */
> +    asm volatile ( "cmpxchg16b %2"
> +                   : "=d" (prev.hi), "=a" (prev.lo), "+m" (*(__uint128_t *)ptr)
> +                   : "c" (new.hi), "b" (new.lo), "0" (old.hi), "1" (old.lo) );
> +
> +    return prev.raw;
> +}
> +
>  #define cmpxchg16b(ptr, o, n) ({                           \
>      volatile void *_p = (ptr);                             \
>      ASSERT(!((unsigned long)_p & 0xf));                    \
> 
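Same pattern for the 16-byte variant: cmpxchg16b_local_() mirrors
__cmpxchg16b() minus the lock prefix, and the emulate.c caller only
reaches it when cpu_has_cx16. One thing CMPXCHG16B itself imposes
(hardware behaviour, not this patch): the memory operand must be
16-byte aligned or the instruction faults, which is why the existing
cmpxchg16b() wrapper asserts alignment. A hypothetical call:

    __uint128_t mem __attribute__((aligned(16))) = 1;
    __uint128_t old = 1, new = 2;

    if ( cmpxchg16b_local_(&mem, &old, &new) == old )
        /* success: mem is now 2 */;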

