
Re: [PATCH 04/17] x86/PV: harden guest memory accesses against speculative abuse


  • To: Jan Beulich <jbeulich@xxxxxxxx>
  • From: Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Date: Fri, 12 Feb 2021 11:41:18 +0100
  • Cc: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Tim Deegan <tim@xxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>
  • Delivery-date: Fri, 12 Feb 2021 10:46:31 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On Thu, Jan 14, 2021 at 04:04:57PM +0100, Jan Beulich wrote:
> Inspired by
> https://lore.kernel.org/lkml/f12e7d3cecf41b2c29734ea45a393be21d4a8058.1597848273.git.jpoimboe@xxxxxxxxxx/
> and prior work in that area of x86 Linux, suppress speculation with
> guest specified pointer values by suitably masking the addresses to
> non-canonical space in case they fall into Xen's virtual address range.
> 
> Introduce a new Kconfig control.
> 
> Note that it is necessary in such code to avoid using "m" kind operands:
> If we didn't, there would be no guarantee that the register passed to
> guest_access_mask_ptr is also the (base) one used for the memory access.
> 
> As a minor unrelated change in get_unsafe_asm() the unnecessary "itype"
> parameter gets dropped and the XOR on the fixup path gets changed to be
> a 32-bit one in all cases: This way we avoid pointless REX.W or operand
> size overrides, or writes to partial registers.
> 
> Requested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> The insn sequence chosen is certainly up for discussion; I've picked
> this one despite the RCR because alternatives I could come up with,
> like
> 
>       mov     $(HYPERVISOR_VIRT_END), %rax
>       mov     $~0, %rdx
>       mov     $0x7fffffffffffffff, %rcx
>       cmp     %rax, %rdi
>       cmovb   %rcx, %rdx
>       and     %rdx, %rdi
> 
> weren't necessarily better: Either, as above, they are longer and
> require a 3rd scratch register, or they also utilize the carry flag in
> some similar way.
> ---
> Judging from the comment ahead of put_unsafe_asm() we might as well not
> tell gcc at all anymore about the memory access there, now that there's
> no use of the operand anymore in the assembly code.
> 
> --- a/xen/arch/x86/usercopy.c
> +++ b/xen/arch/x86/usercopy.c
> @@ -10,12 +10,19 @@
>  #include <xen/sched.h>
>  #include <asm/uaccess.h>
>  
> -unsigned __copy_to_user_ll(void __user *to, const void *from, unsigned n)
> +#ifndef GUARD
> +# define GUARD UA_KEEP
> +#endif
> +
> +unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n)
>  {
>      unsigned dummy;
>  
>      stac();
>      asm volatile (
> +        GUARD(
> +        "    guest_access_mask_ptr %[to], %q[scratch1], %q[scratch2]\n"
> +        )
>          "    cmp  $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
>          "    jbe  1f\n"
>          "    mov  %k[to], %[cnt]\n"
> @@ -42,6 +49,7 @@ unsigned __copy_to_user_ll(void __user *
>          _ASM_EXTABLE(1b, 2b)
>          : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
>            [aux] "=&r" (dummy)
> +          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
>          : "[aux]" (n)
>          : "memory" );
>      clac();
> @@ -49,12 +57,15 @@ unsigned __copy_to_user_ll(void __user *
>      return n;
>  }
>  
> -unsigned __copy_from_user_ll(void *to, const void __user *from, unsigned n)
> +unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n)
>  {
>      unsigned dummy;
>  
>      stac();
>      asm volatile (
> +        GUARD(
> +        "    guest_access_mask_ptr %[from], %q[scratch1], %q[scratch2]\n"
> +        )
>          "    cmp  $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
>          "    jbe  1f\n"
>          "    mov  %k[to], %[cnt]\n"
> @@ -87,6 +98,7 @@ unsigned __copy_from_user_ll(void *to, c
>          _ASM_EXTABLE(1b, 6b)
>          : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
>            [aux] "=&r" (dummy)
> +          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
>          : "[aux]" (n)
>          : "memory" );
>      clac();
> @@ -94,6 +106,8 @@ unsigned __copy_from_user_ll(void *to, c
>      return n;
>  }
>  
> +#if GUARD(1) + 0

Why do you need the '+ 0' here? I guess it's to prevent the
preprocessor from complaining when GUARD(1) gets replaced by nothing?
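
Just to check my understanding: assuming UA_KEEP()/UA_DROP() are defined
along the lines of

    #define UA_KEEP(args...) args
    #define UA_DROP(args...)

the first pass would see "#if 1 + 0", while the second pass (after GUARD
is redefined to UA_DROP below) would see "#if + 0", which still parses
and evaluates to 0, whereas a bare "#if" with an empty expression would
be rejected by the preprocessor.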

> +
>  /**
>   * copy_to_user: - Copy a block of data into user space.
>   * @to:   Destination address, in user space.
> @@ -128,8 +142,11 @@ unsigned clear_user(void __user *to, uns
>  {
>      if ( access_ok(to, n) )
>      {
> +        long dummy;
> +
>          stac();
>          asm volatile (
> +            "    guest_access_mask_ptr %[to], %[scratch1], %[scratch2]\n"
>              "0:  rep stos"__OS"\n"
>              "    mov  %[bytes], %[cnt]\n"
>              "1:  rep stosb\n"
> @@ -140,7 +157,8 @@ unsigned clear_user(void __user *to, uns
>              ".previous\n"
>              _ASM_EXTABLE(0b,3b)
>              _ASM_EXTABLE(1b,2b)
> -            : [cnt] "=&c" (n), [to] "+D" (to)
> +            : [cnt] "=&c" (n), [to] "+D" (to), [scratch1] "=&r" (dummy),
> +              [scratch2] "=&r" (dummy)
>              : [bytes] "r" (n & (BYTES_PER_LONG - 1)),
>                [longs] "0" (n / BYTES_PER_LONG), "a" (0) );
>          clac();
> @@ -174,6 +192,16 @@ unsigned copy_from_user(void *to, const
>      return n;
>  }
>  
> +# undef GUARD
> +# define GUARD UA_DROP
> +# define copy_to_guest_ll copy_to_unsafe_ll
> +# define copy_from_guest_ll copy_from_unsafe_ll
> +# undef __user
> +# define __user
> +# include __FILE__
> +
> +#endif /* GUARD(1) */
> +
>  /*
>   * Local variables:
>   * mode: C
> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -446,6 +446,8 @@ UNLIKELY_START(g, create_bounce_frame_ba
>          jmp   asm_domain_crash_synchronous  /* Does not return */
>  __UNLIKELY_END(create_bounce_frame_bad_sp)
>  
> +        guest_access_mask_ptr %rsi, %rax, %rcx
> +
>  #define STORE_GUEST_STACK(reg, n) \
>  0:      movq  %reg,(n)*8(%rsi); \
>          _ASM_EXTABLE(0b, domain_crash_page_fault_ ## n ## x8)
> --- a/xen/common/Kconfig
> +++ b/xen/common/Kconfig
> @@ -114,6 +114,24 @@ config SPECULATIVE_HARDEN_BRANCH
>  
>         If unsure, say Y.
>  
> +config SPECULATIVE_HARDEN_GUEST_ACCESS
> +     bool "Speculative PV Guest Memory Access Hardening"
> +     default y
> +     depends on PV
> +     help
> +       Contemporary processors may use speculative execution as a
> +       performance optimisation, but this can potentially be abused by an
> +       attacker to leak data via speculative sidechannels.
> +
> +       One source of data leakage is via speculative accesses to hypervisor
> +       memory through guest controlled values used to access guest memory.
> +
> +       When enabled, code paths accessing PV guest memory will have guest
> +       controlled addresses massaged such that memory accesses through them
> +       won't touch hypervisor address space.
> +
> +       If unsure, say Y.
> +
>  endmenu
>  
>  config HYPFS
> --- a/xen/include/asm-x86/asm-defns.h
> +++ b/xen/include/asm-x86/asm-defns.h
> @@ -44,3 +44,16 @@
>  .macro INDIRECT_JMP arg:req
>      INDIRECT_BRANCH jmp \arg
>  .endm
> +
> +.macro guest_access_mask_ptr ptr:req, scratch1:req, scratch2:req
> +#if defined(CONFIG_SPECULATIVE_HARDEN_GUEST_ACCESS)
> +    mov $(HYPERVISOR_VIRT_END - 1), \scratch1
> +    mov $~0, \scratch2
> +    cmp \ptr, \scratch1
> +    rcr $1, \scratch2
> +    and \scratch2, \ptr

If my understanding is correct, that's equivalent to:

ptr &= ~0ull >> (ptr < HYPERVISOR_VIRT_END);

It might be helpful to add this as a comment, to clarify the intended
functionality of the assembly bit.

I wonder whether the C code above could generate any jumps. As you pointed
out, we already use something similar in array_index_mask_nospec() and
that's fine to do in C.
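
Something along these lines is what I have in mind as a plain-C rendering
(only a sketch to illustrate the semantics, with an illustrative name; the
helper itself of course stays in assembly in the patch):

    static inline void *mask_guest_ptr(void *ptr)
    {
        unsigned long p = (unsigned long)ptr;
        /*
         * All ones when p is at or above HYPERVISOR_VIRT_END; otherwise
         * clear bit 63, pushing Xen-range addresses into non-canonical
         * space while leaving low guest addresses untouched.
         */
        unsigned long mask = ~0ul >> (p < HYPERVISOR_VIRT_END);

        return (void *)(p & mask);
    }

Whether the compiler turns that comparison into a branch or a setcc/cmov
is of course up to it, which I guess is one more reason to keep the
flags-based sequence in assembly.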

Thanks, Roger.
