[xen staging] x86: split __copy_{from,to}_user() into "guest" and "unsafe" variants



commit 2d824791504f4119f04f95bafffec2e37d319c25
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Fri Feb 19 17:19:19 2021 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Feb 19 17:19:19 2021 +0100

    x86: split __copy_{from,to}_user() into "guest" and "unsafe" variants
    
    The "guest" variants are intended to work with (potentially) fully guest
    controlled addresses, while the "unsafe" variants are intended to be
    used in order to access addresses not (directly) under guest control,
    within Xen's part of virtual address space. Subsequently we will want
    them to have distinct behavior, so as first step identify which one is
    which. For now, both groups of constructs alias one another.
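    
    To illustrate the intended usage split (a sketch only, not part of
    the patch; the variable names are made up, the functions are the ones
    introduced below):
    
        /* (Potentially) guest-controlled linear address: "guest" variant. */
        rc = __copy_from_guest_pv(&data, (void __user *)guest_linear,
                                  sizeof(data));
    
        /* Xen-internal address that may still fault (e.g. a linear
         * pagetable slot): "unsafe" variant. */
        rc = copy_from_unsafe(&l2e,
                              &__linear_l2_table[l2_linear_offset(linear)],
                              sizeof(l2e));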
    
    Double-underscore prefixes are retained only on
    __copy_{from,to}_guest_pv(), so that they can still be distinguished
    from their "checking" counterparts once those are also renamed (to
    copy_{from,to}_guest_pv()).
    
    Add previously missing __user at some call sites.
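    
    (Assuming the usual sparse-style definition, __user expands to nothing
    in regular builds and merely lets a checker flag direct dereferences of
    guest-space pointers, so casts like "(void __user *)addr" in the hunks
    below affect static analysis only, not generated code.)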
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx> [shadow]
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Release-Acked-by: Ian Jackson <iwj@xxxxxxxxxxxxxx>
---
 xen/arch/x86/gdbstub.c             |  4 ++--
 xen/arch/x86/mm/shadow/multi.c     | 16 ++++++++--------
 xen/arch/x86/pv/emul-gate-op.c     |  4 ++--
 xen/arch/x86/pv/emul-priv-op.c     | 10 ++++++----
 xen/arch/x86/pv/mm.c               |  2 +-
 xen/arch/x86/pv/mm.h               |  2 +-
 xen/arch/x86/pv/ro-page-fault.c    |  2 +-
 xen/arch/x86/traps.c               |  2 +-
 xen/arch/x86/usercopy.c            |  4 ++--
 xen/include/asm-x86/guest_access.h |  4 ++--
 xen/include/asm-x86/uaccess.h      | 28 ++++++++++++++--------------
 11 files changed, 40 insertions(+), 38 deletions(-)

diff --git a/xen/arch/x86/gdbstub.c b/xen/arch/x86/gdbstub.c
index ff9f7f9a69..8f4f49fd3b 100644
--- a/xen/arch/x86/gdbstub.c
+++ b/xen/arch/x86/gdbstub.c
@@ -33,13 +33,13 @@ gdb_arch_signal_num(struct cpu_user_regs *regs, unsigned long cookie)
 unsigned int
 gdb_arch_copy_from_user(void *dest, const void *src, unsigned len)
 {
-    return __copy_from_user(dest, src, len);
+    return copy_from_unsafe(dest, src, len);
 }
 
 unsigned int 
 gdb_arch_copy_to_user(void *dest, const void *src, unsigned len)
 {
-    return __copy_to_user(dest, src, len);
+    return copy_to_unsafe(dest, src, len);
 }
 
 void
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 36f548b554..797ebde586 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2614,7 +2614,7 @@ static int sh_page_fault(struct vcpu *v,
         {
             shadow_l2e_t sl2e;
             mfn_t gl1mfn;
-            if ( (__copy_from_user(&sl2e,
+            if ( (copy_from_unsafe(&sl2e,
                                    (sh_linear_l2_table(v)
                                     + shadow_l2_linear_offset(va)),
                                    sizeof(sl2e)) != 0)
@@ -2633,7 +2633,7 @@ static int sh_page_fault(struct vcpu *v,
 #endif /* SHOPT_OUT_OF_SYNC */
         /* The only reasons for reserved bits to be set in shadow entries
          * are the two "magic" shadow_l1e entries. */
-        if ( likely((__copy_from_user(&sl1e,
+        if ( likely((copy_from_unsafe(&sl1e,
                                       (sh_linear_l1_table(v)
                                        + shadow_l1_linear_offset(va)),
                                       sizeof(sl1e)) == 0)
@@ -3308,10 +3308,10 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear)
                    sh_linear_l4_table(v)[shadow_l4_linear_offset(linear)])
                & _PAGE_PRESENT) )
             return false;
-        /* This must still be a copy-from-user because we don't have the
+        /* This must still be a copy-from-unsafe because we don't have the
          * paging lock, and the higher-level shadows might disappear
          * under our feet. */
-        if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v)
+        if ( copy_from_unsafe(&sl3e, (sh_linear_l3_table(v)
                                       + shadow_l3_linear_offset(linear)),
                               sizeof (sl3e)) != 0 )
         {
@@ -3330,9 +3330,9 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear)
         return false;
 #endif
 
-    /* This must still be a copy-from-user because we don't have the shadow
+    /* This must still be a copy-from-unsafe because we don't have the shadow
      * lock, and the higher-level shadows might disappear under our feet. */
-    if ( __copy_from_user(&sl2e,
+    if ( copy_from_unsafe(&sl2e,
                          sh_linear_l2_table(v) + shadow_l2_linear_offset(linear),
                           sizeof (sl2e)) != 0 )
     {
@@ -3371,11 +3371,11 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear)
              * hold the paging lock yet.  Check again with the lock held. */
             paging_lock(d);
 
-            /* This must still be a copy-from-user because we didn't
+            /* This must still be a copy-from-unsafe because we didn't
              * have the paging lock last time we checked, and the
              * higher-level shadows might have disappeared under our
              * feet. */
-            if ( __copy_from_user(&sl2e,
+            if ( copy_from_unsafe(&sl2e,
                                   sh_linear_l2_table(v)
                                   + shadow_l2_linear_offset(linear),
                                   sizeof (sl2e)) != 0 )
diff --git a/xen/arch/x86/pv/emul-gate-op.c b/xen/arch/x86/pv/emul-gate-op.c
index 90a0a47aef..43d847abd0 100644
--- a/xen/arch/x86/pv/emul-gate-op.c
+++ b/xen/arch/x86/pv/emul-gate-op.c
@@ -149,12 +149,12 @@ static int read_mem(enum x86_segment seg, unsigned long offset, void *p_data,
 
     addr = (uint32_t)addr;
 
-    if ( (rc = __copy_from_user(p_data, (void *)addr, bytes)) )
+    if ( (rc = __copy_from_guest_pv(p_data, (void __user *)addr, bytes)) )
     {
         /*
          * TODO: This should report PFEC_insn_fetch when goc->insn_fetch &&
          * cpu_has_nx, but we'd then need a "fetch" variant of
-         * __copy_from_user() respecting NX, SMEP, and protection keys.
+         * __copy_from_guest_pv() respecting NX, SMEP, and protection keys.
          */
         x86_emul_pagefault(0, addr + bytes - rc, ctxt);
         return X86EMUL_EXCEPTION;
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index dbceed8a05..e5a22b9347 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -649,7 +649,8 @@ static int rep_ins(uint16_t port,
         if ( rc != X86EMUL_OKAY )
             return rc;
 
-        if ( (rc = __copy_to_user((void *)addr, &data, bytes_per_rep)) != 0 )
+        if ( (rc = __copy_to_guest_pv((void __user *)addr, &data,
+                                      bytes_per_rep)) != 0 )
         {
             x86_emul_pagefault(PFEC_write_access,
                                addr + bytes_per_rep - rc, ctxt);
@@ -716,7 +717,8 @@ static int rep_outs(enum x86_segment seg, unsigned long offset,
         if ( rc != X86EMUL_OKAY )
             return rc;
 
-        if ( (rc = __copy_from_user(&data, (void *)addr, bytes_per_rep)) != 0 )
+        if ( (rc = __copy_from_guest_pv(&data, (void __user *)addr,
+                                        bytes_per_rep)) != 0 )
         {
             x86_emul_pagefault(0, addr + bytes_per_rep - rc, ctxt);
             return X86EMUL_EXCEPTION;
@@ -1253,12 +1255,12 @@ static int insn_fetch(enum x86_segment seg,
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( (rc = __copy_from_user(p_data, (void *)addr, bytes)) != 0 )
+    if ( (rc = __copy_from_guest_pv(p_data, (void __user *)addr, bytes)) != 0 )
     {
         /*
          * TODO: This should report PFEC_insn_fetch when goc->insn_fetch &&
          * cpu_has_nx, but we'd then need a "fetch" variant of
-         * __copy_from_user() respecting NX, SMEP, and protection keys.
+         * __copy_from_guest_pv() respecting NX, SMEP, and protection keys.
          */
         x86_emul_pagefault(0, addr + bytes - rc, ctxt);
         return X86EMUL_EXCEPTION;
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
index 5d74d11cba..cb86143832 100644
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -41,7 +41,7 @@ l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn)
         return NULL;
 
     /* Find this l1e and its enclosing l1mfn in the linear map. */
-    if ( __copy_from_user(&l2e,
+    if ( copy_from_unsafe(&l2e,
                           &__linear_l2_table[l2_linear_offset(linear)],
                           sizeof(l2_pgentry_t)) )
         return NULL;
diff --git a/xen/arch/x86/pv/mm.h b/xen/arch/x86/pv/mm.h
index a11cc88e5a..7e2259c488 100644
--- a/xen/arch/x86/pv/mm.h
+++ b/xen/arch/x86/pv/mm.h
@@ -22,7 +22,7 @@ static inline l1_pgentry_t guest_get_eff_kern_l1e(unsigned long linear)
         toggle_guest_pt(curr);
 
     if ( unlikely(!__addr_ok(linear)) ||
-         __copy_from_user(&l1e,
+         copy_from_unsafe(&l1e,
                           &__linear_l1_table[l1_linear_offset(linear)],
                           sizeof(l1_pgentry_t)) )
         l1e = l1e_empty();
diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
index 8d0007ede5..5d2dd9fae0 100644
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -43,7 +43,7 @@ static int ptwr_emulated_read(enum x86_segment seg, unsigned long offset,
     unsigned long addr = offset;
 
     if ( !__addr_ok(addr) ||
-         (rc = __copy_from_user(p_data, (void *)addr, bytes)) )
+         (rc = __copy_from_guest_pv(p_data, (void *)addr, bytes)) )
     {
         x86_emul_pagefault(0, addr + bytes - rc, ctxt);  /* Read fault. */
         return X86EMUL_EXCEPTION;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index c65835844d..3c2e563cce 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1103,7 +1103,7 @@ void do_invalid_op(struct cpu_user_regs *regs)
     }
 
     if ( !is_active_kernel_text(regs->rip) ||
-         __copy_from_user(bug_insn, eip, sizeof(bug_insn)) ||
+         copy_from_unsafe(bug_insn, eip, sizeof(bug_insn)) ||
          memcmp(bug_insn, "\xf\xb", sizeof(bug_insn)) )
         goto die;
 
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
index 1d1ef075c9..84f83d2a46 100644
--- a/xen/arch/x86/usercopy.c
+++ b/xen/arch/x86/usercopy.c
@@ -110,7 +110,7 @@ unsigned __copy_from_user_ll(void *to, const void __user *from, unsigned n)
 unsigned copy_to_user(void __user *to, const void *from, unsigned n)
 {
     if ( access_ok(to, n) )
-        n = __copy_to_user(to, from, n);
+        n = __copy_to_guest_pv(to, from, n);
     return n;
 }
 
@@ -168,7 +168,7 @@ unsigned clear_user(void __user *to, unsigned n)
 unsigned copy_from_user(void *to, const void __user *from, unsigned n)
 {
     if ( access_ok(from, n) )
-        n = __copy_from_user(to, from, n);
+        n = __copy_from_guest_pv(to, from, n);
     else
         memset(to, 0, n);
     return n;
diff --git a/xen/include/asm-x86/guest_access.h b/xen/include/asm-x86/guest_access.h
index 8f0e2c71f1..45e28f3220 100644
--- a/xen/include/asm-x86/guest_access.h
+++ b/xen/include/asm-x86/guest_access.h
@@ -28,11 +28,11 @@
 #define __raw_copy_to_guest(dst, src, len)      \
     (is_hvm_vcpu(current) ?                     \
      copy_to_user_hvm((dst), (src), (len)) :    \
-     __copy_to_user((dst), (src), (len)))
+     __copy_to_guest_pv(dst, src, len))
 #define __raw_copy_from_guest(dst, src, len)    \
     (is_hvm_vcpu(current) ?                     \
      copy_from_user_hvm((dst), (src), (len)) :  \
-     __copy_from_user((dst), (src), (len)))
+     __copy_from_guest_pv(dst, src, len))
 #define __raw_clear_guest(dst,  len)            \
     (is_hvm_vcpu(current) ?                     \
      clear_user_hvm((dst), (len)) :             \
diff --git a/xen/include/asm-x86/uaccess.h b/xen/include/asm-x86/uaccess.h
index 570b6b8080..db0802c4be 100644
--- a/xen/include/asm-x86/uaccess.h
+++ b/xen/include/asm-x86/uaccess.h
@@ -196,21 +196,20 @@ do {                                                                  \
 #define get_guest_size get_unsafe_size
 
 /**
- * __copy_to_user: - Copy a block of data into user space, with less checking
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
+ * __copy_to_guest_pv: - Copy a block of data into guest space, with less
+ *                       checking
+ * @to:   Destination address, in guest space.
+ * @from: Source address, in hypervisor space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from kernel space to user space.  Caller must check
+ * Copy data from hypervisor space to guest space.  Caller must check
  * the specified block with access_ok() before calling this function.
  *
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
 static always_inline unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+__copy_to_guest_pv(void __user *to, const void *from, unsigned long n)
 {
     if (__builtin_constant_p(n)) {
         unsigned long ret;
@@ -232,16 +231,16 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
     }
     return __copy_to_user_ll(to, from, n);
 }
+#define copy_to_unsafe __copy_to_guest_pv
 
 /**
- * __copy_from_user: - Copy a block of data from user space, with less checking
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
+ * __copy_from_guest_pv: - Copy a block of data from guest space, with less
+ *                         checking
+ * @to:   Destination address, in hypervisor space.
+ * @from: Source address, in guest space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from user space to kernel space.  Caller must check
+ * Copy data from guest space to hypervisor space.  Caller must check
  * the specified block with access_ok() before calling this function.
  *
  * Returns number of bytes that could not be copied.
@@ -251,7 +250,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * data to the requested size using zero bytes.
  */
 static always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__copy_from_guest_pv(void *to, const void __user *from, unsigned long n)
 {
     if (__builtin_constant_p(n)) {
         unsigned long ret;
@@ -273,6 +272,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
     }
     return __copy_from_user_ll(to, from, n);
 }
+#define copy_from_unsafe __copy_from_guest_pv
 
 /*
  * The exception table consists of pairs of addresses: the first is the
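
For callers, the net effect of the split can be summarised by the following
hypothetical helpers (illustration only, not from the patch; both copy
routines return the number of bytes left uncopied, i.e. 0 on success):

    /* Guest-space access: range-check, then use the "guest" variant
     * (plain copy_from_user() bundles both steps). */
    static int read_guest_u64(const void __user *gaddr, uint64_t *val)
    {
        if ( !access_ok(gaddr, sizeof(*val)) ||
             __copy_from_guest_pv(val, gaddr, sizeof(*val)) )
            return -EFAULT;
        return 0;
    }

    /* Xen-internal address that may still fault: the "unsafe" variant. */
    static int read_linear_l2e(unsigned long linear, l2_pgentry_t *l2e)
    {
        return copy_from_unsafe(l2e,
                                &__linear_l2_table[l2_linear_offset(linear)],
                                sizeof(*l2e)) ? -EFAULT : 0;
    }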
--
generated by git-patchbot for /home/xen/git/xen.git#staging