
[Xen-changelog] [xen master] xen/libxc: Move TMEM_AUTH to XEN_SYSCTL_TMEM_OP_SET_AUTH



commit 1144c8f12bcff65aa11e5d822ea6a6389862b153
Author:     Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
AuthorDate: Tue Apr 4 12:34:14 2017 -0400
Commit:     Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CommitDate: Wed Apr 5 09:39:20 2017 -0400

    xen/libxc: Move TMEM_AUTH to XEN_SYSCTL_TMEM_OP_SET_AUTH
    
    which, surprisingly (or maybe not), looks a lot like
    XEN_SYSCTL_TMEM_OP_SET_POOLS.
    
    This hypercall came about, as explained in docs/misc/tmem-internals.html:
    
    "When tmem was first proposed to the linux kernel mailing list
    (LKML), there was concern expressed about security of shared
    ephemeral pools.  The initial tmem implementation only required a
    client to provide a 128-bit UUID to identify a shared pool, and the
    linux-side tmem implementation obtained this UUID from the
    superblock of the shared filesystem (in ocfs2).  It was pointed out
    on LKML that the UUID was essentially a security key and any
    malicious domain that guessed it would have access to any data from
    the shared filesystem that found its way into tmem.
    ..
    As a result, a Xen boot option -- tmem_shared_auth -- was added.
    The option defaults to disabled, but when it is enabled, management
    tools must explicitly authenticate (or may explicitly deny) shared
    pool access to any client.  On Xen, this is done with the xm
    tmem-shared-auth command."
    
    However the implementation has some rather large holes:
    
    a) The hypercall could be invoked by any guest.
    
    b) If the ->domain id value is 0xFFFF then one can toggle the
       tmem_global.shared_auth knob on/off (see the sketch after this
       list). Combined with a), that made it pretty bad.
    
    c) If one toggles tmem_global.shared_auth off, then the
       'tmem_shared_auth=1' bootup parameter is ignored and one can
       join any shared pool (if the UUID is known)!
    
    d) If 'tmem_shared_auth=1' is passed at boot and
       tmem_global.shared_auth is set to 1, then one can only join a
       shared pool if its UUID has been set by 'xl tmem-shared-auth'.
       Otherwise joining the pool fails and a non-shared pool is
       created instead (without any error reported to the guest). It
       is not entirely clear whether shared pool creation should error
       out at that point or not.
    
    e) If a guest is migrated, the policy values (which UUIDs can be
       shared, whether tmem_global.shared_auth is set, etc.) are
       completely ignored.
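    
    For illustration, here is a rough sketch of the pre-patch behaviour
    behind a) and b) - abridged and paraphrased, not verbatim from
    xen/common/tmem.c:
    
        /* Reachable by ANY guest via HYPERVISOR_tmem_op(TMEM_AUTH)
         * before this patch. */
        static int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
                                          uint64_t uuid_hi, bool_t auth)
        {
            /* b) A cli_id of 0xFFFF (TMEM_CLI_ID_NULL) flips the global
             * knob, silently overriding the 'tmem_shared_auth=1' boot
             * parameter. */
            if ( cli_id == TMEM_CLI_ID_NULL )
            {
                tmem_global.shared_auth = auth;
                return 1;
            }
            /* ... otherwise record or remove the UUID in the client's
             * list of authorized shared-pool UUIDs ... */
        }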
    
    This patch fixes only a): it restricts the hypercall so that it can
    be called only by the control domain. Subsequent patches will fix
    the remaining issues.
    
    We also have to call client_create, as the guest may not have made
    any tmem hypercalls at this point - and hence the '->tmem' field in
    'struct domain' is still NULL. Calling client_create fixes this.
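    
    (For context: the control-domain restriction comes from the generic
    sysctl path rather than from tmem code itself - every sysctl is
    vetted by XSM before tmem_control() is reached. Roughly, per
    xen/common/sysctl.c - shown here only as an aside, not part of this
    patch:)
    
        long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
        {
            ...
            /* Under the default (dummy) XSM policy this fails for
             * anything but the privileged control domain. */
            ret = xsm_sysctl(XSM_PRIV, op->cmd);
            if ( ret )
                goto out;
            ...
        }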
    
    Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx> [libxc changes]
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx> [hypervisor changes]
    Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 tools/libxc/include/xenctrl.h  |  2 +-
 tools/libxc/xc_tmem.c          | 52 +++++++++++++-----------------------------
 xen/common/tmem.c              | 10 +++-----
 xen/common/tmem_control.c      | 50 ++++++++++++++++++++++++++++++++++++++++
 xen/include/public/sysctl.h    | 12 ++++++----
 xen/include/public/tmem.h      |  9 ++++----
 xen/include/xen/tmem_control.h |  2 ++
 xen/include/xen/tmem_xen.h     |  1 -
 8 files changed, 83 insertions(+), 55 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 2d97d36..e2b4cc1 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1901,7 +1901,7 @@ int xc_tmem_control_oid(xc_interface *xch, int32_t pool_id, uint32_t subop,
 int xc_tmem_control(xc_interface *xch,
                     int32_t pool_id, uint32_t subop, uint32_t cli_id,
                     uint32_t len, uint32_t arg, void *buf);
-int xc_tmem_auth(xc_interface *xch, int cli_id, char *uuid_str, int arg1);
+int xc_tmem_auth(xc_interface *xch, int cli_id, char *uuid_str, int enable);
 int xc_tmem_save(xc_interface *xch, int dom, int live, int fd, int field_marker);
 int xc_tmem_save_extra(xc_interface *xch, int dom, int fd, int field_marker);
 void xc_tmem_save_done(xc_interface *xch, int dom);
diff --git a/tools/libxc/xc_tmem.c b/tools/libxc/xc_tmem.c
index 181de48..5f5e18f 100644
--- a/tools/libxc/xc_tmem.c
+++ b/tools/libxc/xc_tmem.c
@@ -22,30 +22,6 @@
 #include <assert.h>
 #include <xen/tmem.h>
 
-static int do_tmem_op(xc_interface *xch, tmem_op_t *op)
-{
-    int ret;
-    DECLARE_HYPERCALL_BOUNCE(op, sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    if ( xc_hypercall_bounce_pre(xch, op) )
-    {
-        PERROR("Could not bounce buffer for tmem op hypercall");
-        return -EFAULT;
-    }
-
-    ret = xencall1(xch->xcall, __HYPERVISOR_tmem_op,
-                   HYPERCALL_BUFFER_AS_ARG(op));
-    if ( ret < 0 )
-    {
-        if ( errno == EACCES )
-            DPRINTF("tmem operation failed -- need to"
-                    " rebuild the user-space tool set?\n");
-    }
-    xc_hypercall_bounce_post(xch, op);
-
-    return ret;
-}
-
 int xc_tmem_control(xc_interface *xch,
                     int32_t pool_id,
                     uint32_t cmd,
@@ -69,7 +45,8 @@ int xc_tmem_control(xc_interface *xch,
     sysctl.u.tmem_op.oid.oid[1] = 0;
     sysctl.u.tmem_op.oid.oid[2] = 0;
 
-    if ( cmd == XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO )
+    if ( cmd == XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO ||
+         cmd == XEN_SYSCTL_TMEM_OP_SET_AUTH )
         HYPERCALL_BOUNCE_SET_DIR(buf, XC_HYPERCALL_BUFFER_BOUNCE_IN);
     if ( len )
     {
@@ -176,22 +153,25 @@ static int xc_tmem_uuid_parse(char *uuid_str, uint64_t *uuid_lo, uint64_t *uuid_
 int xc_tmem_auth(xc_interface *xch,
                  int cli_id,
                  char *uuid_str,
-                 int arg1)
+                 int enable)
 {
-    tmem_op_t op;
-
-    op.cmd = TMEM_AUTH;
-    op.pool_id = 0;
-    op.u.creat.arg1 = cli_id;
-    op.u.creat.flags = arg1;
-    if ( xc_tmem_uuid_parse(uuid_str, &op.u.creat.uuid[0],
-                                      &op.u.creat.uuid[1]) < 0 )
+    xen_tmem_pool_info_t pool = {
+        .flags.u.auth = enable,
+        .id = 0,
+        .n_pages = 0,
+        .uuid[0] = 0,
+        .uuid[1] = 0,
+    };
+    if ( xc_tmem_uuid_parse(uuid_str, &pool.uuid[0],
+                                      &pool.uuid[1]) < 0 )
     {
         PERROR("Can't parse uuid, use xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx");
         return -1;
     }
-
-    return do_tmem_op(xch, &op);
+    return xc_tmem_control(xch, 0 /* pool_id */,
+                           XEN_SYSCTL_TMEM_OP_SET_AUTH,
+                           cli_id, sizeof(pool),
+                           0 /* arg */, &pool);
 }
 
 /* Save/restore/live migrate */
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index ee43f13..158d660 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1604,8 +1604,8 @@ fail:
 
 /************ TMEM CONTROL OPERATIONS ************************************/
 
-static int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
-                                  uint64_t uuid_hi, bool_t auth)
+int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
+                           uint64_t uuid_hi, bool auth)
 {
     struct client *client;
     int i, free = -1;
@@ -1912,12 +1912,8 @@ long do_tmem_op(tmem_cli_op_t uops)
     {
     case TMEM_CONTROL:
     case TMEM_RESTORE_NEW:
-        rc = -EOPNOTSUPP;
-        break;
-
     case TMEM_AUTH:
-        rc = tmemc_shared_pool_auth(op.u.creat.arg1,op.u.creat.uuid[0],
-                         op.u.creat.uuid[1],op.u.creat.flags);
+        rc = -EOPNOTSUPP;
         break;
 
     default:
diff --git a/xen/common/tmem_control.c b/xen/common/tmem_control.c
index 3e99257..2d980e3 100644
--- a/xen/common/tmem_control.c
+++ b/xen/common/tmem_control.c
@@ -450,6 +450,53 @@ static int tmemc_set_pools(int cli_id,
     return rc ? : i;
 }
 
+static int tmemc_auth_pools(int cli_id,
+                            XEN_GUEST_HANDLE(xen_tmem_pool_info_t) pools,
+                            uint32_t len)
+{
+    unsigned int i;
+    int rc = 0;
+    unsigned int nr = len / sizeof(xen_tmem_pool_info_t);
+    struct client *client = tmem_client_from_cli_id(cli_id);
+
+    if ( len % sizeof(xen_tmem_pool_info_t) )
+        return -EINVAL;
+
+    if ( nr > MAX_POOLS_PER_DOMAIN )
+        return -E2BIG;
+
+    if ( !guest_handle_okay(pools, nr) )
+        return -EINVAL;
+
+    if ( !client )
+    {
+        client = client_create(cli_id);
+        if ( !client )
+            return -ENOMEM;
+    }
+
+    for ( i = 0; i < nr; i++ )
+    {
+        xen_tmem_pool_info_t pool;
+
+        if ( __copy_from_guest_offset(&pool, pools, i, 1) )
+            return -EFAULT;
+
+        if ( pool.n_pages )
+            return -EINVAL;
+
+        rc = tmemc_shared_pool_auth(cli_id, pool.uuid[0], pool.uuid[1],
+                                    pool.flags.u.auth);
+
+        if ( rc < 0 )
+            break;
+
+    }
+
+    /* On success, return how many pools we have processed. */
+    return rc ? : i;
+}
+
 int tmem_control(struct xen_sysctl_tmem_op *op)
 {
     int ret;
@@ -489,6 +536,9 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
     case XEN_SYSCTL_TMEM_OP_SET_POOLS: /* TMEM_RESTORE_NEW */
         ret = tmemc_set_pools(op->cli_id, op->u.pool, op->len);
         break;
+    case XEN_SYSCTL_TMEM_OP_SET_AUTH: /* TMEM_AUTH */
+        ret = tmemc_auth_pools(op->cli_id, op->u.pool, op->len);
+        break;
     default:
         ret = do_tmem_control(op);
         break;
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index c03d027..ee76a66 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -772,6 +772,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
 #define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB      8
 #define XEN_SYSCTL_TMEM_OP_SET_POOLS              9
 #define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN             10
+#define XEN_SYSCTL_TMEM_OP_SET_AUTH               11
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE     19
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV      20
 #define XEN_SYSCTL_TMEM_OP_SAVE_END               21
@@ -813,12 +814,12 @@ typedef struct xen_tmem_client xen_tmem_client_t;
 DEFINE_XEN_GUEST_HANDLE(xen_tmem_client_t);
 
 /*
- * XEN_SYSCTL_TMEM_OP_[GET|SET]_POOLS uses the 'pool' array in
- * xen_sysctl_tmem_op with this structure.
+ * XEN_SYSCTL_TMEM_OP_[GET|SET]_POOLS or XEN_SYSCTL_TMEM_OP_SET_AUTH
+ * uses the 'pool' array in xen_sysctl_tmem_op with this structure.
  * The XEN_SYSCTL_TMEM_OP_GET_POOLS hypercall will
  * return the number of entries in 'pool' or a negative value
  * if an error was encountered.
- * The XEN_SYSCTL_TMEM_OP_SET_POOLS will return the number of
+ * The XEN_SYSCTL_TMEM_OP_SET_[AUTH|POOLS] will return the number of
  * entries in 'pool' processed or a negative value if an error
  * was encountered.
  */
@@ -828,14 +829,15 @@ struct xen_tmem_pool_info {
         struct {
             uint32_t persist:1,    /* See TMEM_POOL_PERSIST. */
                      shared:1,     /* See TMEM_POOL_SHARED. */
-                     rsv:2,
+                     auth:1,       /* See TMEM_POOL_AUTH. */
+                     rsv1:1,
                      pagebits:8,   /* TMEM_POOL_PAGESIZE_[SHIFT,MASK]. */
                      rsv2:12,
                      version:8;    /* TMEM_POOL_VERSION_[SHIFT,MASK]. */
         } u;
     } flags;
     uint32_t id;                  /* Less than tmem_client.maxpools. */
-    uint64_t n_pages;             /* Zero on XEN_SYSCTL_TMEM_OP_SET_POOLS. */
+    uint64_t n_pages;             /* Zero on XEN_SYSCTL_TMEM_OP_SET_[AUTH|POOLS]. */
     uint64_aligned_t uuid[2];
 };
 typedef struct xen_tmem_pool_info xen_tmem_pool_info_t;
diff --git a/xen/include/public/tmem.h b/xen/include/public/tmem.h
index b9f3537..aa0aafa 100644
--- a/xen/include/public/tmem.h
+++ b/xen/include/public/tmem.h
@@ -51,10 +51,9 @@
 #define TMEM_XCHG                 10
 #endif
 
-/* Privileged commands to HYPERVISOR_tmem_op() */
-#define TMEM_AUTH                 101
-#define TMEM_RESTORE_NEW          102 /* Now called via XEN_SYSCTL_tmem_op as
-                                         XEN_SYSCTL_TMEM_OP_SET_POOL. */
+/* Privileged commands now called via XEN_SYSCTL_tmem_op. */
+#define TMEM_AUTH                 101 /* as XEN_SYSCTL_TMEM_OP_SET_AUTH. */
+#define TMEM_RESTORE_NEW          102 /* as XEN_SYSCTL_TMEM_OP_SET_POOL. */
 
 /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
 #define TMEM_POOL_PERSIST          1
@@ -93,7 +92,7 @@ struct tmem_op {
             uint64_t uuid[2];
             uint32_t flags;
             uint32_t arg1;
-        } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH */
+        } creat; /* for cmd == TMEM_NEW_POOL. */
         struct {
 #if __XEN_INTERFACE_VERSION__ < 0x00040600
             uint64_t oid[3];
diff --git a/xen/include/xen/tmem_control.h b/xen/include/xen/tmem_control.h
index 91c185e..ad04cf7 100644
--- a/xen/include/xen/tmem_control.h
+++ b/xen/include/xen/tmem_control.h
@@ -22,6 +22,8 @@ struct client *client_create(domid_t cli_id);
 int do_tmem_new_pool(domid_t this_cli_id, uint32_t d_poolid, uint32_t flags,
                      uint64_t uuid_lo, uint64_t uuid_hi);
 
+int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
+                           uint64_t uuid_hi, bool auth);
 #endif /* CONFIG_TMEM */
 
 #endif /* __XEN_TMEM_CONTROL_H__ */
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index b6bd61b..70cc108 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -198,7 +198,6 @@ static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
         switch ( cop.cmd )
         {
         case TMEM_NEW_POOL:   u = XLAT_tmem_op_u_creat; break;
-        case TMEM_AUTH:       u = XLAT_tmem_op_u_creat; break;
         default:              u = XLAT_tmem_op_u_gen ;  break;
         }
         XLAT_tmem_op(op, &cop);
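
For readers who want to exercise the reworked interface: xc_tmem_auth()
keeps its old shape but now routes through xc_tmem_control() and
XEN_SYSCTL_tmem_op. A minimal, hypothetical toolstack-side caller (the
domid and UUID below are made up for illustration):

    #include <stdio.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        int rc;

        if ( !xch )
            return 1;

        /* Authorize domain 1 to join the shared pool with this UUID
         * (pass 0 as 'enable' to deny instead). This now issues
         * XEN_SYSCTL_TMEM_OP_SET_AUTH instead of the old guest-reachable
         * TMEM_AUTH tmem_op, so it only succeeds from the control
         * domain. */
        rc = xc_tmem_auth(xch, 1, "abcdef01-2345-6789-abcd-ef0123456789", 1);
        if ( rc < 0 )
            fprintf(stderr, "xc_tmem_auth failed: %d\n", rc);

        xc_interface_close(xch);
        return rc < 0;
    }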
--
generated by git-patchbot for /home/xen/git/xen.git#master
