
[Xen-devel] [PATCH v1] Tmem fixes for v4.9.



Hey!

Please see the attached patches. I had hoped to also have the initial
migration v2 patches ready, but other things preempted me <sigh>.

This patchset moves two sub-ops (TMEM_RESTORE_NEW, TMEM_AUTH) from the
guest-accessible tmem hypercall to the system-admin-controlled sysctl
interface.

It made no sense to have them in the guest-accessible interface, and
worse, it has security implications (a hostile guest could join another
guest's shared pool without any authorization). However, since XSA-7 the
tmem code has not come out of "experimental" status, which means we can
treat these as plain fixes rather than as security issues.

Please see the diffstat, shortlog, and the full diff below for easier
review.

 docs/misc/tmem-internals.html       |  6 +--
 docs/misc/xen-command-line.markdown |  3 --
 tools/libxc/include/xenctrl.h       |  2 +-
 tools/libxc/xc_tmem.c               | 78 ++++++++++++-----------------
 tools/xl/xl_cmdtable.c              |  2 +-
 xen/common/tmem.c                   | 24 ++++-----
 xen/common/tmem_control.c           | 97 +++++++++++++++++++++++++++++++++++++
 xen/common/tmem_xen.c               |  3 --
 xen/include/public/sysctl.h         |  5 +-
 xen/include/public/tmem.h           |  8 +--
 xen/include/xen/tmem_control.h      |  6 +++
 xen/include/xen/tmem_xen.h          |  9 ----
 12 files changed, 155 insertions(+), 88 deletions(-)

Konrad Rzeszutek Wilk (5):
      xen/libxc/tmem: Replace TMEM_RESTORE_NEW with XEN_SYSCTL_TMEM_OP_SET_POOLS
      xen/libxc: Move TMEM_AUTH to XEN_SYSCTL_TMEM_OP_SET_AUTH
      tmem: By default to join a shared pool it must be authorized.
      tmem: Fix tmem-shared-auth 'auth' values
      tmem: Parse UUIDs correctly.
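
To illustrate the UUID parsing fix: the old code mapped 'A'-'F' and
'a'-'f' to 0-5 (missing the +10), so for example "ab" parsed as 0x01
instead of 0xab. A standalone sketch of the corrected digit mapping,
mirroring the xc_tmem_uuid_parse() hunk below (not code from the
series itself):

static int hex_digit(char c)
{
    if ( c >= '0' && c <= '9' )
        return c - '0';
    if ( c >= 'A' && c <= 'F' )
        return c - 'A' + 10;
    if ( c >= 'a' && c <= 'f' )
        return c - 'a' + 10;
    return -1;
}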

diff --git a/docs/misc/tmem-internals.html b/docs/misc/tmem-internals.html
index 2d8635d..9b7e70e 100644
--- a/docs/misc/tmem-internals.html
+++ b/docs/misc/tmem-internals.html
@@ -715,11 +715,9 @@ Ocfs2 has only very limited security; it is assumed that anyone who can
 access the filesystem bits on the shared disk can mount the filesystem and use
 it.  But in a virtualized data center,
 higher isolation requirements may apply.
-As a result, a Xen boot option -- &quot;tmem_shared_auth&quot; -- was
-added.  The option defaults to disabled,
-but when it is enabled, management tools must explicitly authenticate (or may
+As a result, management tools must explicitly authenticate (or may
 explicitly deny) shared pool access to any client.
-On Xen, this is done with the &quot;xm
+On Xen, this is done with the &quot;xl
 tmem-shared-auth&quot; command.
 <P>
 <b><i>32-bit implementation</i>.</b>
diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index bad20db..9c20dad 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1543,9 +1543,6 @@ pages) must also be specified via the tbuf\_size parameter.
 ### tmem\_compress
 > `= <boolean>`
 
-### tmem\_shared\_auth
-> `= <boolean>`
-
 ### tsc
 > `= unstable | skewed | stable:socket`
 
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 2d97d36..e2b4cc1 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1901,7 +1901,7 @@ int xc_tmem_control_oid(xc_interface *xch, int32_t pool_id, uint32_t subop,
 int xc_tmem_control(xc_interface *xch,
                     int32_t pool_id, uint32_t subop, uint32_t cli_id,
                     uint32_t len, uint32_t arg, void *buf);
-int xc_tmem_auth(xc_interface *xch, int cli_id, char *uuid_str, int arg1);
+int xc_tmem_auth(xc_interface *xch, int cli_id, char *uuid_str, int enable);
 int xc_tmem_save(xc_interface *xch, int dom, int live, int fd, int field_marker);
 int xc_tmem_save_extra(xc_interface *xch, int dom, int fd, int field_marker);
 void xc_tmem_save_done(xc_interface *xch, int dom);
diff --git a/tools/libxc/xc_tmem.c b/tools/libxc/xc_tmem.c
index 51d11ef..9bf5cc3 100644
--- a/tools/libxc/xc_tmem.c
+++ b/tools/libxc/xc_tmem.c
@@ -22,30 +22,6 @@
 #include <assert.h>
 #include <xen/tmem.h>
 
-static int do_tmem_op(xc_interface *xch, tmem_op_t *op)
-{
-    int ret;
-    DECLARE_HYPERCALL_BOUNCE(op, sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    if ( xc_hypercall_bounce_pre(xch, op) )
-    {
-        PERROR("Could not bounce buffer for tmem op hypercall");
-        return -EFAULT;
-    }
-
-    ret = xencall1(xch->xcall, __HYPERVISOR_tmem_op,
-                   HYPERCALL_BUFFER_AS_ARG(op));
-    if ( ret < 0 )
-    {
-        if ( errno == EACCES )
-            DPRINTF("tmem operation failed -- need to"
-                    " rebuild the user-space tool set?\n");
-    }
-    xc_hypercall_bounce_post(xch, op);
-
-    return ret;
-}
-
 int xc_tmem_control(xc_interface *xch,
                     int32_t pool_id,
                     uint32_t cmd,
@@ -69,7 +45,8 @@ int xc_tmem_control(xc_interface *xch,
     sysctl.u.tmem_op.oid.oid[1] = 0;
     sysctl.u.tmem_op.oid.oid[2] = 0;
 
-    if ( cmd == XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO )
+    if ( cmd == XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO ||
+         cmd == XEN_SYSCTL_TMEM_OP_SET_AUTH )
         HYPERCALL_BOUNCE_SET_DIR(buf, XC_HYPERCALL_BUFFER_BOUNCE_IN);
     if ( len )
     {
@@ -161,9 +138,9 @@ static int xc_tmem_uuid_parse(char *uuid_str, uint64_t *uuid_lo, uint64_t *uuid_
         else if ( *p >= '0' && *p <= '9' )
             digit = *p - '0';
         else if ( *p >= 'A' && *p <= 'F' )
-            digit = *p - 'A';
+            digit = *p - 'A' + 10;
         else if ( *p >= 'a' && *p <= 'f' )
-            digit = *p - 'a';
+            digit = *p - 'a' + 10;
         else
             return -1;
         *x = (*x << 4) | digit;
@@ -176,22 +153,25 @@ static int xc_tmem_uuid_parse(char *uuid_str, uint64_t *uuid_lo, uint64_t *uuid_
 int xc_tmem_auth(xc_interface *xch,
                  int cli_id,
                  char *uuid_str,
-                 int arg1)
+                 int enable)
 {
-    tmem_op_t op;
-
-    op.cmd = TMEM_AUTH;
-    op.pool_id = 0;
-    op.u.creat.arg1 = cli_id;
-    op.u.creat.flags = arg1;
-    if ( xc_tmem_uuid_parse(uuid_str, &op.u.creat.uuid[0],
-                                      &op.u.creat.uuid[1]) < 0 )
+    xen_tmem_pool_info_t pool = {
+        .flags.u.auth = enable,
+        .id = 0,
+        .n_pages = 0,
+        .uuid[0] = 0,
+        .uuid[1] = 0,
+    };
+    if ( xc_tmem_uuid_parse(uuid_str, &pool.uuid[0],
+                                      &pool.uuid[1]) < 0 )
     {
         PERROR("Can't parse uuid, use xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx");
         return -1;
     }
-
-    return do_tmem_op(xch, &op);
+    return xc_tmem_control(xch, 0 /* pool_id */,
+                           XEN_SYSCTL_TMEM_OP_SET_AUTH,
+                           cli_id, sizeof(pool),
+                           0 /* arg */, &pool);
 }
 
 /* Save/restore/live migrate */
@@ -385,16 +365,18 @@ static int xc_tmem_restore_new_pool(
                     uint64_t uuid_lo,
                     uint64_t uuid_hi)
 {
-    tmem_op_t op;
-
-    op.cmd = TMEM_RESTORE_NEW;
-    op.pool_id = pool_id;
-    op.u.creat.arg1 = cli_id;
-    op.u.creat.flags = flags;
-    op.u.creat.uuid[0] = uuid_lo;
-    op.u.creat.uuid[1] = uuid_hi;
-
-    return do_tmem_op(xch, &op);
+    xen_tmem_pool_info_t pool = {
+        .flags.raw = flags,
+        .id = pool_id,
+        .n_pages = 0,
+        .uuid[0] = uuid_lo,
+        .uuid[1] = uuid_hi,
+    };
+
+    return xc_tmem_control(xch, pool_id,
+                           XEN_SYSCTL_TMEM_OP_SET_POOLS,
+                           cli_id, sizeof(pool),
+                           0 /* arg */, &pool);
 }
 
 int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
diff --git a/tools/xl/xl_cmdtable.c b/tools/xl/xl_cmdtable.c
index 7d97811..30eb93c 100644
--- a/tools/xl/xl_cmdtable.c
+++ b/tools/xl/xl_cmdtable.c
@@ -416,7 +416,7 @@ struct cmd_spec cmd_table[] = {
       "  -a                             Authenticate for all tmem pools\n"
       "  -u UUID                        Specify uuid\n"
       "                                 
(abcdef01-2345-6789-1234-567890abcdef)\n"
-      "  -A AUTH                        0=auth,1=deauth",
+      "  -A AUTH                        0=deauth,1=auth",
     },
     { "tmem-freeable",
       &main_tmem_freeable, 0, 0,
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 6d5de5b..ff74292 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -804,7 +804,7 @@ static void pool_flush(struct tmem_pool *pool, domid_t cli_id)
 
 /************ CLIENT MANIPULATION OPERATIONS **************************/
 
-static struct client *client_create(domid_t cli_id)
+struct client *client_create(domid_t cli_id)
 {
     struct client *client = xzalloc(struct client);
     int i, shift;
@@ -846,7 +846,6 @@ static struct client *client_create(domid_t cli_id)
     client->info.version = TMEM_SPEC_VERSION;
     client->info.maxpools = MAX_POOLS_PER_DOMAIN;
     client->info.flags.u.compress = tmem_compression_enabled();
-    client->shared_auth_required = tmem_shared_auth();
     for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
         client->shared_auth_uuid[i][0] =
             client->shared_auth_uuid[i][1] = -1L;
@@ -1435,9 +1434,9 @@ static int do_tmem_destroy_pool(uint32_t pool_id)
     return 1;
 }
 
-static int do_tmem_new_pool(domid_t this_cli_id,
-                                     uint32_t d_poolid, uint32_t flags,
-                                     uint64_t uuid_lo, uint64_t uuid_hi)
+int do_tmem_new_pool(domid_t this_cli_id,
+                     uint32_t d_poolid, uint32_t flags,
+                     uint64_t uuid_lo, uint64_t uuid_hi)
 {
     struct client *client;
     domid_t cli_id;
@@ -1530,7 +1529,8 @@ static int do_tmem_new_pool(domid_t this_cli_id,
             pool->shared = 0;
             goto out;
         }
-        if ( client->shared_auth_required && !tmem_global.shared_auth )
+        /* By default only join domains that are authorized by admin. */
+        if ( !tmem_global.shared_auth )
         {
             for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
                 if ( (client->shared_auth_uuid[i][0] == uuid_lo) &&
@@ -1604,8 +1604,8 @@ fail:
 
 /************ TMEM CONTROL OPERATIONS ************************************/
 
-static int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
-                                  uint64_t uuid_hi, bool_t auth)
+int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
+                           uint64_t uuid_hi, bool auth)
 {
     struct client *client;
     int i, free = -1;
@@ -1908,7 +1908,8 @@ long do_tmem_op(tmem_cli_op_t uops)
     /* Acquire write lock for all commands at first. */
     write_lock(&tmem_rwlock);
 
-    if ( op.cmd == TMEM_CONTROL )
+    if ( op.cmd == TMEM_CONTROL || op.cmd == TMEM_RESTORE_NEW ||
+         op.cmd == TMEM_AUTH )
     {
         rc = -EOPNOTSUPP;
     }
@@ -1917,11 +1918,6 @@ long do_tmem_op(tmem_cli_op_t uops)
         rc = tmemc_shared_pool_auth(op.u.creat.arg1,op.u.creat.uuid[0],
                          op.u.creat.uuid[1],op.u.creat.flags);
     }
-    else if ( op.cmd == TMEM_RESTORE_NEW )
-    {
-        rc = do_tmem_new_pool(op.u.creat.arg1, op.pool_id, op.u.creat.flags,
-                         op.u.creat.uuid[0], op.u.creat.uuid[1]);
-    }
     else {
     /*
         * For other commands, create per-client tmem structure dynamically on
diff --git a/xen/common/tmem_control.c b/xen/common/tmem_control.c
index ddd9cfe..cae3a95 100644
--- a/xen/common/tmem_control.c
+++ b/xen/common/tmem_control.c
@@ -402,6 +402,97 @@ static int tmemc_get_pool(int cli_id,
     return rc ? : idx;
 }
 
+static int tmemc_set_pools(int cli_id,
+                           XEN_GUEST_HANDLE(xen_tmem_pool_info_t) pools,
+                           uint32_t len)
+{
+    unsigned int i, idx;
+    int rc = 0;
+    unsigned int nr = len / sizeof(xen_tmem_pool_info_t);
+    struct client *client = tmem_client_from_cli_id(cli_id);
+
+    if ( len % sizeof(xen_tmem_pool_info_t) )
+        return -EINVAL;
+
+    if ( nr > MAX_POOLS_PER_DOMAIN )
+        return -E2BIG;
+
+    if ( !guest_handle_okay(pools, nr) )
+        return -EINVAL;
+
+    if ( !client )
+    {
+        client = client_create(cli_id);
+        if ( !client )
+            return -ENOMEM;
+    }
+    for ( idx = 0, i = 0; i < nr; i++ )
+    {
+        xen_tmem_pool_info_t pool;
+
+        if ( __copy_from_guest_offset(&pool, pools, i, 1 ) )
+            return -EFAULT;
+
+        rc = do_tmem_new_pool(cli_id, pool.id, pool.flags.raw,
+                              pool.uuid[0], pool.uuid[1]);
+        if ( rc < 0 )
+            break;
+
+        pool.id = rc;
+        if ( __copy_to_guest_offset(pools, idx, &pool, 1) )
+            return -EFAULT;
+        idx++;
+    }
+
+    /* And how many we have processed. */
+    return rc ? : idx;
+}
+
+static int tmemc_auth_pools(int cli_id,
+                            XEN_GUEST_HANDLE(xen_tmem_pool_info_t) pools,
+                            uint32_t len)
+{
+    unsigned int i, idx;
+    int rc = 0;
+    unsigned int nr = len / sizeof(xen_tmem_pool_info_t);
+    struct client *client = tmem_client_from_cli_id(cli_id);
+
+    if ( len % sizeof(xen_tmem_pool_info_t) )
+        return -EINVAL;
+
+    if ( nr > MAX_POOLS_PER_DOMAIN )
+        return -E2BIG;
+
+    if ( !guest_handle_okay(pools, nr) )
+        return -EINVAL;
+
+    if ( !client )
+    {
+        client = client_create(cli_id);
+        if ( !client )
+            return -ENOMEM;
+    }
+
+    for ( idx = 0, i = 0; i < nr; i++ )
+    {
+        xen_tmem_pool_info_t pool;
+
+        if ( __copy_from_guest_offset(&pool, pools, i, 1 ) )
+            return -EFAULT;
+
+        rc = tmemc_shared_pool_auth(cli_id, pool.uuid[0], pool.uuid[1],
+                                    pool.flags.u.auth);
+
+        if ( rc < 0 )
+            break;
+
+        idx++;
+    }
+
+    /* And how many we have processed. */
+    return rc ? : idx;
+}
+
 int tmem_control(struct xen_sysctl_tmem_op *op)
 {
     int ret;
@@ -438,6 +529,12 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
     case XEN_SYSCTL_TMEM_OP_GET_POOLS:
         ret = tmemc_get_pool(op->cli_id, op->u.pool, op->len);
         break;
+    case XEN_SYSCTL_TMEM_OP_SET_POOLS: /* TMEM_RESTORE_NEW */
+        ret = tmemc_set_pools(op->cli_id, op->u.pool, op->len);
+        break;
+    case XEN_SYSCTL_TMEM_OP_SET_AUTH: /* TMEM_AUTH */
+        ret = tmemc_auth_pools(op->cli_id, op->u.pool, op->len);
+        break;
     default:
         ret = do_tmem_control(op);
         break;
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 7d60b71..06ce3ef 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -20,9 +20,6 @@ boolean_param("tmem", opt_tmem);
 bool_t __read_mostly opt_tmem_compress = 0;
 boolean_param("tmem_compress", opt_tmem_compress);
 
-bool_t __read_mostly opt_tmem_shared_auth = 0;
-boolean_param("tmem_shared_auth", opt_tmem_shared_auth);
-
 atomic_t freeable_page_count = ATOMIC_INIT(0);
 
 /* these are a concurrency bottleneck, could be percpu and dynamically
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index 00f5e77..74c24cc 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -770,7 +770,9 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
 #define XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO        6
 #define XEN_SYSCTL_TMEM_OP_GET_POOLS              7
 #define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB      8
+#define XEN_SYSCTL_TMEM_OP_SET_POOLS              9
 #define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN             10
+#define XEN_SYSCTL_TMEM_OP_SET_AUTH               11
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE     19
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV      20
 #define XEN_SYSCTL_TMEM_OP_SAVE_END               21
@@ -823,7 +825,8 @@ struct xen_tmem_pool_info {
         struct {
             uint32_t persist:1,    /* See TMEM_POOL_PERSIST. */
                      shared:1,     /* See TMEM_POOL_SHARED. */
-                     rsv:2,
+                     auth:1,       /* See TMEM_POOL_AUTH. */
+                     rsv1:1,
                      pagebits:8,   /* TMEM_POOL_PAGESIZE_[SHIFT,MASK]. */
                      rsv2:12,
                      version:8;    /* TMEM_POOL_VERSION_[SHIFT,MASK]. */
diff --git a/xen/include/public/tmem.h b/xen/include/public/tmem.h
index 2d805fb..aa0aafa 100644
--- a/xen/include/public/tmem.h
+++ b/xen/include/public/tmem.h
@@ -51,9 +51,9 @@
 #define TMEM_XCHG                 10
 #endif
 
-/* Privileged commands to HYPERVISOR_tmem_op() */
-#define TMEM_AUTH                 101
-#define TMEM_RESTORE_NEW          102
+/* Privileged commands now called via XEN_SYSCTL_tmem_op. */
+#define TMEM_AUTH                 101 /* as XEN_SYSCTL_TMEM_OP_SET_AUTH. */
+#define TMEM_RESTORE_NEW          102 /* as XEN_SYSCTL_TMEM_OP_SET_POOLS. */
 
 /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
 #define TMEM_POOL_PERSIST          1
@@ -92,7 +92,7 @@ struct tmem_op {
             uint64_t uuid[2];
             uint32_t flags;
             uint32_t arg1;
-        } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
+        } creat; /* for cmd == TMEM_NEW_POOL. */
         struct {
 #if __XEN_INTERFACE_VERSION__ < 0x00040600
             uint64_t oid[3];
diff --git a/xen/include/xen/tmem_control.h b/xen/include/xen/tmem_control.h
index 44bc07f..ad04cf7 100644
--- a/xen/include/xen/tmem_control.h
+++ b/xen/include/xen/tmem_control.h
@@ -18,6 +18,12 @@ extern rwlock_t tmem_rwlock;
 int tmem_evict(void);
 int do_tmem_control(struct xen_sysctl_tmem_op *op);
 
+struct client *client_create(domid_t cli_id);
+int do_tmem_new_pool(domid_t this_cli_id, uint32_t d_poolid, uint32_t flags,
+                     uint64_t uuid_lo, uint64_t uuid_hi);
+
+int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
+                           uint64_t uuid_hi, bool auth);
 #endif /* CONFIG_TMEM */
 
 #endif /* __XEN_TMEM_CONTROL_H__ */
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 13cf7bc..dc5888c 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -41,12 +41,6 @@ static inline bool_t tmem_compression_enabled(void)
     return opt_tmem_compress;
 }
 
-extern bool_t opt_tmem_shared_auth;
-static inline bool_t tmem_shared_auth(void)
-{
-    return opt_tmem_shared_auth;
-}
-
 #ifdef CONFIG_TMEM
 extern bool_t opt_tmem;
 static inline bool_t tmem_enabled(void)
@@ -198,8 +192,6 @@ static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
         switch ( cop.cmd )
         {
         case TMEM_NEW_POOL:   u = XLAT_tmem_op_u_creat; break;
-        case TMEM_AUTH:       u = XLAT_tmem_op_u_creat; break;
-        case TMEM_RESTORE_NEW:u = XLAT_tmem_op_u_creat; break;
         default:              u = XLAT_tmem_op_u_gen ;  break;
         }
         XLAT_tmem_op(op, &cop);
@@ -293,7 +285,6 @@ struct client {
     long eph_count, eph_count_max;
     domid_t cli_id;
     xen_tmem_client_t info;
-    bool_t shared_auth_required;
     /* For save/restore/migration. */
     bool_t was_frozen;
     struct list_head persistent_invalidated_list;
