
[Xen-changelog] [xen master] xen/libxc/tmem: Replace TMEM_RESTORE_NEW with XEN_SYSCTL_TMEM_OP_SET_POOLS



commit 8cd9fa11e106104a6df2e4c670d512bb4f239845
Author:     Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
AuthorDate: Mon Nov 28 01:10:50 2016 -0500
Commit:     Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CommitDate: Wed Apr 5 09:35:41 2017 -0400

    xen/libxc/tmem: Replace TMEM_RESTORE_NEW with XEN_SYSCTL_TMEM_OP_SET_POOLS
    
    This used to be done under TMEM_RESTORE_NEW, which was a hypercall
    accessible by the guest. However, there are a couple of reasons
    not to do it that way:
     - TMEM_RESTORE_NEW did no checking of the domid, which meant that
       any guest could create TMEM pools for other guests.
     - The guest can already create pools using TMEM_NEW_POOL
       (which is limited to the guest doing the hypercall).
     - This functionality is only needed during migration - there
       is no need for the guest to have it.
    
    However, to move this we also have to allocate the 'struct domain'
    ->tmem pointer. It is set to NULL by default and was previously
    initialized via the guest's do_tmem() hypercalls. Presumably that was
    the initial reason that TMEM_RESTORE_NEW was among the guest-accessible
    hypercalls.
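
    For reference, the toolstack side of the new flow is a single
    XEN_SYSCTL_TMEM_OP_SET_POOLS invocation. A minimal sketch, distilled
    from the libxc change below (flag values are illustrative and error
    handling is elided):

        xen_tmem_pool_info_t pool = {
            .flags.raw = TMEM_POOL_PERSIST, /* illustrative flags */
            .id = pool_id,
            .n_pages = 0,                   /* must be zero for SET_POOLS */
            .uuid[0] = uuid_lo,
            .uuid[1] = uuid_hi,
        };
        int rc = xc_tmem_control(xch, pool_id,
                                 XEN_SYSCTL_TMEM_OP_SET_POOLS,
                                 cli_id, sizeof(pool), 0 /* arg */, &pool);
        /* Negative rc on error; on success the documented return is the
         * number of pool entries processed. */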
    
    Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx> [libxc change]
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx> [hypervisor changes]
    Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 tools/libxc/xc_tmem.c          | 22 +++++++++---------
 xen/common/tmem.c              | 30 ++++++++++++-------------
 xen/common/tmem_control.c      | 51 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/sysctl.h    | 11 ++++++---
 xen/include/public/tmem.h      |  5 +++--
 xen/include/xen/tmem_control.h |  4 ++++
 xen/include/xen/tmem_xen.h     |  1 -
 7 files changed, 93 insertions(+), 31 deletions(-)

diff --git a/tools/libxc/xc_tmem.c b/tools/libxc/xc_tmem.c
index 51d11ef..181de48 100644
--- a/tools/libxc/xc_tmem.c
+++ b/tools/libxc/xc_tmem.c
@@ -385,16 +385,18 @@ static int xc_tmem_restore_new_pool(
                     uint64_t uuid_lo,
                     uint64_t uuid_hi)
 {
-    tmem_op_t op;
-
-    op.cmd = TMEM_RESTORE_NEW;
-    op.pool_id = pool_id;
-    op.u.creat.arg1 = cli_id;
-    op.u.creat.flags = flags;
-    op.u.creat.uuid[0] = uuid_lo;
-    op.u.creat.uuid[1] = uuid_hi;
-
-    return do_tmem_op(xch, &op);
+    xen_tmem_pool_info_t pool = {
+        .flags.raw = flags,
+        .id = pool_id,
+        .n_pages = 0,
+        .uuid[0] = uuid_lo,
+        .uuid[1] = uuid_hi,
+    };
+
+    return xc_tmem_control(xch, pool_id,
+                           XEN_SYSCTL_TMEM_OP_SET_POOLS,
+                           cli_id, sizeof(pool),
+                           0 /* arg */, &pool);
 }
 
 int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 6d5de5b..ee43f13 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -804,7 +804,7 @@ static void pool_flush(struct tmem_pool *pool, domid_t cli_id)
 
 /************ CLIENT MANIPULATION OPERATIONS **************************/
 
-static struct client *client_create(domid_t cli_id)
+struct client *client_create(domid_t cli_id)
 {
     struct client *client = xzalloc(struct client);
     int i, shift;
@@ -1435,9 +1435,9 @@ static int do_tmem_destroy_pool(uint32_t pool_id)
     return 1;
 }
 
-static int do_tmem_new_pool(domid_t this_cli_id,
-                                     uint32_t d_poolid, uint32_t flags,
-                                     uint64_t uuid_lo, uint64_t uuid_hi)
+int do_tmem_new_pool(domid_t this_cli_id,
+                     uint32_t d_poolid, uint32_t flags,
+                     uint64_t uuid_lo, uint64_t uuid_hi)
 {
     struct client *client;
     domid_t cli_id;
@@ -1908,21 +1908,19 @@ long do_tmem_op(tmem_cli_op_t uops)
     /* Acquire write lock for all commands at first. */
     write_lock(&tmem_rwlock);
 
-    if ( op.cmd == TMEM_CONTROL )
+    switch ( op.cmd )
     {
+    case TMEM_CONTROL:
+    case TMEM_RESTORE_NEW:
         rc = -EOPNOTSUPP;
-    }
-    else if ( op.cmd == TMEM_AUTH )
-    {
+        break;
+
+    case TMEM_AUTH:
         rc = tmemc_shared_pool_auth(op.u.creat.arg1,op.u.creat.uuid[0],
                          op.u.creat.uuid[1],op.u.creat.flags);
-    }
-    else if ( op.cmd == TMEM_RESTORE_NEW )
-    {
-        rc = do_tmem_new_pool(op.u.creat.arg1, op.pool_id, op.u.creat.flags,
-                         op.u.creat.uuid[0], op.u.creat.uuid[1]);
-    }
-    else {
+        break;
+
+    default:
     /*
         * For other commands, create per-client tmem structure dynamically on
         * first use by client.
@@ -1999,6 +1997,8 @@ long do_tmem_op(tmem_cli_op_t uops)
                 tmem_stats.errored_tmem_ops++;
             return rc;
         }
+        break;
+
     }
 out:
     write_unlock(&tmem_rwlock);
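
For the guest, the upshot of the switch above is that the retired command now
fails cleanly. A sketch of the guest-side call that is no longer serviced
(illustrative; assumes a Linux-style HYPERVISOR_tmem_op() hypercall wrapper):

    struct tmem_op op = {
        .cmd = TMEM_RESTORE_NEW,
        .pool_id = 0,                 /* illustrative */
    };
    int rc = HYPERVISOR_tmem_op(&op); /* now returns -EOPNOTSUPP */
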
diff --git a/xen/common/tmem_control.c b/xen/common/tmem_control.c
index ddd9cfe..3e99257 100644
--- a/xen/common/tmem_control.c
+++ b/xen/common/tmem_control.c
@@ -402,6 +402,54 @@ static int tmemc_get_pool(int cli_id,
     return rc ? : idx;
 }
 
+static int tmemc_set_pools(int cli_id,
+                           XEN_GUEST_HANDLE(xen_tmem_pool_info_t) pools,
+                           uint32_t len)
+{
+    unsigned int i;
+    int rc = 0;
+    unsigned int nr = len / sizeof(xen_tmem_pool_info_t);
+    struct client *client = tmem_client_from_cli_id(cli_id);
+
+    if ( len % sizeof(xen_tmem_pool_info_t) )
+        return -EINVAL;
+
+    if ( nr > MAX_POOLS_PER_DOMAIN )
+        return -E2BIG;
+
+    if ( !guest_handle_okay(pools, nr) )
+        return -EINVAL;
+
+    if ( !client )
+    {
+        client = client_create(cli_id);
+        if ( !client )
+            return -ENOMEM;
+    }
+    for ( i = 0; i < nr; i++ )
+    {
+        xen_tmem_pool_info_t pool;
+
+        if ( __copy_from_guest_offset(&pool, pools, i, 1) )
+            return -EFAULT;
+
+        if ( pool.n_pages )
+            return -EINVAL;
+
+        rc = do_tmem_new_pool(cli_id, pool.id, pool.flags.raw,
+                              pool.uuid[0], pool.uuid[1]);
+        if ( rc < 0 )
+            break;
+
+        pool.id = rc;
+        if ( __copy_to_guest_offset(pools, i, &pool, 1) )
+            return -EFAULT;
+    }
+
+    /* And return how many pools we have processed. */
+    return rc ? : i;
+}
+
 int tmem_control(struct xen_sysctl_tmem_op *op)
 {
     int ret;
@@ -438,6 +486,9 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
     case XEN_SYSCTL_TMEM_OP_GET_POOLS:
         ret = tmemc_get_pool(op->cli_id, op->u.pool, op->len);
         break;
+    case XEN_SYSCTL_TMEM_OP_SET_POOLS: /* TMEM_RESTORE_NEW */
+        ret = tmemc_set_pools(op->cli_id, op->u.pool, op->len);
+        break;
     default:
         ret = do_tmem_control(op);
         break;
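
A note on tmemc_set_pools() above: 'len' must be an exact multiple of
sizeof(xen_tmem_pool_info_t) and may describe at most MAX_POOLS_PER_DOMAIN
entries. A hypothetical invocation restoring two pools (sketch; the guest
handle setup is elided):

    /* Two descriptors, each with n_pages == 0 as required. */
    ret = tmemc_set_pools(cli_id, pools_handle,
                          2 * sizeof(xen_tmem_pool_info_t));
    /* Documented result: the number of entries processed, or a negative
     * value on failure. On success each entry's .id field has been
     * rewritten with the id returned by do_tmem_new_pool(); on a partial
     * failure the negative rc is returned even though earlier entries
     * were already created. */
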
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index 00f5e77..c03d027 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -770,6 +770,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
 #define XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO        6
 #define XEN_SYSCTL_TMEM_OP_GET_POOLS              7
 #define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB      8
+#define XEN_SYSCTL_TMEM_OP_SET_POOLS              9
 #define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN             10
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE     19
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV      20
@@ -812,10 +813,14 @@ typedef struct xen_tmem_client xen_tmem_client_t;
 DEFINE_XEN_GUEST_HANDLE(xen_tmem_client_t);
 
 /*
- * XEN_SYSCTL_TMEM_OP_GET_POOLS uses the 'pool' array in
- * xen_sysctl_tmem_op with this structure. The hypercall will
+ * The XEN_SYSCTL_TMEM_OP_[GET|SET]_POOLS hypercalls use the 'pool'
+ * array in xen_sysctl_tmem_op with this structure.
+ * The XEN_SYSCTL_TMEM_OP_GET_POOLS hypercall will
  * return the number of entries in 'pool' or a negative value
  * if an error was encountered.
+ * The XEN_SYSCTL_TMEM_OP_SET_POOLS hypercall will return the number
+ * of entries in 'pool' processed, or a negative value if an error
+ * was encountered.
  */
 struct xen_tmem_pool_info {
     union {
@@ -830,7 +835,7 @@ struct xen_tmem_pool_info {
         } u;
     } flags;
     uint32_t id;                  /* Less than tmem_client.maxpools. */
-    uint64_t n_pages;
+    uint64_t n_pages;             /* Zero on XEN_SYSCTL_TMEM_OP_SET_POOLS. */
     uint64_aligned_t uuid[2];
 };
 typedef struct xen_tmem_pool_info xen_tmem_pool_info_t;
diff --git a/xen/include/public/tmem.h b/xen/include/public/tmem.h
index 2d805fb..b9f3537 100644
--- a/xen/include/public/tmem.h
+++ b/xen/include/public/tmem.h
@@ -53,7 +53,8 @@
 
 /* Privileged commands to HYPERVISOR_tmem_op() */
 #define TMEM_AUTH                 101
-#define TMEM_RESTORE_NEW          102
+#define TMEM_RESTORE_NEW          102 /* Now called via XEN_SYSCTL_tmem_op as
+                                         XEN_SYSCTL_TMEM_OP_SET_POOLS. */
 
 /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
 #define TMEM_POOL_PERSIST          1
@@ -92,7 +93,7 @@ struct tmem_op {
             uint64_t uuid[2];
             uint32_t flags;
             uint32_t arg1;
-        } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
+        } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH */
         struct {
 #if __XEN_INTERFACE_VERSION__ < 0x00040600
             uint64_t oid[3];
diff --git a/xen/include/xen/tmem_control.h b/xen/include/xen/tmem_control.h
index 44bc07f..91c185e 100644
--- a/xen/include/xen/tmem_control.h
+++ b/xen/include/xen/tmem_control.h
@@ -18,6 +18,10 @@ extern rwlock_t tmem_rwlock;
 int tmem_evict(void);
 int do_tmem_control(struct xen_sysctl_tmem_op *op);
 
+struct client *client_create(domid_t cli_id);
+int do_tmem_new_pool(domid_t this_cli_id, uint32_t d_poolid, uint32_t flags,
+                     uint64_t uuid_lo, uint64_t uuid_hi);
+
 #endif /* CONFIG_TMEM */
 
 #endif /* __XEN_TMEM_CONTROL_H__ */
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 13cf7bc..b6bd61b 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -199,7 +199,6 @@ static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
         {
         case TMEM_NEW_POOL:   u = XLAT_tmem_op_u_creat; break;
         case TMEM_AUTH:       u = XLAT_tmem_op_u_creat; break;
-        case TMEM_RESTORE_NEW:u = XLAT_tmem_op_u_creat; break;
         default:              u = XLAT_tmem_op_u_gen ;  break;
         }
         XLAT_tmem_op(op, &cop);
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog