
[Xen-changelog] [xen master] tmem/libxc: Squash XEN_SYSCTL_TMEM_OP_[SET|SAVE]..



commit c4a398701ecf07a24ca391cbdc4a84fcca7c8d69
Author:     Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
AuthorDate: Fri Sep 30 10:53:01 2016 -0400
Commit:     Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CommitDate: Fri Sep 30 15:26:45 2016 -0400

    tmem/libxc: Squash XEN_SYSCTL_TMEM_OP_[SET|SAVE]..
    
    Specifically:
    
    XEN_SYSCTL_TMEM_OP_SET_[WEIGHT,COMPRESS] are now done via:
    
     XEN_SYSCTL_TMEM_SET_CLIENT_INFO
    
    and XEN_SYSCTL_TMEM_OP_SAVE_GET_[VERSION,MAXPOOLS,
    CLIENT_WEIGHT, CLIENT_FLAGS] can now be retrieved via:
    
     XEN_SYSCTL_TMEM_GET_CLIENT_INFO
    
    All this information is now in 'struct xen_tmem_client' and
    that is what we pass around.
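    
    As an editorial illustration (not part of this commit), here is a
    minimal sketch of how a toolstack caller exercises the two new
    subops, modeled on the xc_tmem_control() call sites visible in the
    diff below; 'xch', 'dom' and 'new_weight' are placeholder names:
    
        xen_tmem_client_t info;
    
        /* Fetch the client's current tmem settings. */
        if ( xc_tmem_control(xch, 0 /* pool_id */,
                             XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO,
                             dom /* cli_id */, sizeof(info) /* arg1 */,
                             0 /* arg2 */, &info) < 0 )
            return -1;
    
        /* Modify a field and push the whole structure back. */
        info.weight = new_weight; /* placeholder value */
        if ( xc_tmem_control(xch, 0 /* pool_id */,
                             XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO,
                             dom /* cli_id */, sizeof(info) /* arg1 */,
                             0 /* arg2 */, &info) < 0 )
            return -1;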
    
    We also bump the XEN_SYSCTL_INTERFACE_VERSION as we are
    re-using the numeric values of the deleted subcommands (and
    henceforth the information is retrieved differently).
    
    On the toolstack side, prior to this patch, xc_tmem_control
    would use the bounce buffer only when arg1 was set and the cmd
    was LIST. For 'XEN_SYSCTL_TMEM_OP_SET_[WEIGHT|COMPRESS]'
    that made sense, as 'arg1' carried the value. However, for
    the other subcommands (say XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID)
    'arg1' carried the length of 'buf'. If this is confusing,
    don't despair: the patch titled
    "tmem/xc_tmem_control: Rename 'arg1' to 'len' and 'arg2' to arg."
    takes care of that.
    
    The astute reader of the toolstack code will discover that
    we only used the bounce buffer for LIST, not for any other
    subcommand that used 'buf'! Which means that the contents
    of 'buf' would never be copied back to the caller's 'buf'!
    
    The author is not sure how this could possibly have worked; perhaps
    Xen 4.1 (when this was introduced) was more relaxed about the
    bounce buffer being enabled. Anyhow, this patch fixes
    xc_tmem_control to bounce the buffer for any subcommand that
    sets 'arg1'.
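    
    In outline (error handling elided, and the pre-existing
    DECLARE_HYPERCALL_BOUNCE declaration assumed from the surrounding
    code rather than shown in the hunk below), the bounce pattern
    after this patch is:
    
        /* Default: the hypervisor writes into the buffer (GET/LIST). */
        DECLARE_HYPERCALL_BOUNCE(buf, arg1, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    
        /* SET_CLIENT_INFO is the one subop where the caller supplies data. */
        if ( cmd == XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO )
            HYPERCALL_BOUNCE_SET_DIR(buf, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    
        if ( arg1 && xc_hypercall_bounce_pre(xch, buf) ) /* copy in / map */
            return -1;
    
        rc = do_sysctl(xch, &sysctl);
    
        if ( arg1 )
            xc_hypercall_bounce_post(xch, buf);          /* copy out / unmap */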
    
    Lastly, some of the checks in xc_tmem_[restore|save] are removed
    as they can never be reached (it is not even clear how they could
    have been reached in the original submission). One of them
    is the check of the weight against -1, when in fact the
    hypervisor would never have provided that value.
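    
    For orientation, the save stream written by xc_tmem_save() after
    this patch is, in order (an editorial sketch reconstructed from
    the hunk below; per-pool record details elided):
    
        int               marker   /* field_marker */
        xen_tmem_client_t info     /* version, maxpools, flags, weight */
        uint32_t          -1       /* separator */
        per-pool records  ...      /* uuid[2], n_pages, ... for up to
                                      info.maxpools pools */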
    
    Now the checks are simple - the hypercall always returns
    ->version and ->maxpools (mirroring how it was done
    prior to this patch). But if one wants to check whether a guest
    has any tmem activity, the patch titled
    "tmem: Batch and squash XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_
    [FLAGS,NPAGES,UUID] in one sub-call: XEN_SYSCTL_TMEM_OP_GET_POOLS."
    adds an ->nr_pools to check for that.
    
    Also we add the check for ->version and ->maxpools and remove
    the TODO.
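    
    Concretely, the hypervisor-side validation added in
    __tmemc_set_client_info() (see the tmem_control.c hunk below)
    rejects mismatched records up front:
    
        if ( info.version != TMEM_SPEC_VERSION )
            return -EOPNOTSUPP;
    
        if ( info.maxpools > MAX_POOLS_PER_DOMAIN )
            return -ERANGE;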
    
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 tools/libxc/xc_tmem.c             |  71 ++++++++++---------------
 tools/libxl/libxl.c               |  26 ++++++---
 tools/python/xen/lowlevel/xc/xc.c |   2 -
 xen/common/tmem.c                 |   2 +
 xen/common/tmem_control.c         | 107 ++++++++++++++++++++++----------------
 xen/include/public/sysctl.h       |  23 +++++---
 xen/include/xen/tmem_xen.h        |   2 +-
 7 files changed, 128 insertions(+), 105 deletions(-)

diff --git a/tools/libxc/xc_tmem.c b/tools/libxc/xc_tmem.c
index e1de16e..9e21d41 100644
--- a/tools/libxc/xc_tmem.c
+++ b/tools/libxc/xc_tmem.c
@@ -18,6 +18,7 @@
  */
 
 #include "xc_private.h"
+#include <assert.h>
 #include <xen/tmem.h>
 
 static int do_tmem_op(xc_interface *xch, tmem_op_t *op)
@@ -67,7 +68,10 @@ int xc_tmem_control(xc_interface *xch,
     sysctl.u.tmem_op.oid.oid[1] = 0;
     sysctl.u.tmem_op.oid.oid[2] = 0;
 
-    if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
+    if ( cmd == XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO )
+        HYPERCALL_BOUNCE_SET_DIR(buf, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( arg1 )
     {
         if ( buf == NULL )
         {
@@ -85,8 +89,8 @@ int xc_tmem_control(xc_interface *xch,
 
     rc = do_sysctl(xch, &sysctl);
 
-    if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
-            xc_hypercall_bounce_post(xch, buf);
+    if ( arg1 )
+        xc_hypercall_bounce_post(xch, buf);
 
     return rc;
 }
@@ -211,38 +215,29 @@ int xc_tmem_save(xc_interface *xch,
 {
     int marker = field_marker;
     int i, j;
-    uint32_t max_pools, version;
-    uint32_t weight, flags;
-    uint32_t pool_id;
+    uint32_t flags;
     uint32_t minusone = -1;
+    uint32_t pool_id;
     struct tmem_handle *h;
+    xen_tmem_client_t info;
 
     if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_BEGIN,dom,live,0,NULL) <= 0 )
         return 0;
 
     if ( write_exact(io_fd, &marker, sizeof(marker)) )
         return -1;
-    version = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION,0,0,0,NULL);
-    if ( write_exact(io_fd, &version, sizeof(version)) )
-        return -1;
-    max_pools = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS,0,0,0,NULL);
-    if ( write_exact(io_fd, &max_pools, sizeof(max_pools)) )
-        return -1;
-    if ( version == -1 || max_pools == -1 )
-        return -1;
-    if ( write_exact(io_fd, &minusone, sizeof(minusone)) )
-        return -1;
-    flags = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS,dom,0,0,NULL);
-    if ( write_exact(io_fd, &flags, sizeof(flags)) )
-        return -1;
-    weight = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT,dom,0,0,NULL);
-    if ( write_exact(io_fd, &weight, sizeof(weight)) )
+
+    if ( xc_tmem_control(xch, 0 /* pool_id */,
+                         XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO,
+                         dom /* cli_id */, sizeof(info) /* arg1 */, 0 /* arg2 */,
+                         &info) < 0 )
         return -1;
-    if ( flags == -1 || weight == -1 )
+
+    if ( write_exact(io_fd, &info, sizeof(info)) )
         return -1;
     if ( write_exact(io_fd, &minusone, sizeof(minusone)) )
         return -1;
-    for ( i = 0; i < max_pools; i++ )
+    for ( i = 0; i < info.maxpools; i++ )
     {
         uint64_t uuid[2];
         uint32_t n_pages;
@@ -378,35 +373,23 @@ static int xc_tmem_restore_new_pool(
 
 int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
 {
-    uint32_t this_max_pools, this_version;
     uint32_t pool_id;
     uint32_t minusone;
-    uint32_t weight, flags;
+    uint32_t flags;
+    xen_tmem_client_t info;
     int checksum = 0;
 
-    if ( read_exact(io_fd, &this_version, sizeof(this_version)) )
-        return -1;
-    if ( read_exact(io_fd, &this_max_pools, sizeof(this_max_pools)) )
-        return -1;
-    /* FIXME check here to ensure no version mismatch or maxpools mismatch */
-    if ( read_exact(io_fd, &minusone, sizeof(minusone)) )
-        return -1;
-    if ( minusone != -1 )
+    if ( read_exact(io_fd, &info, sizeof(info)) )
         return -1;
     if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN,dom,0,0,NULL) < 0 )
         return -1;
-    if ( read_exact(io_fd, &flags, sizeof(flags)) )
-        return -1;
-    if ( flags & TMEM_CLIENT_COMPRESS )
-        if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_COMPRESS,dom,1,0,NULL) < 0 )
-            return -1;
-    if ( flags & TMEM_CLIENT_FROZEN )
-        if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_FREEZE,dom,0,0,NULL) < 0 )
-            return -1;
-    if ( read_exact(io_fd, &weight, sizeof(weight)) )
-        return -1;
-    if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_WEIGHT,dom,0,0,NULL) < 0 )
+
+    if ( xc_tmem_control(xch, 0 /* pool_id */,
+                         XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO,
+                         dom /* cli_id */, sizeof(info) /* arg1 */, 0 /* arg2 */,
+                         &info) < 0 )
         return -1;
+
     if ( read_exact(io_fd, &minusone, sizeof(minusone)) )
         return -1;
     while ( read_exact(io_fd, &pool_id, sizeof(pool_id)) == 0 && pool_id != -1 )
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index ab44e3c..81f8361 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -6152,28 +6152,42 @@ out:
     return rc;
 }
 
-static int32_t tmem_setop_from_string(char *set_name)
+static int32_t tmem_setop_from_string(char *set_name, uint32_t val,
+                                      xen_tmem_client_t *info)
 {
     if (!strcmp(set_name, "weight"))
-        return XEN_SYSCTL_TMEM_OP_SET_WEIGHT;
+        info->weight = val;
     else if (!strcmp(set_name, "compress"))
-        return XEN_SYSCTL_TMEM_OP_SET_COMPRESS;
+        info->flags.u.compress = val;
     else
         return -1;
+
+    return 0;
 }
 
 int libxl_tmem_set(libxl_ctx *ctx, uint32_t domid, char* name, uint32_t set)
 {
     int r, rc;
-    int32_t subop = tmem_setop_from_string(name);
+    xen_tmem_client_t info;
     GC_INIT(ctx);
 
-    if (subop == -1) {
+    r = xc_tmem_control(ctx->xch, -1 /* pool_id */,
+                        XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO,
+                        domid, sizeof(info), 0 /* arg2 */, &info);
+    if (r < 0) {
+        LOGE(ERROR, "Can not get tmem data!");
+        rc = ERROR_FAIL;
+        goto out;
+    }
+    rc = tmem_setop_from_string(name, set, &info);
+    if (rc == -1) {
         LOGEV(ERROR, -1, "Invalid set, valid sets are <weight|compress>");
         rc = ERROR_INVAL;
         goto out;
     }
-    r = xc_tmem_control(ctx->xch, -1, subop, domid, set, 0, NULL);
+    r = xc_tmem_control(ctx->xch, -1 /* pool_id */,
+                        XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO,
+                        domid, sizeof(info), 0 /* arg2 */, &info);
     if (r < 0) {
         LOGE(ERROR, "Can not set tmem %s", name);
         rc = ERROR_FAIL;
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 287f590..a117737 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -1640,8 +1640,6 @@ static PyObject *pyxc_tmem_control(XcObject *self,
         case XEN_SYSCTL_TMEM_OP_THAW:
         case XEN_SYSCTL_TMEM_OP_FREEZE:
         case XEN_SYSCTL_TMEM_OP_DESTROY:
-        case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
-        case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
         default:
             break;
     }
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index abc2f67..ab354b6 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -843,6 +843,8 @@ static struct client *client_create(domid_t cli_id)
     rcu_unlock_domain(d);
 
     client->cli_id = cli_id;
+    client->info.version = TMEM_SPEC_VERSION;
+    client->info.maxpools = MAX_POOLS_PER_DOMAIN;
     client->info.flags.u.compress = tmem_compression_enabled();
     client->shared_auth_required = tmem_shared_auth();
     for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
diff --git a/xen/common/tmem_control.c b/xen/common/tmem_control.c
index fc20a9f..b7d65a1 100644
--- a/xen/common/tmem_control.c
+++ b/xen/common/tmem_control.c
@@ -258,43 +258,57 @@ static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len,
     return 0;
 }
 
-static int __tmemc_set_var(struct client *client, uint32_t subop, uint32_t arg1)
+static int __tmemc_set_client_info(struct client *client,
+                                   XEN_GUEST_HANDLE(xen_tmem_client_t) buf)
 {
-    domid_t cli_id = client->cli_id;
+    domid_t cli_id;
     uint32_t old_weight;
+    xen_tmem_client_t info = { };
 
-    switch (subop)
+    ASSERT(client);
+
+    if ( copy_from_guest(&info, buf, 1) )
+        return -EFAULT;
+
+    if ( info.version != TMEM_SPEC_VERSION )
+        return -EOPNOTSUPP;
+
+    if ( info.maxpools > MAX_POOLS_PER_DOMAIN )
+        return -ERANGE;
+
+    cli_id = client->cli_id;
+    if ( info.weight != client->info.weight )
     {
-    case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
         old_weight = client->info.weight;
-        client->info.weight = arg1;
+        client->info.weight = info.weight;
         tmem_client_info("tmem: weight set to %d for %s=%d\n",
-                        arg1, tmem_cli_id_str, cli_id);
+                         info.weight, tmem_cli_id_str, cli_id);
         atomic_sub(old_weight,&tmem_global.client_weight_total);
         atomic_add(client->info.weight,&tmem_global.client_weight_total);
-        break;
-    case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
-        client->info.flags.u.compress = arg1 ? 1 : 0;
+    }
+
+
+    if ( info.flags.u.compress != client->info.flags.u.compress )
+    {
+        client->info.flags.u.compress = info.flags.u.compress;
         tmem_client_info("tmem: compression %s for %s=%d\n",
-            arg1 ? "enabled" : "disabled",tmem_cli_id_str,cli_id);
-        break;
-    default:
-        tmem_client_warn("tmem: unknown subop %d for tmemc_set_var\n", subop);
-        return -1;
+                         info.flags.u.compress ? "enabled" : "disabled",
+                         tmem_cli_id_str,cli_id);
     }
     return 0;
 }
 
-static int tmemc_set_var(domid_t cli_id, uint32_t subop, uint32_t arg1)
+static int tmemc_set_client_info(domid_t cli_id,
+                                 XEN_GUEST_HANDLE(xen_tmem_client_t) info)
 {
     struct client *client;
-    int ret = -1;
+    int ret = -ENOENT;
 
     if ( cli_id == TMEM_CLI_ID_NULL )
     {
         list_for_each_entry(client,&tmem_global.client_list,client_list)
         {
-            ret =  __tmemc_set_var(client, subop, arg1);
+            ret =  __tmemc_set_client_info(client, info);
             if (ret)
                 break;
         }
@@ -303,13 +317,37 @@ static int tmemc_set_var(domid_t cli_id, uint32_t subop, uint32_t arg1)
     {
         client = tmem_client_from_cli_id(cli_id);
         if ( client )
-            ret = __tmemc_set_var(client, subop, arg1);
+            ret = __tmemc_set_client_info(client, info);
     }
     return ret;
 }
 
-static int tmemc_save_subop(int cli_id, uint32_t pool_id,
-                        uint32_t subop, tmem_cli_va_param_t buf, uint32_t arg1)
+static int tmemc_get_client_info(int cli_id,
+                                 XEN_GUEST_HANDLE(xen_tmem_client_t) info)
+{
+    struct client *client = tmem_client_from_cli_id(cli_id);
+
+    if ( client )
+    {
+        if ( copy_to_guest(info, &client->info, 1) )
+            return  -EFAULT;
+    }
+    else
+    {
+        static const xen_tmem_client_t generic = {
+            .version = TMEM_SPEC_VERSION,
+            .maxpools = MAX_POOLS_PER_DOMAIN
+        };
+
+        if ( copy_to_guest(info, &generic, 1) )
+            return -EFAULT;
+    }
+
+    return 0;
+}
+
+static int tmemc_save_subop(int cli_id, uint32_t pool_id, uint32_t subop,
+                            XEN_GUEST_HANDLE_PARAM(char) buf, uint32_t arg1)
 {
     struct client *client = tmem_client_from_cli_id(cli_id);
     struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
@@ -318,23 +356,6 @@ static int tmemc_save_subop(int cli_id, uint32_t pool_id,
 
     switch(subop)
     {
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION:
-        rc = TMEM_SPEC_VERSION;
-        break;
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS:
-        rc = MAX_POOLS_PER_DOMAIN;
-        break;
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT:
-        if ( client == NULL )
-            break;
-        rc = client->info.weight == -1 ? -2 : client->info.weight;
-        break;
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS:
-        if ( client == NULL )
-            break;
-        rc = (client->info.flags.u.compress ? TMEM_CLIENT_COMPRESS : 0 ) |
-             (client->was_frozen ? TMEM_CLIENT_FROZEN : 0 );
-        break;
     case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS:
          if ( pool == NULL )
              break;
@@ -386,17 +407,15 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
         ret = tmemc_list(op->cli_id,
                         guest_handle_cast(op->u.buf, char), op->arg1, op->arg2);
         break;
-    case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
-    case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
-        ret = tmemc_set_var(op->cli_id, cmd, op->arg1);
+    case XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO:
+        ret = tmemc_set_client_info(op->cli_id, op->u.client);
         break;
     case XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB:
         ret = tmem_freeable_pages() >> (20 - PAGE_SHIFT);
         break;
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION:
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS:
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT:
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS:
+    case XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO:
+        ret = tmemc_get_client_info(op->cli_id, op->u.client);
+        break;
     case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS:
     case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES:
     case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID:
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index af128a8..86a1d91 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -36,7 +36,7 @@
 #include "physdev.h"
 #include "tmem.h"
 
-#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000D
+#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000E
 
 /*
  * Read console content from Xen buffer ring.
@@ -767,14 +767,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
 #define XEN_SYSCTL_TMEM_OP_FLUSH                  2
 #define XEN_SYSCTL_TMEM_OP_DESTROY                3
 #define XEN_SYSCTL_TMEM_OP_LIST                   4
-#define XEN_SYSCTL_TMEM_OP_SET_WEIGHT             5
-#define XEN_SYSCTL_TMEM_OP_SET_COMPRESS           7
+#define XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO        5
+#define XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO        6
 #define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB      8
 #define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN             10
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION       11
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS      12
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT 13
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS  15
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS    16
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES   17
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID     18
@@ -796,7 +792,14 @@ struct tmem_handle {
     xen_tmem_oid_t oid;
 };
 
+/*
+ * XEN_SYSCTL_TMEM_OP_[GET,SAVE]_CLIENT uses the 'client' in
+ * xen_tmem_op with this structure, which is mostly used during migration.
+ */
 struct xen_tmem_client {
+    uint32_t version;   /* If mismatched we will get XEN_EOPNOTSUPP. */
+    uint32_t maxpools;  /* If greater than what hypervisor supports, will get
+                           XEN_ERANGE. */
     union {             /* See TMEM_CLIENT_[COMPRESS,FROZEN] */
         uint32_t raw;
         struct {
@@ -807,6 +810,8 @@ struct xen_tmem_client {
     } flags;
     uint32_t weight;
 };
+typedef struct xen_tmem_client xen_tmem_client_t;
+DEFINE_XEN_GUEST_HANDLE(xen_tmem_client_t);
 
 struct xen_sysctl_tmem_op {
     uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
@@ -819,7 +824,9 @@ struct xen_sysctl_tmem_op {
     uint32_t pad;       /* Padding so structure is the same under 32 and 64. */
     xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
     union {
-        XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
+        XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save/restore */
+        XEN_GUEST_HANDLE_64(xen_tmem_client_t) client; /* IN/OUT for */
+                        /*  XEN_SYSCTL_TMEM_OP_[GET,SAVE]_CLIENT. */
     } u;
 };
 typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 955f3df..2b205bf 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -293,7 +293,7 @@ struct client {
     struct list_head ephemeral_page_list;
     long eph_count, eph_count_max;
     domid_t cli_id;
-    struct xen_tmem_client info;
+    xen_tmem_client_t info;
     bool_t shared_auth_required;
     /* For save/restore/migration. */
     bool_t was_frozen;
--
generated by git-patchbot for /home/xen/git/xen.git#master
