[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v1 07/12] tmem/libxc: Squash XEN_SYSCTL_TMEM_OP_[SET|SAVE]..



Specifically:

XEN_SYSCTL_TMEM_OP_SET_[WEIGHT,COMPRESS] are now done via:

 XEN_SYSCTL_TMEM_SET_CLIENT_INFO

and XEN_SYSCTL_TMEM_OP_SAVE_GET_[VERSION,MAXPOOLS,
CLIENT_WEIGHT, CLIENT_FLAGS] can now be retrieved via:

 XEN_SYSCTL_TMEM_GET_CLIENT_INFO

All this information is now in 'struct tmem_client' and
that is what we pass around.

Hypervisor wise we had to do a bit of casting. The
'struct xen_sysctl_tmem_op'->buf variable is a pointer to
char. Casting that using the guest_handle_cast to a structure
(struct tmem_client) does not work. Hence we first have to
cast it a void and then to xen_sysctl_tmem_client_t.
This could be improved by having an union and in fact the
patch titled:
"tmem: Handle 'struct tmem_info' as a seperate field in the"
does that. It could be squashed in here but that can make
it harder to review.

On the toolstack, prior to this patch, the xc_tmem_control
would use the bounce buffer only when arg1 was set and the cmd
was to list. With the 'XEN_SYSCTL_TMEM_OP_SET_[WEIGHT|COMPRESS]'
that made sense as the 'arg1' would have the value. However
for the other ones (say XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID)
the 'arg1' would be the length of the 'buf'. If this is
confusing don't despair, the patch titled:
tmem/xc_tmem_control: Rename 'arg1' to 'len' and 'arg2' to arg.
takes care of that.

The astute reader of the toolstack code will discover that
we only used the bounce buffer for LIST, not for any other
subcommands that used 'buf'!?! Which means that the contents
of 'buf' would never be copied back to the caller's 'buf'!

I am not sure how this could possibly work, perhaps Xen 4.1
(when this was introduced) was more relaxed about the bounce buffer
being enabled. Anyhow this fixes xc_tmem_control to do it for
any subcommand that has 'arg1'.

Lastly some of the checks in xc_tmem_[restore|save] are removed
as they can't ever be reached (not even sure how they could
have been reached in the original submission). One of them
is the check for the weight against -1 when in fact the
hypervisor would never have provided that value.

Now the checks are simple - as the hypercall always returns
->version and ->maxpools (which is mirroring how it was done
prior to this patch). But if one wants to check if a guest
has any tmem activity then the patch titled
"tmem: Batch and squash XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_
[FLAGS,NPAGES,UUID] in one sub-call: XEN_SYSCTL_TMEM_OP_GET_POOLS."
adds an ->nr_pools to check for that.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>

v1: New submission.
---
 tools/libxc/xc_tmem.c             | 72 ++++++++++++----------------
 tools/libxl/libxl.c               | 26 +++++++---
 tools/python/xen/lowlevel/xc/xc.c |  2 -
 xen/common/tmem_control.c         | 99 ++++++++++++++++++++++-----------------
 xen/include/public/sysctl.h       | 18 ++++---
 xen/include/xen/tmem_xen.h        |  2 +-
 6 files changed, 117 insertions(+), 102 deletions(-)

diff --git a/tools/libxc/xc_tmem.c b/tools/libxc/xc_tmem.c
index 24c8b43..74d2d85 100644
--- a/tools/libxc/xc_tmem.c
+++ b/tools/libxc/xc_tmem.c
@@ -18,6 +18,7 @@
  */
 
 #include "xc_private.h"
+#include <assert.h>
 #include <xen/tmem.h>
 
 static int do_tmem_op(xc_interface *xch, tmem_op_t *op)
@@ -67,7 +68,12 @@ int xc_tmem_control(xc_interface *xch,
     sysctl.u.tmem_op.oid.oid[1] = 0;
     sysctl.u.tmem_op.oid.oid[2] = 0;
 
-    if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
+    if ( cmd == XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO )
+    {
+        HYPERCALL_BOUNCE_SET_DIR(buf, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    }
+
+    if ( arg1 )
     {
         if ( buf == NULL )
         {
@@ -85,8 +91,8 @@ int xc_tmem_control(xc_interface *xch,
 
     rc = do_sysctl(xch, &sysctl);
 
-    if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
-            xc_hypercall_bounce_post(xch, buf);
+    if ( arg1 )
+        xc_hypercall_bounce_post(xch, buf);
 
     return rc;
 }
@@ -211,38 +217,29 @@ int xc_tmem_save(xc_interface *xch,
 {
     int marker = field_marker;
     int i, j;
-    uint32_t max_pools, version;
-    uint32_t weight, flags;
-    uint32_t pool_id;
+    uint32_t flags;
     uint32_t minusone = -1;
+    uint32_t pool_id;
     struct tmem_handle *h;
+    xen_sysctl_tmem_client_t info;
 
     if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_BEGIN,dom,live,0,NULL) 
<= 0 )
         return 0;
 
     if ( write_exact(io_fd, &marker, sizeof(marker)) )
         return -1;
-    version = 
xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION,0,0,0,NULL);
-    if ( write_exact(io_fd, &version, sizeof(version)) )
-        return -1;
-    max_pools = 
xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS,0,0,0,NULL);
-    if ( write_exact(io_fd, &max_pools, sizeof(max_pools)) )
-        return -1;
-    if ( version == -1 || max_pools == -1 )
-        return -1;
-    if ( write_exact(io_fd, &minusone, sizeof(minusone)) )
-        return -1;
-    flags = 
xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS,dom,0,0,NULL);
-    if ( write_exact(io_fd, &flags, sizeof(flags)) )
-        return -1;
-    weight = 
xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT,dom,0,0,NULL);
-    if ( write_exact(io_fd, &weight, sizeof(weight)) )
+
+    if ( xc_tmem_control(xch, 0 /* pool_id */,
+                         XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO,
+                         dom /* cli_id */, sizeof(info) /* arg1 */, 0 /* arg2 
*/,
+                         &info) < 0 )
         return -1;
-    if ( flags == -1 || weight == -1 )
+
+    if ( write_exact(io_fd, &info, sizeof(info)) )
         return -1;
     if ( write_exact(io_fd, &minusone, sizeof(minusone)) )
         return -1;
-    for ( i = 0; i < max_pools; i++ )
+    for ( i = 0; i < info.maxpools; i++ )
     {
         uint64_t uuid[2];
         uint32_t n_pages;
@@ -378,35 +375,24 @@ static int xc_tmem_restore_new_pool(
 
 int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
 {
-    uint32_t this_max_pools, this_version;
     uint32_t pool_id;
     uint32_t minusone;
-    uint32_t weight, flags;
+    uint32_t flags;
+    xen_sysctl_tmem_client_t info;
     int checksum = 0;
 
-    if ( read_exact(io_fd, &this_version, sizeof(this_version)) )
-        return -1;
-    if ( read_exact(io_fd, &this_max_pools, sizeof(this_max_pools)) )
+    if ( read_exact(io_fd, &info, sizeof(info)) )
         return -1;
     /* FIXME check here to ensure no version mismatch or maxpools mismatch */
-    if ( read_exact(io_fd, &minusone, sizeof(minusone)) )
-        return -1;
-    if ( minusone != -1 )
-        return -1;
     if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN,dom,0,0,NULL) 
< 0 )
         return -1;
-    if ( read_exact(io_fd, &flags, sizeof(flags)) )
-        return -1;
-    if ( flags & TMEM_CLIENT_COMPRESS )
-        if ( 
xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_COMPRESS,dom,1,0,NULL) < 0 )
-            return -1;
-    if ( flags & TMEM_CLIENT_FROZEN )
-        if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_FREEZE,dom,0,0,NULL) < 0 
)
-            return -1;
-    if ( read_exact(io_fd, &weight, sizeof(weight)) )
-        return -1;
-    if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_WEIGHT,dom,0,0,NULL) < 0 
)
+
+    if ( xc_tmem_control(xch, 0 /* pool_id */,
+                         XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO,
+                         dom /* cli_id */, sizeof(info) /* arg1 */, 0 /* arg2 
*/,
+                         &info) < 0 )
         return -1;
+
     if ( read_exact(io_fd, &minusone, sizeof(minusone)) )
         return -1;
     while ( read_exact(io_fd, &pool_id, sizeof(pool_id)) == 0 && pool_id != -1 
)
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index e3ee49c..2f50e61 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -6061,28 +6061,42 @@ out:
     return rc;
 }
 
-static int32_t tmem_setop_from_string(char *set_name)
+static int32_t tmem_setop_from_string(char *set_name, uint32_t val,
+                                      struct tmem_client *info)
 {
     if (!strcmp(set_name, "weight"))
-        return XEN_SYSCTL_TMEM_OP_SET_WEIGHT;
+        info->weight = val;
     else if (!strcmp(set_name, "compress"))
-        return XEN_SYSCTL_TMEM_OP_SET_COMPRESS;
+        info->flags.u.compress = val;
     else
         return -1;
+
+    return 0;
 }
 
 int libxl_tmem_set(libxl_ctx *ctx, uint32_t domid, char* name, uint32_t set)
 {
     int r, rc;
-    int32_t subop = tmem_setop_from_string(name);
+    struct tmem_client info;
     GC_INIT(ctx);
 
-    if (subop == -1) {
+    r = xc_tmem_control(ctx->xch, -1 /* pool_id */,
+                        XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO,
+                        domid, sizeof(info), 0 /* arg2 */, &info);
+    if (r < 0) {
+        LOGE(ERROR, "Can not get tmem data!");
+        rc = ERROR_FAIL;
+        goto out;
+    }
+    rc = tmem_setop_from_string(name, set, &info);
+    if (rc == -1) {
         LOGEV(ERROR, -1, "Invalid set, valid sets are <weight|compress>");
         rc = ERROR_INVAL;
         goto out;
     }
-    r = xc_tmem_control(ctx->xch, -1, subop, domid, set, 0, NULL);
+    r = xc_tmem_control(ctx->xch, -1 /* pool_id */,
+                        XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO,
+                        domid, sizeof(info), 0 /* arg2 */, &info);
     if (r < 0) {
         LOGE(ERROR, "Can not set tmem %s", name);
         rc = ERROR_FAIL;
diff --git a/tools/python/xen/lowlevel/xc/xc.c 
b/tools/python/xen/lowlevel/xc/xc.c
index 287f590..a117737 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -1640,8 +1640,6 @@ static PyObject *pyxc_tmem_control(XcObject *self,
         case XEN_SYSCTL_TMEM_OP_THAW:
         case XEN_SYSCTL_TMEM_OP_FREEZE:
         case XEN_SYSCTL_TMEM_OP_DESTROY:
-        case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
-        case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
         default:
             break;
     }
diff --git a/xen/common/tmem_control.c b/xen/common/tmem_control.c
index ba003a8..8bb9d48 100644
--- a/xen/common/tmem_control.c
+++ b/xen/common/tmem_control.c
@@ -258,43 +258,58 @@ static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t 
buf, uint32_t len,
     return 0;
 }
 
-static int __tmemc_set_var(struct client *client, uint32_t subop, uint32_t 
arg1)
+static int __tmemc_set_var(struct client *client, uint32_t subop,
+                           XEN_GUEST_HANDLE_PARAM(xen_sysctl_tmem_client_t) 
buf)
 {
     domid_t cli_id = client->cli_id;
     uint32_t old_weight;
+    xen_sysctl_tmem_client_t info = { };
 
-    switch (subop)
+    ASSERT(client);
+
+    if ( subop != XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO )
+    {
+        tmem_client_warn("tmem: unknown subop %d for tmemc_set_var\n", subop);
+        return -1;
+    }
+    if ( copy_from_guest(&info, buf, 1) )
+        return -EFAULT;
+
+    if ( info.weight != client->info.weight )
     {
-    case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
         old_weight = client->info.weight;
-        client->info.weight = arg1;
+        client->info.weight = info.weight;
         tmem_client_info("tmem: weight set to %d for %s=%d\n",
-                        arg1, tmem_cli_id_str, cli_id);
+                         info.weight, tmem_cli_id_str, cli_id);
         atomic_sub(old_weight,&tmem_global.client_weight_total);
         atomic_add(client->info.weight,&tmem_global.client_weight_total);
-        break;
-    case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
-        client->info.flags.u.compress = arg1 ? 1 : 0;
+    }
+
+
+    if ( info.flags.u.compress != client->info.flags.u.compress )
+    {
+        client->info.flags.u.compress = info.flags.u.compress;
         tmem_client_info("tmem: compression %s for %s=%d\n",
-            arg1 ? "enabled" : "disabled",tmem_cli_id_str,cli_id);
-        break;
-    default:
-        tmem_client_warn("tmem: unknown subop %d for tmemc_set_var\n", subop);
-        return -1;
+                         info.flags.u.compress ? "enabled" : "disabled",
+                         tmem_cli_id_str,cli_id);
     }
     return 0;
 }
 
-static int tmemc_set_var(domid_t cli_id, uint32_t subop, uint32_t arg1)
+static int tmemc_set_var(domid_t cli_id, uint32_t subop,
+                         XEN_GUEST_HANDLE_PARAM(void) buf)
 {
     struct client *client;
-    int ret = -1;
+    int ret = -ENOENT;
+    XEN_GUEST_HANDLE_PARAM(xen_sysctl_tmem_client_t) info;
+
+    info = guest_handle_cast(buf, xen_sysctl_tmem_client_t);
 
     if ( cli_id == TMEM_CLI_ID_NULL )
     {
         list_for_each_entry(client,&tmem_global.client_list,client_list)
         {
-            ret =  __tmemc_set_var(client, subop, arg1);
+            ret =  __tmemc_set_var(client, subop, info);
             if (ret)
                 break;
         }
@@ -303,37 +318,38 @@ static int tmemc_set_var(domid_t cli_id, uint32_t subop, 
uint32_t arg1)
     {
         client = tmem_client_from_cli_id(cli_id);
         if ( client )
-            ret = __tmemc_set_var(client, subop, arg1);
+            ret = __tmemc_set_var(client, subop, info);
     }
     return ret;
 }
 
-static int tmemc_save_subop(int cli_id, uint32_t pool_id,
-                        uint32_t subop, tmem_cli_va_param_t buf, uint32_t arg1)
+static int tmemc_save_subop(int cli_id, uint32_t pool_id, uint32_t subop,
+                            XEN_GUEST_HANDLE_PARAM(void) buf, uint32_t arg1)
 {
     struct client *client = tmem_client_from_cli_id(cli_id);
     struct tmem_pool *pool = (client == NULL || pool_id >= 
MAX_POOLS_PER_DOMAIN)
                    ? NULL : client->pools[pool_id];
     int rc = -1;
+    XEN_GUEST_HANDLE_PARAM(xen_sysctl_tmem_client_t) info;
 
     switch(subop)
     {
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION:
-        rc = TMEM_SPEC_VERSION;
-        break;
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS:
-        rc = MAX_POOLS_PER_DOMAIN;
-        break;
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT:
-        if ( client == NULL )
-            break;
-        rc = client->info.weight == -1 ? -2 : client->info.weight;
-        break;
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS:
-        if ( client == NULL )
-            break;
-        rc = (client->info.flags.u.compress ? TMEM_CLIENT_COMPRESS : 0 ) |
-             (client->was_frozen ? TMEM_CLIENT_FROZEN : 0 );
+    case XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO:
+        rc = 0;
+        info = guest_handle_cast(buf, xen_sysctl_tmem_client_t);
+        if ( client )
+        {
+            if ( copy_to_guest(info, &client->info, 1) )
+                rc = -EFAULT;
+        }
+        else
+        {
+            xen_sysctl_tmem_client_t generic = { .version = TMEM_SPEC_VERSION,
+                                                 .maxpools = 
MAX_POOLS_PER_DOMAIN };
+
+            if ( copy_to_guest(info, &generic, 1) )
+                rc = -EFAULT;
+        }
         break;
     case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS:
          if ( pool == NULL )
@@ -386,22 +402,19 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
         ret = tmemc_list(op->cli_id,
                          guest_handle_cast(op->buf, char), op->arg1, op->arg2);
         break;
-    case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
-    case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
-        ret = tmemc_set_var(op->cli_id, cmd, op->arg1);
+    case XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO:
+        ret = tmemc_set_var(op->cli_id, cmd,
+                            guest_handle_cast(op->buf, void));
         break;
     case XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB:
         ret = tmem_freeable_pages() >> (20 - PAGE_SHIFT);
         break;
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION:
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS:
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT:
-    case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS:
+    case XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO:
     case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS:
     case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES:
     case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID:
         ret = tmemc_save_subop(op->cli_id, pool_id, cmd,
-                               guest_handle_cast(op->buf, char), op->arg1);
+                               guest_handle_cast(op->buf, void), op->arg1);
         break;
     default:
         ret = do_tmem_control(op);
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index a29a8fa..27e051b 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -756,14 +756,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
 #define XEN_SYSCTL_TMEM_OP_FLUSH                  2
 #define XEN_SYSCTL_TMEM_OP_DESTROY                3
 #define XEN_SYSCTL_TMEM_OP_LIST                   4
-#define XEN_SYSCTL_TMEM_OP_SET_WEIGHT             5
-#define XEN_SYSCTL_TMEM_OP_SET_COMPRESS           7
 #define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB      8
 #define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN             10
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION       11
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS      12
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT 13
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS  15
+#define XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO        11
+#define XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO        12
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS    16
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES   17
 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID     18
@@ -785,6 +781,11 @@ struct tmem_handle {
     xen_tmem_oid_t oid;
 };
 
+/*
+ * XEN_SYSCTL_TMEM_OP_[GET,SAVE]_CLIENT overrides the 'buf' in
+ * xen_sysctl_tmem_op with this structure - and contain data used
+ * during migration.
+ */
 struct tmem_client {
     uint32_t version;
     uint32_t maxpools;
@@ -798,6 +799,8 @@ struct tmem_client {
     } flags;
     uint32_t weight;
 };
+typedef struct tmem_client xen_sysctl_tmem_client_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_client_t);
 
 struct xen_sysctl_tmem_op {
     uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
@@ -809,7 +812,8 @@ struct xen_sysctl_tmem_op {
     uint32_t arg2;      /* IN: If not applicable to command use 0. */
     uint32_t pad;       /* Padding so structure is the same under 32 and 64. */
     xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
-    XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. 
*/
+    XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save/restore,
+                                      and set/get ops. */
 };
 typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index c0e776c..daf6c92 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -293,7 +293,7 @@ struct client {
     struct list_head ephemeral_page_list;
     long eph_count, eph_count_max;
     domid_t cli_id;
-    struct tmem_client info;
+    xen_sysctl_tmem_client_t info;
     bool_t shared_auth_required;
     /* For save/restore/migration. */
     bool_t was_frozen;
-- 
2.4.11


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.