[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] tmem: Move TMEM_CONTROL subop of tmem hypercall to sysctl.
commit d0edc15a6cd30ed4cfd14c082615cc7cb3eeaefb Author: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> AuthorDate: Wed Aug 26 18:04:12 2015 -0400 Commit: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> CommitDate: Wed Sep 2 08:48:00 2015 -0400 tmem: Move TMEM_CONTROL subop of tmem hypercall to sysctl. The operations are to be used by a control domain to set parameters, list pools, clients, and to be used during migration. There is no need to have them in the tmem hypercall path. This patch moves code without adding fixes - and in fact in some cases makes the parameters so long that they hurt eyes - but that is for another patch. Note that in regards to existing users: - Only the control domain could call it - which meant that if a guest called it, it would get -EPERM, so we are OK there. In practice no guests called this TMEM_CONTROL command. - The spec: https://oss.oracle.com/projects/tmem/dist/documentation/api/tmemspec-v001.pdf mentions: "TBD [Not sure if this is really needed.]" which is a carte blanche as any to do this! Note: The XSM check is the same - we just move it from do_tmem_op to do_sysctl. We also add a 32-bit pad to make the sysctl structure have the same exact size under 32 and 64-bit toolstacks and not worry about alignment issues. And the XLAT does not need to deal with the buf as it has been moved to another structure which is 32/64 fixed. 
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> Acked-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx> Acked-by: Jan Beulich <jbeulich@xxxxxxxx> --- tools/libxc/xc_tmem.c | 106 +++++++++++++------------ tools/libxl/libxl.c | 14 ++-- tools/python/xen/lowlevel/xc/xc.c | 20 +++--- tools/xenstat/libxenstat/src/xenstat.c | 4 +- xen/common/sysctl.c | 7 ++- xen/common/tmem.c | 137 ++++++++++++++++---------- xen/include/public/sysctl.h | 44 ++++++++++ xen/include/public/tmem.h | 43 ++--------- xen/include/xen/tmem.h | 3 + xen/include/xen/tmem_xen.h | 4 - xen/xsm/flask/hooks.c | 3 + 11 files changed, 203 insertions(+), 182 deletions(-) diff --git a/tools/libxc/xc_tmem.c b/tools/libxc/xc_tmem.c index 467001b..505595b 100644 --- a/tools/libxc/xc_tmem.c +++ b/tools/libxc/xc_tmem.c @@ -47,27 +47,28 @@ static int do_tmem_op(xc_interface *xch, tmem_op_t *op) int xc_tmem_control(xc_interface *xch, int32_t pool_id, - uint32_t subop, + uint32_t cmd, uint32_t cli_id, uint32_t arg1, uint32_t arg2, void *buf) { - tmem_op_t op; + DECLARE_SYSCTL; DECLARE_HYPERCALL_BOUNCE(buf, arg1, XC_HYPERCALL_BUFFER_BOUNCE_OUT); int rc; - op.cmd = TMEM_CONTROL; - op.pool_id = pool_id; - op.u.ctrl.subop = subop; - op.u.ctrl.cli_id = cli_id; - op.u.ctrl.arg1 = arg1; - op.u.ctrl.arg2 = arg2; - op.u.ctrl.oid[0] = 0; - op.u.ctrl.oid[1] = 0; - op.u.ctrl.oid[2] = 0; - - if ( subop == TMEMC_LIST && arg1 != 0 ) + sysctl.cmd = XEN_SYSCTL_tmem_op; + sysctl.u.tmem_op.pool_id = pool_id; + sysctl.u.tmem_op.cmd = cmd; + sysctl.u.tmem_op.cli_id = cli_id; + sysctl.u.tmem_op.arg1 = arg1; + sysctl.u.tmem_op.arg2 = arg2; + sysctl.u.tmem_op.pad = 0; + sysctl.u.tmem_op.oid[0] = 0; + sysctl.u.tmem_op.oid[1] = 0; + sysctl.u.tmem_op.oid[2] = 0; + + if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 ) { if ( buf == NULL ) { @@ -81,11 +82,11 @@ int xc_tmem_control(xc_interface *xch, } } - set_xen_guest_handle(op.u.ctrl.buf, buf); + set_xen_guest_handle(sysctl.u.tmem_op.buf, buf); - rc = do_tmem_op(xch, &op); + rc = 
do_sysctl(xch, &sysctl); - if (subop == TMEMC_LIST && arg1 != 0) + if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 ) xc_hypercall_bounce_post(xch, buf); return rc; @@ -93,28 +94,29 @@ int xc_tmem_control(xc_interface *xch, int xc_tmem_control_oid(xc_interface *xch, int32_t pool_id, - uint32_t subop, + uint32_t cmd, uint32_t cli_id, uint32_t arg1, uint32_t arg2, struct tmem_oid oid, void *buf) { - tmem_op_t op; + DECLARE_SYSCTL; DECLARE_HYPERCALL_BOUNCE(buf, arg1, XC_HYPERCALL_BUFFER_BOUNCE_OUT); int rc; - op.cmd = TMEM_CONTROL; - op.pool_id = pool_id; - op.u.ctrl.subop = subop; - op.u.ctrl.cli_id = cli_id; - op.u.ctrl.arg1 = arg1; - op.u.ctrl.arg2 = arg2; - op.u.ctrl.oid[0] = oid.oid[0]; - op.u.ctrl.oid[1] = oid.oid[1]; - op.u.ctrl.oid[2] = oid.oid[2]; - - if ( subop == TMEMC_LIST && arg1 != 0 ) + sysctl.cmd = XEN_SYSCTL_tmem_op; + sysctl.u.tmem_op.pool_id = pool_id; + sysctl.u.tmem_op.cmd = cmd; + sysctl.u.tmem_op.cli_id = cli_id; + sysctl.u.tmem_op.arg1 = arg1; + sysctl.u.tmem_op.arg2 = arg2; + sysctl.u.tmem_op.pad = 0; + sysctl.u.tmem_op.oid[0] = oid.oid[0]; + sysctl.u.tmem_op.oid[1] = oid.oid[1]; + sysctl.u.tmem_op.oid[2] = oid.oid[2]; + + if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 ) { if ( buf == NULL ) { @@ -128,11 +130,11 @@ int xc_tmem_control_oid(xc_interface *xch, } } - set_xen_guest_handle(op.u.ctrl.buf, buf); + set_xen_guest_handle(sysctl.u.tmem_op.buf, buf); - rc = do_tmem_op(xch, &op); + rc = do_sysctl(xch, &sysctl); - if (subop == TMEMC_LIST && arg1 != 0) + if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 ) xc_hypercall_bounce_post(xch, buf); return rc; @@ -218,28 +220,28 @@ int xc_tmem_save(xc_interface *xch, uint32_t minusone = -1; struct tmem_handle *h; - if ( xc_tmem_control(xch,0,TMEMC_SAVE_BEGIN,dom,live,0,NULL) <= 0 ) + if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_BEGIN,dom,live,0,NULL) <= 0 ) return 0; if ( write_exact(io_fd, &marker, sizeof(marker)) ) return -1; - version = 
xc_tmem_control(xch,0,TMEMC_SAVE_GET_VERSION,0,0,0,NULL); + version = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION,0,0,0,NULL); if ( write_exact(io_fd, &version, sizeof(version)) ) return -1; - max_pools = xc_tmem_control(xch,0,TMEMC_SAVE_GET_MAXPOOLS,0,0,0,NULL); + max_pools = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS,0,0,0,NULL); if ( write_exact(io_fd, &max_pools, sizeof(max_pools)) ) return -1; if ( version == -1 || max_pools == -1 ) return -1; if ( write_exact(io_fd, &minusone, sizeof(minusone)) ) return -1; - flags = xc_tmem_control(xch,0,TMEMC_SAVE_GET_CLIENT_FLAGS,dom,0,0,NULL); + flags = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS,dom,0,0,NULL); if ( write_exact(io_fd, &flags, sizeof(flags)) ) return -1; - weight = xc_tmem_control(xch,0,TMEMC_SAVE_GET_CLIENT_WEIGHT,dom,0,0,NULL); + weight = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT,dom,0,0,NULL); if ( write_exact(io_fd, &weight, sizeof(weight)) ) return -1; - cap = xc_tmem_control(xch,0,TMEMC_SAVE_GET_CLIENT_CAP,dom,0,0,NULL); + cap = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP,dom,0,0,NULL); if ( write_exact(io_fd, &cap, sizeof(cap)) ) return -1; if ( flags == -1 || weight == -1 || cap == -1 ) @@ -256,14 +258,14 @@ int xc_tmem_save(xc_interface *xch, int checksum = 0; /* get pool id, flags, pagesize, n_pages, uuid */ - flags = xc_tmem_control(xch,i,TMEMC_SAVE_GET_POOL_FLAGS,dom,0,0,NULL); + flags = xc_tmem_control(xch,i,XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS,dom,0,0,NULL); if ( flags != -1 ) { pool_id = i; - n_pages = xc_tmem_control(xch,i,TMEMC_SAVE_GET_POOL_NPAGES,dom,0,0,NULL); + n_pages = xc_tmem_control(xch,i,XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES,dom,0,0,NULL); if ( !(flags & TMEM_POOL_PERSIST) ) n_pages = 0; - (void)xc_tmem_control(xch,i,TMEMC_SAVE_GET_POOL_UUID,dom,sizeof(uuid),0,&uuid); + (void)xc_tmem_control(xch,i,XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID,dom,sizeof(uuid),0,&uuid); if ( write_exact(io_fd, 
&pool_id, sizeof(pool_id)) ) return -1; if ( write_exact(io_fd, &flags, sizeof(flags)) ) @@ -287,7 +289,7 @@ int xc_tmem_save(xc_interface *xch, { int ret; if ( (ret = xc_tmem_control(xch, pool_id, - TMEMC_SAVE_GET_NEXT_PAGE, dom, + XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE, dom, bufsize, 0, buf)) > 0 ) { h = (struct tmem_handle *)buf; @@ -332,7 +334,7 @@ int xc_tmem_save_extra(xc_interface *xch, int dom, int io_fd, int field_marker) if ( write_exact(io_fd, &marker, sizeof(marker)) ) return -1; - while ( xc_tmem_control(xch, 0, TMEMC_SAVE_GET_NEXT_INV, dom, + while ( xc_tmem_control(xch, 0, XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV, dom, sizeof(handle),0,&handle) > 0 ) { if ( write_exact(io_fd, &handle.pool_id, sizeof(handle.pool_id)) ) return -1; @@ -355,7 +357,7 @@ int xc_tmem_save_extra(xc_interface *xch, int dom, int io_fd, int field_marker) /* only called for live migration */ void xc_tmem_save_done(xc_interface *xch, int dom) { - xc_tmem_control(xch,0,TMEMC_SAVE_END,dom,0,0,NULL); + xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_END,dom,0,0,NULL); } /* restore routines */ @@ -389,7 +391,7 @@ int xc_tmem_restore(xc_interface *xch, int dom, int io_fd) uint32_t weight, cap, flags; int checksum = 0; - save_version = xc_tmem_control(xch,0,TMEMC_SAVE_GET_VERSION,dom,0,0,NULL); + save_version = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION,dom,0,0,NULL); if ( save_version == -1 ) return -1; /* domain doesn't exist */ if ( read_exact(io_fd, &this_version, sizeof(this_version)) ) @@ -401,23 +403,23 @@ int xc_tmem_restore(xc_interface *xch, int dom, int io_fd) return -1; if ( minusone != -1 ) return -1; - if ( xc_tmem_control(xch,0,TMEMC_RESTORE_BEGIN,dom,0,0,NULL) < 0 ) + if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN,dom,0,0,NULL) < 0 ) return -1; if ( read_exact(io_fd, &flags, sizeof(flags)) ) return -1; if ( flags & TMEM_CLIENT_COMPRESS ) - if ( xc_tmem_control(xch,0,TMEMC_SET_COMPRESS,dom,1,0,NULL) < 0 ) + if ( 
xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_COMPRESS,dom,1,0,NULL) < 0 ) return -1; if ( flags & TMEM_CLIENT_FROZEN ) - if ( xc_tmem_control(xch,0,TMEMC_FREEZE,dom,0,0,NULL) < 0 ) + if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_FREEZE,dom,0,0,NULL) < 0 ) return -1; if ( read_exact(io_fd, &weight, sizeof(weight)) ) return -1; - if ( xc_tmem_control(xch,0,TMEMC_SET_WEIGHT,dom,0,0,NULL) < 0 ) + if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_WEIGHT,dom,0,0,NULL) < 0 ) return -1; if ( read_exact(io_fd, &cap, sizeof(cap)) ) return -1; - if ( xc_tmem_control(xch,0,TMEMC_SET_CAP,dom,0,0,NULL) < 0 ) + if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_CAP,dom,0,0,NULL) < 0 ) return -1; if ( read_exact(io_fd, &minusone, sizeof(minusone)) ) return -1; @@ -464,7 +466,7 @@ int xc_tmem_restore(xc_interface *xch, int dom, int io_fd) return -1; checksum += *buf; if ( (rc = xc_tmem_control_oid(xch, pool_id, - TMEMC_RESTORE_PUT_PAGE, dom, + XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE, dom, bufsize, index, oid, buf)) <= 0 ) { DPRINTF("xc_tmem_restore: putting page failed, rc=%d\n",rc); @@ -496,7 +498,7 @@ int xc_tmem_restore_extra(xc_interface *xch, int dom, int io_fd) return -1; if ( read_exact(io_fd, &index, sizeof(index)) ) return -1; - if ( xc_tmem_control_oid(xch, pool_id, TMEMC_RESTORE_FLUSH_PAGE, dom, + if ( xc_tmem_control_oid(xch, pool_id, XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE, dom, 0,index,oid,NULL) <= 0 ) return -1; count++; diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c index e564c22..10d1909 100644 --- a/tools/libxl/libxl.c +++ b/tools/libxl/libxl.c @@ -6045,7 +6045,7 @@ char *libxl_tmem_list(libxl_ctx *ctx, uint32_t domid, int use_long) int rc; char _buf[32768]; - rc = xc_tmem_control(ctx->xch, -1, TMEMC_LIST, domid, 32768, use_long, + rc = xc_tmem_control(ctx->xch, -1, XEN_SYSCTL_TMEM_OP_LIST, domid, 32768, use_long, _buf); if (rc < 0) { LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, @@ -6060,7 +6060,7 @@ int libxl_tmem_freeze(libxl_ctx *ctx, uint32_t domid) { int 
rc; - rc = xc_tmem_control(ctx->xch, -1, TMEMC_FREEZE, domid, 0, 0, + rc = xc_tmem_control(ctx->xch, -1, XEN_SYSCTL_TMEM_OP_FREEZE, domid, 0, 0, NULL); if (rc < 0) { LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, @@ -6075,7 +6075,7 @@ int libxl_tmem_thaw(libxl_ctx *ctx, uint32_t domid) { int rc; - rc = xc_tmem_control(ctx->xch, -1, TMEMC_THAW, domid, 0, 0, + rc = xc_tmem_control(ctx->xch, -1, XEN_SYSCTL_TMEM_OP_THAW, domid, 0, 0, NULL); if (rc < 0) { LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, @@ -6089,11 +6089,11 @@ int libxl_tmem_thaw(libxl_ctx *ctx, uint32_t domid) static int32_t tmem_setop_from_string(char *set_name) { if (!strcmp(set_name, "weight")) - return TMEMC_SET_WEIGHT; + return XEN_SYSCTL_TMEM_OP_SET_WEIGHT; else if (!strcmp(set_name, "cap")) - return TMEMC_SET_CAP; + return XEN_SYSCTL_TMEM_OP_SET_CAP; else if (!strcmp(set_name, "compress")) - return TMEMC_SET_COMPRESS; + return XEN_SYSCTL_TMEM_OP_SET_COMPRESS; else return -1; } @@ -6137,7 +6137,7 @@ int libxl_tmem_freeable(libxl_ctx *ctx) { int rc; - rc = xc_tmem_control(ctx->xch, -1, TMEMC_QUERY_FREEABLE_MB, -1, 0, 0, 0); + rc = xc_tmem_control(ctx->xch, -1, XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB, -1, 0, 0, 0); if (rc < 0) { LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, "Can not get tmem freeable memory"); diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c index 024014f..9ab53fb 100644 --- a/tools/python/xen/lowlevel/xc/xc.c +++ b/tools/python/xen/lowlevel/xc/xc.c @@ -1809,25 +1809,25 @@ static PyObject *pyxc_tmem_control(XcObject *self, &pool_id, &subop, &cli_id, &arg1, &arg2, &buf) ) return NULL; - if ( (subop == TMEMC_LIST) && (arg1 > 32768) ) + if ( (subop == XEN_SYSCTL_TMEM_OP_LIST) && (arg1 > 32768) ) arg1 = 32768; if ( (rc = xc_tmem_control(self->xc_handle, pool_id, subop, cli_id, arg1, arg2, buffer)) < 0 ) return Py_BuildValue("i", rc); switch (subop) { - case TMEMC_LIST: + case XEN_SYSCTL_TMEM_OP_LIST: return Py_BuildValue("s", buffer); - case TMEMC_FLUSH: + 
case XEN_SYSCTL_TMEM_OP_FLUSH: return Py_BuildValue("i", rc); - case TMEMC_QUERY_FREEABLE_MB: + case XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB: return Py_BuildValue("i", rc); - case TMEMC_THAW: - case TMEMC_FREEZE: - case TMEMC_DESTROY: - case TMEMC_SET_WEIGHT: - case TMEMC_SET_CAP: - case TMEMC_SET_COMPRESS: + case XEN_SYSCTL_TMEM_OP_THAW: + case XEN_SYSCTL_TMEM_OP_FREEZE: + case XEN_SYSCTL_TMEM_OP_DESTROY: + case XEN_SYSCTL_TMEM_OP_SET_WEIGHT: + case XEN_SYSCTL_TMEM_OP_SET_CAP: + case XEN_SYSCTL_TMEM_OP_SET_COMPRESS: default: break; } diff --git a/tools/xenstat/libxenstat/src/xenstat.c b/tools/xenstat/libxenstat/src/xenstat.c index 24b57d0..3495f3f 100644 --- a/tools/xenstat/libxenstat/src/xenstat.c +++ b/tools/xenstat/libxenstat/src/xenstat.c @@ -149,7 +149,7 @@ void domain_get_tmem_stats(xenstat_handle * handle, xenstat_domain * domain) { char buffer[4096]; - if (xc_tmem_control(handle->xc_handle,-1,TMEMC_LIST,domain->id, + if (xc_tmem_control(handle->xc_handle,-1,XEN_SYSCTL_TMEM_OP_LIST,domain->id, sizeof(buffer),-1,buffer) < 0) return; domain->tmem_stats.curr_eph_pages = parse(buffer,"Ec"); @@ -191,7 +191,7 @@ xenstat_node *xenstat_get_node(xenstat_handle * handle, unsigned int flags) * handle->page_size; rc = xc_tmem_control(handle->xc_handle, -1, - TMEMC_QUERY_FREEABLE_MB, -1, 0, 0, NULL); + XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB, -1, 0, 0, NULL); node->freeable_mb = (rc < 0) ? 0 : rc; /* malloc(0) is not portable, so allocate a single domain. This will * be resized below. 
*/ diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c index f1c0c76..85e853f 100644 --- a/xen/common/sysctl.c +++ b/xen/common/sysctl.c @@ -14,6 +14,7 @@ #include <xen/domain.h> #include <xen/event.h> #include <xen/domain_page.h> +#include <xen/tmem.h> #include <xen/trace.h> #include <xen/console.h> #include <xen/iocap.h> @@ -68,7 +69,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) case XEN_SYSCTL_tbuf_op: ret = tb_control(&op->u.tbuf_op); break; - + case XEN_SYSCTL_sched_id: op->u.sched_id.sched_id = sched_id(); break; @@ -455,6 +456,10 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) } #endif + case XEN_SYSCTL_tmem_op: + ret = tmem_control(&op->u.tmem_op); + break; + default: ret = arch_do_sysctl(op, u_sysctl); copyback = 0; diff --git a/xen/common/tmem.c b/xen/common/tmem.c index 6ea602b..c73add5 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -18,6 +18,7 @@ #include <xen/tmem_xen.h> /* host-specific (eg Xen) code goes here */ #endif +#include <public/sysctl.h> #include <xen/tmem.h> #include <xen/rbtree.h> #include <xen/radix-tree.h> @@ -2012,8 +2013,8 @@ fail: static int tmemc_freeze_pools(domid_t cli_id, int arg) { struct client *client; - bool_t freeze = (arg == TMEMC_FREEZE) ? 1 : 0; - bool_t destroy = (arg == TMEMC_DESTROY) ? 1 : 0; + bool_t freeze = (arg == XEN_SYSCTL_TMEM_OP_FREEZE) ? 1 : 0; + bool_t destroy = (arg == XEN_SYSCTL_TMEM_OP_DESTROY) ? 1 : 0; char *s; s = destroy ? "destroyed" : ( freeze ? 
"frozen" : "thawed" ); @@ -2230,7 +2231,7 @@ static int __tmemc_set_var(struct client *client, uint32_t subop, uint32_t arg1) switch (subop) { - case TMEMC_SET_WEIGHT: + case XEN_SYSCTL_TMEM_OP_SET_WEIGHT: old_weight = client->weight; client->weight = arg1; tmem_client_info("tmem: weight set to %d for %s=%d\n", @@ -2238,12 +2239,12 @@ static int __tmemc_set_var(struct client *client, uint32_t subop, uint32_t arg1) atomic_sub(old_weight,&client_weight_total); atomic_add(client->weight,&client_weight_total); break; - case TMEMC_SET_CAP: + case XEN_SYSCTL_TMEM_OP_SET_CAP: client->cap = arg1; tmem_client_info("tmem: cap set to %d for %s=%d\n", arg1, tmem_cli_id_str, cli_id); break; - case TMEMC_SET_COMPRESS: + case XEN_SYSCTL_TMEM_OP_SET_COMPRESS: if ( tmem_dedup_enabled() ) { tmem_client_warn("tmem: compression %s for all %ss, cannot be changed when tmem_dedup is enabled\n", @@ -2346,7 +2347,7 @@ static int tmemc_save_subop(int cli_id, uint32_t pool_id, switch(subop) { - case TMEMC_SAVE_BEGIN: + case XEN_SYSCTL_TMEM_OP_SAVE_BEGIN: if ( client == NULL ) return 0; for (p = 0; p < MAX_POOLS_PER_DOMAIN; p++) @@ -2363,33 +2364,33 @@ static int tmemc_save_subop(int cli_id, uint32_t pool_id, client->live_migrating = 1; rc = 1; break; - case TMEMC_RESTORE_BEGIN: + case XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN: if ( client == NULL && (client = client_create(cli_id)) != NULL ) return 1; break; - case TMEMC_SAVE_GET_VERSION: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION: rc = TMEM_SPEC_VERSION; break; - case TMEMC_SAVE_GET_MAXPOOLS: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS: rc = MAX_POOLS_PER_DOMAIN; break; - case TMEMC_SAVE_GET_CLIENT_WEIGHT: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT: if ( client == NULL ) break; rc = client->weight == -1 ? -2 : client->weight; break; - case TMEMC_SAVE_GET_CLIENT_CAP: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP: if ( client == NULL ) break; rc = client->cap == -1 ? 
-2 : client->cap; break; - case TMEMC_SAVE_GET_CLIENT_FLAGS: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS: if ( client == NULL ) break; rc = (client->compress ? TMEM_CLIENT_COMPRESS : 0 ) | (client->was_frozen ? TMEM_CLIENT_FROZEN : 0 ); break; - case TMEMC_SAVE_GET_POOL_FLAGS: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS: if ( pool == NULL ) break; rc = (pool->persistent ? TMEM_POOL_PERSIST : 0) | @@ -2397,19 +2398,19 @@ static int tmemc_save_subop(int cli_id, uint32_t pool_id, (POOL_PAGESHIFT << TMEM_POOL_PAGESIZE_SHIFT) | (TMEM_SPEC_VERSION << TMEM_POOL_VERSION_SHIFT); break; - case TMEMC_SAVE_GET_POOL_NPAGES: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES: if ( pool == NULL ) break; rc = _atomic_read(pool->pgp_count); break; - case TMEMC_SAVE_GET_POOL_UUID: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID: if ( pool == NULL ) break; rc = 0; if ( copy_to_guest(guest_handle_cast(buf, void), pool->uuid, 2) ) rc = -EFAULT; break; - case TMEMC_SAVE_END: + case XEN_SYSCTL_TMEM_OP_SAVE_END: if ( client == NULL ) break; client->live_migrating = 0; @@ -2553,77 +2554,75 @@ static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, struct oid *oi return do_tmem_flush_page(pool,oidp,index); } -static int do_tmem_control(struct tmem_op *op) +int tmem_control(struct xen_sysctl_tmem_op *op) { int ret; uint32_t pool_id = op->pool_id; - uint32_t subop = op->u.ctrl.subop; - struct oid *oidp = (struct oid *)(&op->u.ctrl.oid[0]); + uint32_t cmd = op->cmd; + struct oid *oidp = (struct oid *)(&op->oid[0]); - if ( xsm_tmem_control(XSM_PRIV) ) - return -EPERM; + if ( op->pad != 0 ) + return -EINVAL; - switch(subop) + write_lock(&tmem_rwlock); + + switch (cmd) { - case TMEMC_THAW: - case TMEMC_FREEZE: - case TMEMC_DESTROY: - ret = tmemc_freeze_pools(op->u.ctrl.cli_id,subop); + case XEN_SYSCTL_TMEM_OP_THAW: + case XEN_SYSCTL_TMEM_OP_FREEZE: + case XEN_SYSCTL_TMEM_OP_DESTROY: + ret = tmemc_freeze_pools(op->cli_id, cmd); break; - case TMEMC_FLUSH: - ret = 
tmemc_flush_mem(op->u.ctrl.cli_id,op->u.ctrl.arg1); + case XEN_SYSCTL_TMEM_OP_FLUSH: + ret = tmemc_flush_mem(op->cli_id,op->arg1); break; - case TMEMC_LIST: - ret = tmemc_list(op->u.ctrl.cli_id, - guest_handle_cast(op->u.ctrl.buf, char), - op->u.ctrl.arg1,op->u.ctrl.arg2); + case XEN_SYSCTL_TMEM_OP_LIST: + ret = tmemc_list(op->cli_id, + guest_handle_cast(op->buf, char), op->arg1, op->arg2); break; - case TMEMC_SET_WEIGHT: - case TMEMC_SET_CAP: - case TMEMC_SET_COMPRESS: - ret = tmemc_set_var(op->u.ctrl.cli_id,subop,op->u.ctrl.arg1); + case XEN_SYSCTL_TMEM_OP_SET_WEIGHT: + case XEN_SYSCTL_TMEM_OP_SET_CAP: + case XEN_SYSCTL_TMEM_OP_SET_COMPRESS: + ret = tmemc_set_var(op->cli_id, cmd, op->arg1); break; - case TMEMC_QUERY_FREEABLE_MB: + case XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB: ret = tmem_freeable_pages() >> (20 - PAGE_SHIFT); break; - case TMEMC_SAVE_BEGIN: - case TMEMC_RESTORE_BEGIN: - case TMEMC_SAVE_GET_VERSION: - case TMEMC_SAVE_GET_MAXPOOLS: - case TMEMC_SAVE_GET_CLIENT_WEIGHT: - case TMEMC_SAVE_GET_CLIENT_CAP: - case TMEMC_SAVE_GET_CLIENT_FLAGS: - case TMEMC_SAVE_GET_POOL_FLAGS: - case TMEMC_SAVE_GET_POOL_NPAGES: - case TMEMC_SAVE_GET_POOL_UUID: - case TMEMC_SAVE_END: - ret = tmemc_save_subop(op->u.ctrl.cli_id,pool_id,subop, - guest_handle_cast(op->u.ctrl.buf, char), - op->u.ctrl.arg1); + case XEN_SYSCTL_TMEM_OP_SAVE_BEGIN: + case XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES: + case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID: + case XEN_SYSCTL_TMEM_OP_SAVE_END: + ret = tmemc_save_subop(op->cli_id, pool_id, cmd, + guest_handle_cast(op->buf, char), op->arg1); break; - case TMEMC_SAVE_GET_NEXT_PAGE: - ret = tmemc_save_get_next_page(op->u.ctrl.cli_id, 
pool_id, - guest_handle_cast(op->u.ctrl.buf, char), - op->u.ctrl.arg1); + case XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE: + ret = tmemc_save_get_next_page(op->cli_id, pool_id, + guest_handle_cast(op->buf, char), op->arg1); break; - case TMEMC_SAVE_GET_NEXT_INV: - ret = tmemc_save_get_next_inv(op->u.ctrl.cli_id, - guest_handle_cast(op->u.ctrl.buf, char), - op->u.ctrl.arg1); + case XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV: + ret = tmemc_save_get_next_inv(op->cli_id, + guest_handle_cast(op->buf, char), op->arg1); break; - case TMEMC_RESTORE_PUT_PAGE: - ret = tmemc_restore_put_page(op->u.ctrl.cli_id,pool_id, - oidp, op->u.ctrl.arg2, - guest_handle_cast(op->u.ctrl.buf, char), - op->u.ctrl.arg1); + case XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE: + ret = tmemc_restore_put_page(op->cli_id, pool_id, oidp, op->arg2, + guest_handle_cast(op->buf, char), op->arg1); break; - case TMEMC_RESTORE_FLUSH_PAGE: - ret = tmemc_restore_flush_page(op->u.ctrl.cli_id,pool_id, - oidp, op->u.ctrl.arg2); + case XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE: + ret = tmemc_restore_flush_page(op->cli_id, pool_id, oidp, op->arg2); break; default: ret = -1; } + + write_unlock(&tmem_rwlock); + return ret; } @@ -2666,7 +2665,7 @@ long do_tmem_op(tmem_cli_op_t uops) if ( op.cmd == TMEM_CONTROL ) { - rc = do_tmem_control(&op); + rc = -EOPNOTSUPP; } else if ( op.cmd == TMEM_AUTH ) { @@ -2786,7 +2785,7 @@ void tmem_destroy(void *v) write_unlock(&tmem_rwlock); } -#define MAX_EVICTS 10 /* should be variable or set via TMEMC_ ?? */ +#define MAX_EVICTS 10 /* should be variable or set via XEN_SYSCTL_TMEM_OP_ ?? 
*/ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) { struct page_info *pfp; diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h index 58c9be2..fc4709a 100644 --- a/xen/include/public/sysctl.h +++ b/xen/include/public/sysctl.h @@ -710,6 +710,48 @@ struct xen_sysctl_psr_cat_op { typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t); +#define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU + +#define XEN_SYSCTL_TMEM_OP_THAW 0 +#define XEN_SYSCTL_TMEM_OP_FREEZE 1 +#define XEN_SYSCTL_TMEM_OP_FLUSH 2 +#define XEN_SYSCTL_TMEM_OP_DESTROY 3 +#define XEN_SYSCTL_TMEM_OP_LIST 4 +#define XEN_SYSCTL_TMEM_OP_SET_WEIGHT 5 +#define XEN_SYSCTL_TMEM_OP_SET_CAP 6 +#define XEN_SYSCTL_TMEM_OP_SET_COMPRESS 7 +#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB 8 +#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN 10 +#define XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION 11 +#define XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS 12 +#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT 13 +#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP 14 +#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS 15 +#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS 16 +#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES 17 +#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID 18 +#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE 19 +#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV 20 +#define XEN_SYSCTL_TMEM_OP_SAVE_END 21 +#define XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN 30 +#define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE 32 +#define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE 33 + +struct xen_sysctl_tmem_op { + uint32_t cmd; /* IN: XEN_SYSCTL_TMEM_OP_* . */ + int32_t pool_id; /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/ + uint32_t cli_id; /* IN: client id, 0 for XEN_SYSCTL_TMEM_QUERY_FREEABLE_MB + for all others can be the domain id or + XEN_SYSCTL_TMEM_OP_ALL_CLIENTS for all. */ + uint32_t arg1; /* IN: If not applicable to command use 0. */ + uint32_t arg2; /* IN: If not applicable to command use 0. 
*/ + uint32_t pad; /* Padding so structure is the same under 32 and 64. */ + uint64_t oid[3]; /* IN: If not applicable to command use 0s. */ + XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */ +}; +typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t); + struct xen_sysctl { uint32_t cmd; #define XEN_SYSCTL_readconsole 1 @@ -734,6 +776,7 @@ struct xen_sysctl { #define XEN_SYSCTL_psr_cmt_op 21 #define XEN_SYSCTL_pcitopoinfo 22 #define XEN_SYSCTL_psr_cat_op 23 +#define XEN_SYSCTL_tmem_op 24 uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ union { struct xen_sysctl_readconsole readconsole; @@ -758,6 +801,7 @@ struct xen_sysctl { struct xen_sysctl_coverage_op coverage_op; struct xen_sysctl_psr_cmt_op psr_cmt_op; struct xen_sysctl_psr_cat_op psr_cat_op; + struct xen_sysctl_tmem_op tmem_op; uint8_t pad[128]; } u; }; diff --git a/xen/include/public/tmem.h b/xen/include/public/tmem.h index 4fd2fc6..e4ee704 100644 --- a/xen/include/public/tmem.h +++ b/xen/include/public/tmem.h @@ -33,7 +33,11 @@ #define TMEM_SPEC_VERSION 1 /* Commands to HYPERVISOR_tmem_op() */ -#define TMEM_CONTROL 0 +#ifdef __XEN__ +#define TMEM_CONTROL 0 /* Now called XEN_SYSCTL_tmem_op */ +#else +#undef TMEM_CONTROL +#endif #define TMEM_NEW_POOL 1 #define TMEM_DESTROY_POOL 2 #define TMEM_PUT_PAGE 4 @@ -48,35 +52,9 @@ #endif /* Privileged commands to HYPERVISOR_tmem_op() */ -#define TMEM_AUTH 101 +#define TMEM_AUTH 101 #define TMEM_RESTORE_NEW 102 -/* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */ -#define TMEMC_THAW 0 -#define TMEMC_FREEZE 1 -#define TMEMC_FLUSH 2 -#define TMEMC_DESTROY 3 -#define TMEMC_LIST 4 -#define TMEMC_SET_WEIGHT 5 -#define TMEMC_SET_CAP 6 -#define TMEMC_SET_COMPRESS 7 -#define TMEMC_QUERY_FREEABLE_MB 8 -#define TMEMC_SAVE_BEGIN 10 -#define TMEMC_SAVE_GET_VERSION 11 -#define TMEMC_SAVE_GET_MAXPOOLS 12 -#define TMEMC_SAVE_GET_CLIENT_WEIGHT 13 -#define TMEMC_SAVE_GET_CLIENT_CAP 14 -#define 
TMEMC_SAVE_GET_CLIENT_FLAGS 15 -#define TMEMC_SAVE_GET_POOL_FLAGS 16 -#define TMEMC_SAVE_GET_POOL_NPAGES 17 -#define TMEMC_SAVE_GET_POOL_UUID 18 -#define TMEMC_SAVE_GET_NEXT_PAGE 19 -#define TMEMC_SAVE_GET_NEXT_INV 20 -#define TMEMC_SAVE_END 21 -#define TMEMC_RESTORE_BEGIN 30 -#define TMEMC_RESTORE_PUT_PAGE 32 -#define TMEMC_RESTORE_FLUSH_PAGE 33 - /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ #define TMEM_POOL_PERSIST 1 #define TMEM_POOL_SHARED 2 @@ -110,16 +88,7 @@ struct tmem_op { uint32_t flags; uint32_t arg1; } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */ - struct { - uint32_t subop; - uint32_t cli_id; - uint32_t arg1; - uint32_t arg2; - uint64_t oid[3]; - tmem_cli_va_t buf; - } ctrl; /* for cmd == TMEM_CONTROL */ struct { - uint64_t oid[3]; uint32_t index; uint32_t tmem_offset; diff --git a/xen/include/xen/tmem.h b/xen/include/xen/tmem.h index 5dbf9d5..32a542a 100644 --- a/xen/include/xen/tmem.h +++ b/xen/include/xen/tmem.h @@ -9,6 +9,9 @@ #ifndef __XEN_TMEM_H__ #define __XEN_TMEM_H__ +struct xen_sysctl_tmem_op; + +extern int tmem_control(struct xen_sysctl_tmem_op *op); extern void tmem_destroy(void *); extern void *tmem_relinquish_pages(unsigned int, unsigned int); extern unsigned long tmem_freeable_pages(void); diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index c0d6831..0fdbf68 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -297,15 +297,11 @@ static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops) switch ( cop.cmd ) { case TMEM_NEW_POOL: u = XLAT_tmem_op_u_creat; break; - case TMEM_CONTROL: u = XLAT_tmem_op_u_ctrl; break; case TMEM_AUTH: u = XLAT_tmem_op_u_creat; break; case TMEM_RESTORE_NEW:u = XLAT_tmem_op_u_creat; break; default: u = XLAT_tmem_op_u_gen ; break; } -#define XLAT_tmem_op_HNDL_u_ctrl_buf(_d_, _s_) \ - guest_from_compat_handle((_d_)->u.ctrl.buf, (_s_)->u.ctrl.buf) XLAT_tmem_op(op, &cop); -#undef XLAT_tmem_op_HNDL_u_ctrl_buf return 0; } 
#endif diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c index 7a4522e..666770a 100644 --- a/xen/xsm/flask/hooks.c +++ b/xen/xsm/flask/hooks.c @@ -801,6 +801,9 @@ static int flask_sysctl(int cmd) return avc_current_has_perm(SECINITSID_XEN, SECCLASS_XEN2, XEN2__PSR_CAT_OP, NULL); + case XEN_SYSCTL_tmem_op: + return domain_has_xen(current->domain, XEN__TMEM_CONTROL); + default: printk("flask_sysctl: Unknown op %d\n", cmd); return -EPERM; -- generated by git-patchbot for /home/xen/git/xen.git#master _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |