Re: [Xen-devel] [PATCH 1 of 5] libxl: validate scheduler parameters
On Fri, 2012-06-22 at 11:56 +0100, Ian Campbell wrote:
> # HG changeset patch
> # User Ian Campbell <ian.campbell@xxxxxxxxxx>
> # Date 1340361703 -3600
> # Node ID 998d48ccb8905907cb2f104b475e5ab6ad445348
> # Parent 5fb3c536b5a8810fb8be4149df609d272beff959
> libxl: validate scheduler parameters
>
> This was previously done by xl itself; however, the domain was not
> created at that point, so there was no domid to check against. This
> happened to work on first boot because xl's global domid was
> initialised to zero, so we would (incorrectly) validate the new
> domain's parameters against domain 0. On reboot, though, we would try
> to use the old domain's id and fail.
>
> sched_params_valid is moved and gains gc and domid parameters, plus
> s/ctx/CTX/. The call is placed after
> libxl__domain_build_info_setdefault in the create path, because the
> setdefault function doesn't have access to the domid and there are
> other callers which don't even have a domid to give it.
>
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
After consultation with Ian J and Dario, I have committed this one in
order to get a test pass ASAP. I'll leave the remainder of this series
to be properly reviewed.
Ian.
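
For reference, the SEDF consistency rules being enforced here boil down
to: a weight excludes an explicit period/slice pair, and period and
slice must be given together or not at all. Below is a minimal
standalone sketch of just those checks; the sedf_params struct and its
0-means-unspecified convention are hypothetical simplifications for
illustration, not the real libxl_domain_sched_params type with its
LIBXL_DOMAIN_SCHED_PARAM_*_DEFAULT sentinels.

#include <stdbool.h>
#include <stdio.h>

struct sedf_params {        /* hypothetical stand-in for the libxl type */
    int weight;             /* proportional share; 0 == unspecified */
    int period;             /* real-time period; 0 == unspecified */
    int slice;              /* real-time slice; 0 == unspecified */
};

/* Returns true if the combination of parameters is consistent. */
static bool sedf_params_consistent(const struct sedf_params *p)
{
    bool has_weight = p->weight != 0;
    bool has_period = p->period != 0;
    bool has_slice  = p->slice  != 0;

    /* A weight excludes an explicit period/slice pair... */
    if (has_weight && (has_period || has_slice))
        return false;
    /* ...and period and slice must come together or not at all. */
    if (has_period != has_slice)
        return false;
    return true;
}

int main(void)
{
    const struct sedf_params weighted = { .weight = 256 };
    const struct sedf_params realtime = { .period = 100, .slice = 25 };
    const struct sedf_params conflict = { .weight = 256, .period = 100 };

    printf("weighted: %s\n", sedf_params_consistent(&weighted) ? "ok" : "invalid");
    printf("realtime: %s\n", sedf_params_consistent(&realtime) ? "ok" : "invalid");
    printf("conflict: %s\n", sedf_params_consistent(&conflict) ? "ok" : "invalid");
    return 0;
}

The real sched_params_valid() in the patch below goes one step further:
after these checks pass, it rewrites the fields (zeroing whichever side
is unused) so that the values handed to libxl are internally consistent.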
>
> diff -r 5fb3c536b5a8 -r 998d48ccb890 tools/libxl/libxl_create.c
> --- a/tools/libxl/libxl_create.c Fri Jun 22 10:14:46 2012 +0100
> +++ b/tools/libxl/libxl_create.c Fri Jun 22 11:41:43 2012 +0100
> @@ -72,6 +72,49 @@ int libxl__domain_create_info_setdefault
>      return 0;
>  }
>
> +static int sched_params_valid(libxl__gc *gc,
> +                              uint32_t domid, libxl_domain_sched_params *scp)
> +{
> +    int has_weight = scp->weight != LIBXL_DOMAIN_SCHED_PARAM_WEIGHT_DEFAULT;
> +    int has_period = scp->period != LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT;
> +    int has_slice = scp->slice != LIBXL_DOMAIN_SCHED_PARAM_SLICE_DEFAULT;
> +    int has_extratime =
> +        scp->extratime != LIBXL_DOMAIN_SCHED_PARAM_EXTRATIME_DEFAULT;
> +    libxl_domain_sched_params sci;
> +
> +    libxl_domain_sched_params_get(CTX, domid, &sci);
> +
> +    /* The sedf scheduler needs some more consistency checking */
> +    if (sci.sched == LIBXL_SCHEDULER_SEDF) {
> +        if (has_weight && (has_period || has_slice))
> +            return 0;
> +        if (has_period != has_slice)
> +            return 0;
> +
> +        /*
> +         * The idea is: if we specify a weight, then both period and
> +         * slice have to be zero. OTOH, if we do not specify a weight,
> +         * that means we want a pure best effort domain or an actual
> +         * real-time one. In the former case, it is period that needs
> +         * to be zero; in the latter, weight should be.
> +         */
> +        if (has_weight) {
> +            scp->slice = 0;
> +            scp->period = 0;
> +        }
> +        else if (!has_period) {
> +            /* We can set up a proper best effort domain (extra time only)
> +             * iff we either already have or are asking for some extra time. */
> +            scp->weight = has_extratime ? scp->extratime : sci.extratime;
> +            scp->period = 0;
> +        }
> +        if (has_period && has_slice)
> +            scp->weight = 0;
> +    }
> +
> +    return 1;
> +}
> +
> int libxl__domain_build_info_setdefault(libxl__gc *gc,
>                                          libxl_domain_build_info *b_info)
>  {
> @@ -622,6 +665,12 @@ static void initiate_domain_create(libxl
>      ret = libxl__domain_build_info_setdefault(gc, &d_config->b_info);
>      if (ret) goto error_out;
>  
> +    if (!sched_params_valid(gc, domid, &d_config->b_info.sched_params)) {
> +        LOG(ERROR, "Invalid scheduling parameters\n");
> +        ret = ERROR_INVAL;
> +        goto error_out;
> +    }
> +
>      for (i = 0; i < d_config->num_disks; i++) {
>          ret = libxl__device_disk_setdefault(gc, &d_config->disks[i]);
>          if (ret) goto error_out;
> diff -r 5fb3c536b5a8 -r 998d48ccb890 tools/libxl/xl_cmdimpl.c
> --- a/tools/libxl/xl_cmdimpl.c Fri Jun 22 10:14:46 2012 +0100
> +++ b/tools/libxl/xl_cmdimpl.c Fri Jun 22 11:41:43 2012 +0100
> @@ -550,48 +550,6 @@ vcpp_out:
>      return rc;
>  }
>  
> -static int sched_params_valid(libxl_domain_sched_params *scp)
> -{
> -    int has_weight = scp->weight != LIBXL_DOMAIN_SCHED_PARAM_WEIGHT_DEFAULT;
> -    int has_period = scp->period != LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT;
> -    int has_slice = scp->slice != LIBXL_DOMAIN_SCHED_PARAM_SLICE_DEFAULT;
> -    int has_extratime =
> -        scp->extratime != LIBXL_DOMAIN_SCHED_PARAM_EXTRATIME_DEFAULT;
> -    libxl_domain_sched_params sci;
> -
> -    libxl_domain_sched_params_get(ctx, domid, &sci);
> -
> -    /* The sedf scheduler needs some more consistency checking */
> -    if (sci.sched == LIBXL_SCHEDULER_SEDF) {
> -        if (has_weight && (has_period || has_slice))
> -            return 0;
> -        if (has_period != has_slice)
> -            return 0;
> -
> -        /*
> -         * Idea is, if we specify a weight, then both period and
> -         * slice has to be zero. OTOH, if we do not specify a weight,
> -         * that means we want a pure best effort domain or an actual
> -         * real-time one. In the former case, it is period that needs
> -         * to be zero, in the latter, weight should be.
> -         */
> -        if (has_weight) {
> -            scp->slice = 0;
> -            scp->period = 0;
> -        }
> -        else if (!has_period) {
> -            /* We can setup a proper best effort domain (extra time only)
> -             * iff we either already have or are asking for some extra time. */
> -            scp->weight = has_extratime ? scp->extratime : sci.extratime;
> -            scp->period = 0;
> -        }
> -        if (has_period && has_slice)
> -            scp->weight = 0;
> -    }
> -
> -    return 1;
> -}
> -
>  static void parse_config_data(const char *config_source,
>                                const char *config_data,
>                                int config_len,
> @@ -686,10 +644,6 @@ static void parse_config_data(const char
>          b_info->sched_params.latency = l;
>      if (!xlu_cfg_get_long (config, "extratime", &l, 0))
>          b_info->sched_params.extratime = l;
> -    if (!sched_params_valid(&b_info->sched_params)) {
> -        fprintf(stderr, "Invalid scheduling parameters\n");
> -        exit(1);
> -    }
>  
>      if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) {
>          b_info->max_vcpus = l;
>