
[Xen-changelog] [xen-unstable] libxl: set domain scheduling parameters while creating the domU


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Mon, 14 May 2012 16:32:03 +0000
  • Delivery-date: Mon, 14 May 2012 16:32:09 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Dieter Bloms <dieter@xxxxxxxx>
# Date 1335350306 -3600
# Node ID e428eae1838c0c384a6f311750e3024a257a3793
# Parent  b3caa36ed7e7f33fbb5acf20c0cc709e0bb891ca
libxl: set domain scheduling parameters while creating the domU

The domain-specific scheduling parameters like cpu_weight, cap, slice, ...
are set while creating the domain, so these parameters can be defined
in the domain config file.
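
For illustration, a guest config might now contain entries like the
following (values here are made up; which keys take effect depends on
the scheduler the host is running):

    # credit / credit2 schedulers
    cpu_weight = 512   # twice the CPU share of the default weight 256
    cap = 100          # at most one full physical CPU, even when idle
    # sedf scheduler (times in nanoseconds)
    period = 10000000
    slice = 5000000    # CPU time granted in each period
    extratime = 1      # allow the domain to run in extra time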

[ Improved the documentation wording slightly. -iwj ]

Signed-off-by: Dieter Bloms <dieter@xxxxxxxx>
Acked-by: Ian Campbell <Ian.Campbell@xxxxxxxxxx>
Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Acked-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Committed-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---


diff -r b3caa36ed7e7 -r e428eae1838c docs/man/xl.cfg.pod.5
--- a/docs/man/xl.cfg.pod.5     Wed Apr 25 11:35:42 2012 +0100
+++ b/docs/man/xl.cfg.pod.5     Wed Apr 25 11:38:26 2012 +0100
@@ -112,6 +112,44 @@ List of which cpus the guest is allowed 
 (all vcpus will run on cpus 0,2,3,5), or `cpus=["2", "3"]` (all vcpus
 will run on cpus 2 and 3).
 
+=item B<cpu_weight=WEIGHT>
+
+A domain with a weight of 512 will get twice as much CPU as a domain
+with a weight of 256 on a contended host.
+Legal weights range from 1 to 65535 and the default is 256.
+Honoured by the credit, credit2 and sedf schedulers.
+
+=item B<cap=N>
+
+The cap optionally fixes the maximum amount of CPU a domain will be
+able to consume, even if the host system has idle CPU cycles.
+The cap is expressed in percentage of one physical CPU:
+100 is 1 physical CPU, 50 is half a CPU, 400 is 4 CPUs, etc.
+The default, 0, means there is no upper cap.
+Honoured by the credit and credit2 schedulers.
+
+=item B<period=NANOSECONDS>
+
+The period of the EDF scheduler, in nanoseconds. In each period the
+domain receives the amount of CPU time specified in B<slice>.
+Honoured by the sedf scheduler.
+
+=item B<slice=NANOSECONDS>
+
+The slice of the EDF scheduler, in nanoseconds. It defines the amount
+of CPU time the domain receives in each period.
+Honoured by the sedf scheduler.
+
+=item B<latency=N>
+
+The scaled period used if the domain is doing heavy I/O.
+Honoured by the sedf scheduler.
+
+=item B<extratime=BOOLEAN>
+
+Flag for allowing the domain to run in extra time.
+Honoured by the sedf scheduler.
+
 =item B<memory=MBYTES>
 
 Start the guest with MBYTES megabytes of RAM.
diff -r b3caa36ed7e7 -r e428eae1838c tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c   Wed Apr 25 11:35:42 2012 +0100
+++ b/tools/libxl/libxl_dom.c   Wed Apr 25 11:38:26 2012 +0100
@@ -42,6 +42,40 @@ libxl_domain_type libxl__domain_type(lib
         return LIBXL_DOMAIN_TYPE_PV;
 }
 
+int libxl__sched_set_params(libxl__gc *gc, uint32_t domid, libxl_sched_params *scparams)
+{
+    libxl_ctx *ctx = libxl__gc_owner(gc);
+    libxl_scheduler sched;
+    libxl_sched_sedf_domain sedf_info;
+    libxl_sched_credit_domain credit_info;
+    libxl_sched_credit2_domain credit2_info;
+    int ret;
+
+    sched = libxl_get_scheduler (ctx);
+    switch (sched) {
+    case LIBXL_SCHEDULER_SEDF:
+      sedf_info.period = scparams->period;
+      sedf_info.slice = scparams->slice;
+      sedf_info.latency = scparams->latency;
+      sedf_info.extratime = scparams->extratime;
+      sedf_info.weight = scparams->weight;
+      ret=libxl_sched_sedf_domain_set(ctx, domid, &sedf_info);
+      break;
+    case LIBXL_SCHEDULER_CREDIT:
+      credit_info.weight = scparams->weight;
+      credit_info.cap = scparams->cap;
+      ret=libxl_sched_credit_domain_set(ctx, domid, &credit_info);
+      break;
+    case LIBXL_SCHEDULER_CREDIT2:
+      credit2_info.weight = scparams->weight;
+      ret=libxl_sched_credit2_domain_set(ctx, domid, &credit2_info);
+      break;
+    default:
+      ret=-1;
+    }
+    return ret;
+}
+
 int libxl__domain_shutdown_reason(libxl__gc *gc, uint32_t domid)
 {
     libxl_ctx *ctx = libxl__gc_owner(gc);
@@ -139,6 +173,8 @@ int libxl__build_post(libxl__gc *gc, uin
     char **ents, **hvm_ents;
     int i;
 
+    libxl__sched_set_params (gc, domid, &(info->sched_params));
+
     libxl_cpuid_apply_policy(ctx, domid);
     if (info->cpuid != NULL)
         libxl_cpuid_set(ctx, domid, info->cpuid);
diff -r b3caa36ed7e7 -r e428eae1838c tools/libxl/libxl_internal.h
--- a/tools/libxl/libxl_internal.h      Wed Apr 25 11:35:42 2012 +0100
+++ b/tools/libxl/libxl_internal.h      Wed Apr 25 11:38:26 2012 +0100
@@ -618,6 +618,7 @@ _hidden int libxl__atfork_init(libxl_ctx
 /* from xl_dom */
 _hidden libxl_domain_type libxl__domain_type(libxl__gc *gc, uint32_t domid);
 _hidden int libxl__domain_shutdown_reason(libxl__gc *gc, uint32_t domid);
+_hidden int libxl__sched_set_params(libxl__gc *gc, uint32_t domid, libxl_sched_params *scparams);
 #define LIBXL__DOMAIN_IS_TYPE(gc, domid, type) \
     libxl__domain_type((gc), (domid)) == LIBXL_DOMAIN_TYPE_##type
 typedef struct {
diff -r b3caa36ed7e7 -r e428eae1838c tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl       Wed Apr 25 11:35:42 2012 +0100
+++ b/tools/libxl/libxl_types.idl       Wed Apr 25 11:38:26 2012 +0100
@@ -224,6 +224,17 @@ libxl_domain_create_info = Struct("domai
 
 MemKB = UInt(64, init_val = "LIBXL_MEMKB_DEFAULT")
 
+libxl_sched_params = Struct("sched_params",[
+    ("weight",       integer),
+    ("cap",          integer),
+    ("tslice_ms",    integer),
+    ("ratelimit_us", integer),
+    ("period",       integer),
+    ("slice",        integer),
+    ("latency",      integer),
+    ("extratime",    integer),
+    ], dir=DIR_IN)
+
 # Instances of libxl_file_reference contained in this struct which
 # have been mapped (with libxl_file_reference_map) will be unmapped
 # by libxl_domain_build/restore. If either of these are never called
@@ -255,6 +266,8 @@ libxl_domain_build_info = Struct("domain
     ("extra_pv",         libxl_string_list),
     # extra parameters pass directly to qemu for HVM guest, NULL terminated
     ("extra_hvm",        libxl_string_list),
+    # parameters for all types of scheduler
+    ("sched_params",     libxl_sched_params),
 
     ("u", KeyedUnion(None, libxl_domain_type, "type",
                 [("hvm", Struct(None, [("firmware",         string),
diff -r b3caa36ed7e7 -r e428eae1838c tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c  Wed Apr 25 11:35:42 2012 +0100
+++ b/tools/libxl/xl_cmdimpl.c  Wed Apr 25 11:38:26 2012 +0100
@@ -597,6 +597,23 @@ static void parse_config_data(const char
     libxl_domain_build_info_init_type(b_info, c_info->type);
 
     /* the following is the actual config parsing with overriding values in the structures */
+    if (!xlu_cfg_get_long (config, "cpu_weight", &l, 0))
+        b_info->sched_params.weight = l;
+    if (!xlu_cfg_get_long (config, "cap", &l, 0))
+        b_info->sched_params.cap = l;
+    if (!xlu_cfg_get_long (config, "tslice_ms", &l, 0))
+        b_info->sched_params.tslice_ms = l;
+    if (!xlu_cfg_get_long (config, "ratelimit_us", &l, 0))
+        b_info->sched_params.ratelimit_us = l;
+    if (!xlu_cfg_get_long (config, "period", &l, 0))
+        b_info->sched_params.period = l;
+    if (!xlu_cfg_get_long (config, "slice", &l, 0))
+        b_info->sched_params.slice = l;
+    if (!xlu_cfg_get_long (config, "latency", &l, 0))
+        b_info->sched_params.latency = l;
+    if (!xlu_cfg_get_long (config, "extratime", &l, 0))
+        b_info->sched_params.extratime = l;
+
     if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) {
         b_info->max_vcpus = l;
         b_info->cur_vcpus = (1 << l) - 1;
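
As a companion to the diff, here is a self-contained sketch of the
dispatch pattern that libxl__sched_set_params() introduces: query the
active scheduler once, then hand each scheduler only the parameters it
understands. The types and helpers below are simplified stand-ins for
illustration, not the real libxl API.

    #include <stdio.h>

    enum scheduler { SCHED_SEDF, SCHED_CREDIT, SCHED_CREDIT2 };

    struct sched_params {
        int weight, cap;                        /* credit / credit2 */
        int period, slice, latency, extratime;  /* sedf */
    };

    /* stand-in for libxl_get_scheduler(ctx); pretend credit is active */
    static enum scheduler get_scheduler(void) { return SCHED_CREDIT; }

    /* mirror of the switch in libxl__sched_set_params() */
    static int set_sched_params(const struct sched_params *p)
    {
        switch (get_scheduler()) {
        case SCHED_SEDF:
            printf("sedf: period=%d slice=%d latency=%d extratime=%d weight=%d\n",
                   p->period, p->slice, p->latency, p->extratime, p->weight);
            return 0;
        case SCHED_CREDIT:
            printf("credit: weight=%d cap=%d\n", p->weight, p->cap);
            return 0;
        case SCHED_CREDIT2:
            printf("credit2: weight=%d\n", p->weight);
            return 0;
        default:
            return -1;  /* unknown scheduler, as in the patch */
        }
    }

    int main(void)
    {
        struct sched_params p = { .weight = 512, .cap = 100 };
        return set_sched_params(&p);
    }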
