[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH 3/4] Updated comments/variables to reflect cbs, fixed formatting and confusing comments/variables



From: Robbie VanVossen <robert.vanvossen@xxxxxxxxxxxxxxx>

---
 docs/man/xl.cfg.pod.5               |   12 +-
 docs/man/xl.pod.1                   |    4 +-
 docs/man/xlcpupool.cfg.pod.5        |    4 +-
 docs/misc/xen-command-line.markdown |    2 +-
 tools/examples/cpupool              |    2 +-
 tools/libxc/xc_sedf.c               |   24 +-
 tools/libxc/xenctrl.h               |    8 +-
 tools/libxl/libxl.c                 |   36 +--
 tools/libxl/libxl.h                 |    2 +-
 tools/libxl/libxl_types.idl         |    4 +-
 tools/libxl/xl.h                    |    2 +-
 tools/libxl/xl_cmdimpl.c            |   60 ++--
 tools/libxl/xl_cmdtable.c           |   12 +-
 tools/python/README.XendConfig      |    4 +-
 tools/python/xen/lowlevel/xc/xc.c   |   34 +-
 xen/common/sched_sedf.c             |  599 +++++++++++++++++------------------
 xen/common/schedule.c               |    2 +-
 xen/include/public/domctl.h         |    8 +-
 xen/include/public/trace.h          |    2 +-
 xen/include/xen/sched-if.h          |    2 +-
 20 files changed, 400 insertions(+), 423 deletions(-)

diff --git a/docs/man/xl.cfg.pod.5 b/docs/man/xl.cfg.pod.5
index 5c55298..b18b157 100755
--- a/docs/man/xl.cfg.pod.5
+++ b/docs/man/xl.cfg.pod.5
@@ -176,7 +176,7 @@ details.
 A domain with a weight of 512 will get twice as much CPU as a domain
 with a weight of 256 on a contended host.
 Legal weights range from 1 to 65535 and the default is 256.
-Honoured by the credit, credit2 and sedf schedulers.
+Honoured by the credit, credit2 and cbs schedulers.
 
 =item B<cap=N>
 
@@ -203,19 +203,19 @@ your BIOS.
 =item B<period=NANOSECONDS>
 
 The normal EDF scheduling usage in nanoseconds. This means every period
-the domain gets cpu time defined in slice.
-Honoured by the sedf scheduler.
+the domain gets cpu time defined in budget.
+Honoured by the cbs scheduler.
 
-=item B<slice=NANOSECONDS>
+=item B<budget=NANOSECONDS>
 
 The normal EDF scheduling usage in nanoseconds. it defines the time 
 a domain get every period time.
-Honoured by the sedf scheduler.
+Honoured by the cbs scheduler.
 
 =item B<soft=BOOLEAN>
 
 Flag for setting a domain or VCPU to run as a soft task.
-Honoured by the sedf scheduler.
+Honoured by the cbs scheduler.
 
 =back
 
diff --git a/docs/man/xl.pod.1 b/docs/man/xl.pod.1
index 30bd4bf..e960436 100644
--- a/docs/man/xl.pod.1
+++ b/docs/man/xl.pod.1
@@ -977,12 +977,12 @@ Restrict output to domains in the specified cpupool.
 
 =back
 
-=item B<sched-sedf> [I<OPTIONS>]
+=item B<sched-cbs> [I<OPTIONS>]
 
 Set or get Simple EDF (Earliest Deadline First) scheduler parameters. This
 scheduler provides weighted CPU sharing in an intuitive way and uses
 realtime-algorithms to ensure time guarantees.  For more information see
-docs/misc/sedf_scheduler_mini-HOWTO.txt in the Xen distribution.
+docs/misc/cbs_scheduler_mini-HOWTO.txt in the Xen distribution.
 
 B<OPTIONS>
 
diff --git a/docs/man/xlcpupool.cfg.pod.5 b/docs/man/xlcpupool.cfg.pod.5
index e32ce17..dd299ac 100644
--- a/docs/man/xlcpupool.cfg.pod.5
+++ b/docs/man/xlcpupool.cfg.pod.5
@@ -74,9 +74,9 @@ the credit scheduler
 
 the credit2 scheduler
 
-=item B<sedf>
+=item B<cbs>
 
-the SEDF scheduler
+the CBS scheduler
 
 =back
 
diff --git a/docs/misc/xen-command-line.markdown 
b/docs/misc/xen-command-line.markdown
index a7ac53d..8bbc151 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -858,7 +858,7 @@ Specify the host reboot method.
 `acpi` instructs Xen to reboot the host using RESET_REG in the ACPI FADT.
 
 ### sched
-> `= credit | credit2 | sedf | arinc653`
+> `= credit | credit2 | cbs | arinc653`
 
 > Default: `sched=credit`
 
diff --git a/tools/examples/cpupool b/tools/examples/cpupool
index 01e62c8..22d0d84 100644
--- a/tools/examples/cpupool
+++ b/tools/examples/cpupool
@@ -9,7 +9,7 @@
 # the name of the new cpupool
 name = "Example-Cpupool"
 
-# the scheduler to use: valid are e.g. credit, sedf, credit2
+# the scheduler to use: valid are e.g. credit, cbs, credit2
 sched = "credit"
 
 # list of cpus to use
diff --git a/tools/libxc/xc_sedf.c b/tools/libxc/xc_sedf.c
index 81ff133..3b578d1 100755
--- a/tools/libxc/xc_sedf.c
+++ b/tools/libxc/xc_sedf.c
@@ -1,7 +1,7 @@
 /******************************************************************************
- * xc_sedf.c
+ * xc_cbs.c
  *
- * API for manipulating parameters of the Simple EDF scheduler.
+ * API for manipulating parameters of the CBS scheduler.
  *
  * changes by Stephan Diestelhorst
  * based on code
@@ -24,47 +24,47 @@
 
 #include "xc_private.h"
 
-int xc_sedf_domain_set(
+int xc_cbs_domain_set(
     xc_interface *xch,
     uint32_t domid,
     uint64_t period,
-    uint64_t slice,
+    uint64_t budget,
     uint16_t soft)
 {
     DECLARE_DOMCTL;
-    struct xen_domctl_sched_sedf *p = &domctl.u.scheduler_op.u.sedf;
+    struct xen_domctl_sched_cbs *p = &domctl.u.scheduler_op.u.cbs;
 
     domctl.cmd = XEN_DOMCTL_scheduler_op;
     domctl.domain  = (domid_t)domid;
-    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_SEDF;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CBS;
     domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
 
     p->period    = period;
-    p->slice     = slice;
+    p->budget    = budget;
     p->soft      = soft;
     return do_domctl(xch, &domctl);
 }
 
-int xc_sedf_domain_get(
+int xc_cbs_domain_get(
     xc_interface *xch,
     uint32_t domid,
     uint64_t *period,
-    uint64_t *slice,
+    uint64_t *budget,
     uint16_t *soft)
 {
     DECLARE_DOMCTL;
     int ret;
-    struct xen_domctl_sched_sedf *p = &domctl.u.scheduler_op.u.sedf;
+    struct xen_domctl_sched_cbs *p = &domctl.u.scheduler_op.u.cbs;
 
     domctl.cmd = XEN_DOMCTL_scheduler_op;
     domctl.domain = (domid_t)domid;
-    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_SEDF;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CBS;
     domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
 
     ret = do_domctl(xch, &domctl);
 
     *period    = p->period;
-    *slice     = p->slice;
+    *budget    = p->budget;
     *soft      = p->soft;
     return ret;
 }
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index d5cfdb8..af2cdb2 100755
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -763,16 +763,16 @@ int xc_shadow_control(xc_interface *xch,
                       uint32_t mode,
                       xc_shadow_op_stats_t *stats);
 
-int xc_sedf_domain_set(xc_interface *xch,
+int xc_cbs_domain_set(xc_interface *xch,
                        uint32_t domid,
                        uint64_t period,
-                       uint64_t slice, 
+                       uint64_t budget, 
                        uint16_t soft);
 
-int xc_sedf_domain_get(xc_interface *xch,
+int xc_cbs_domain_get(xc_interface *xch,
                        uint32_t domid,
                        uint64_t *period,
-                       uint64_t *slice,
+                       uint64_t *budget,
                        uint16_t *soft);
 
 int xc_sched_credit_domain_set(xc_interface *xch,
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index cea8af2..a862cb5 100755
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -4927,54 +4927,54 @@ static int sched_credit2_domain_set(libxl__gc *gc, 
uint32_t domid,
     return 0;
 }
 
-static int sched_sedf_domain_get(libxl__gc *gc, uint32_t domid,
+static int sched_cbs_domain_get(libxl__gc *gc, uint32_t domid,
                                  libxl_domain_sched_params *scinfo)
 {
     uint64_t period;
-    uint64_t slice;
+    uint64_t budget;
     uint16_t soft;
     int rc;
 
-    rc = xc_sedf_domain_get(CTX->xch, domid, &period, &slice, &soft);
+    rc = xc_cbs_domain_get(CTX->xch, domid, &period, &budget, &soft);
     if (rc != 0) {
-        LOGE(ERROR, "getting domain sched sedf");
+        LOGE(ERROR, "getting domain sched cbs");
         return ERROR_FAIL;
     }
 
     libxl_domain_sched_params_init(scinfo);
-    scinfo->sched = LIBXL_SCHEDULER_SEDF;
+    scinfo->sched = LIBXL_SCHEDULER_CBS;
     scinfo->period = period / 1000000;
-    scinfo->slice = slice / 1000000;
+    scinfo->budget = budget / 1000000;
     scinfo->soft = soft;
 
     return 0;
 }
 
-static int sched_sedf_domain_set(libxl__gc *gc, uint32_t domid,
+static int sched_cbs_domain_set(libxl__gc *gc, uint32_t domid,
                                  const libxl_domain_sched_params *scinfo)
 {
     uint64_t period;
-    uint64_t slice;
+    uint64_t budget;
     uint16_t soft;
 
     int ret;
 
-    ret = xc_sedf_domain_get(CTX->xch, domid, &period, &slice, &soft);
+    ret = xc_cbs_domain_get(CTX->xch, domid, &period, &budget, &soft);
     if (ret != 0) {
-        LOGE(ERROR, "getting domain sched sedf");
+        LOGE(ERROR, "getting domain sched cbs");
         return ERROR_FAIL;
     }
 
     if (scinfo->period != LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT)
         period = (uint64_t)scinfo->period * 1000000;
-    if (scinfo->slice != LIBXL_DOMAIN_SCHED_PARAM_SLICE_DEFAULT)
-        slice = (uint64_t)scinfo->slice * 1000000;
+    if (scinfo->budget != LIBXL_DOMAIN_SCHED_PARAM_BUDGET_DEFAULT)
+        budget = (uint64_t)scinfo->budget * 1000000;
     if (scinfo->soft != LIBXL_DOMAIN_SCHED_PARAM_SOFT_DEFAULT)
         soft = scinfo->soft;
 
-    ret = xc_sedf_domain_set(CTX->xch, domid, period, slice, soft);
+    ret = xc_cbs_domain_set(CTX->xch, domid, period, budget, soft);
     if ( ret < 0 ) {
-        LOGE(ERROR, "setting domain sched sedf");
+        LOGE(ERROR, "setting domain sched cbs");
         return ERROR_FAIL;
     }
 
@@ -4992,8 +4992,8 @@ int libxl_domain_sched_params_set(libxl_ctx *ctx, 
uint32_t domid,
         sched = libxl__domain_scheduler(gc, domid);
 
     switch (sched) {
-    case LIBXL_SCHEDULER_SEDF:
-        ret=sched_sedf_domain_set(gc, domid, scinfo);
+    case LIBXL_SCHEDULER_CBS:
+        ret=sched_cbs_domain_set(gc, domid, scinfo);
         break;
     case LIBXL_SCHEDULER_CREDIT:
         ret=sched_credit_domain_set(gc, domid, scinfo);
@@ -5025,8 +5025,8 @@ int libxl_domain_sched_params_get(libxl_ctx *ctx, 
uint32_t domid,
     scinfo->sched = libxl__domain_scheduler(gc, domid);
 
     switch (scinfo->sched) {
-    case LIBXL_SCHEDULER_SEDF:
-        ret=sched_sedf_domain_get(gc, domid, scinfo);
+    case LIBXL_SCHEDULER_CBS:
+        ret=sched_cbs_domain_get(gc, domid, scinfo);
         break;
     case LIBXL_SCHEDULER_CREDIT:
         ret=sched_credit_domain_get(gc, domid, scinfo);
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index 548d37e..8b26643 100755
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -1119,7 +1119,7 @@ int libxl_sched_credit_params_set(libxl_ctx *ctx, 
uint32_t poolid,
 #define LIBXL_DOMAIN_SCHED_PARAM_WEIGHT_DEFAULT    -1
 #define LIBXL_DOMAIN_SCHED_PARAM_CAP_DEFAULT       -1
 #define LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT    -1
-#define LIBXL_DOMAIN_SCHED_PARAM_SLICE_DEFAULT     -1
+#define LIBXL_DOMAIN_SCHED_PARAM_BUDGET_DEFAULT    -1
 #define LIBXL_DOMAIN_SCHED_PARAM_SOFT_DEFAULT      -1
 
 int libxl_domain_sched_params_get(libxl_ctx *ctx, uint32_t domid,
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index 3ec2f80..a212e33 100755
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -134,7 +134,7 @@ libxl_bios_type = Enumeration("bios_type", [
 # Except unknown which we have made up
 libxl_scheduler = Enumeration("scheduler", [
     (0, "unknown"),
-    (4, "sedf"),
+    (4, "cbs"),
     (5, "credit"),
     (6, "credit2"),
     (7, "arinc653"),
@@ -291,7 +291,7 @@ libxl_domain_sched_params = Struct("domain_sched_params",[
     ("weight",       integer, {'init_val': 
'LIBXL_DOMAIN_SCHED_PARAM_WEIGHT_DEFAULT'}),
     ("cap",          integer, {'init_val': 
'LIBXL_DOMAIN_SCHED_PARAM_CAP_DEFAULT'}),
     ("period",       integer, {'init_val': 
'LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT'}),
-    ("slice",        integer, {'init_val': 
'LIBXL_DOMAIN_SCHED_PARAM_SLICE_DEFAULT'}),
+    ("budget",       integer, {'init_val': 
'LIBXL_DOMAIN_SCHED_PARAM_BUDGET_DEFAULT'}),
     ("soft",         integer, {'init_val': 
'LIBXL_DOMAIN_SCHED_PARAM_SOFT_DEFAULT'}),
     ])
 
diff --git a/tools/libxl/xl.h b/tools/libxl/xl.h
index 10a2e66..f7c73cc 100644
--- a/tools/libxl/xl.h
+++ b/tools/libxl/xl.h
@@ -66,7 +66,7 @@ int main_memmax(int argc, char **argv);
 int main_memset(int argc, char **argv);
 int main_sched_credit(int argc, char **argv);
 int main_sched_credit2(int argc, char **argv);
-int main_sched_sedf(int argc, char **argv);
+int main_sched_cbs(int argc, char **argv);
 int main_domid(int argc, char **argv);
 int main_domname(int argc, char **argv);
 int main_rename(int argc, char **argv);
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index e06f924..0c1959c 100755
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -836,8 +836,8 @@ static void parse_config_data(const char *config_source,
         b_info->sched_params.cap = l;
     if (!xlu_cfg_get_long (config, "period", &l, 0))
         b_info->sched_params.period = l;
-    if (!xlu_cfg_get_long (config, "slice", &l, 0))
-        b_info->sched_params.slice = l;
+    if (!xlu_cfg_get_long (config, "budget", &l, 0))
+        b_info->sched_params.budget = l;
     if (!xlu_cfg_get_long (config, "soft", &l, 0))
         b_info->sched_params.soft = l;
 
@@ -5171,7 +5171,7 @@ static int sched_credit2_domain_output(
     return 0;
 }
 
-static int sched_sedf_domain_output(
+static int sched_cbs_domain_output(
     int domid)
 {
     char *domname;
@@ -5180,10 +5180,10 @@ static int sched_sedf_domain_output(
 
     if (domid < 0) {
         printf("%-33s %4s %6s %-6s %5s\n", "Name", "ID", "Period",
-               "Slice", "Soft");
+               "Budget", "Soft");
         return 0;
     }
-    rc = sched_domain_get(LIBXL_SCHEDULER_SEDF, domid, &scinfo);
+    rc = sched_domain_get(LIBXL_SCHEDULER_CBS, domid, &scinfo);
     if (rc)
         return rc;
     domname = libxl_domid_to_name(ctx, domid);
@@ -5191,7 +5191,7 @@ static int sched_sedf_domain_output(
         domname,
         domid,
         scinfo.period,
-        scinfo.slice,
+        scinfo.budget,
         scinfo.soft);
     free(domname);
     libxl_domain_sched_params_dispose(&scinfo);
@@ -5456,24 +5456,24 @@ int main_sched_credit2(int argc, char **argv)
     return 0;
 }
 
-int main_sched_sedf(int argc, char **argv)
+int main_sched_cbs(int argc, char **argv)
 {
     const char *dom = NULL;
     const char *cpupool = NULL;
     int period = 0, opt_p = 0;
-    int slice = 0, opt_s = 0;
-    int soft = 0, opt_t = 0;
+    int budget = 0, opt_b = 0;
+    int soft = 0, opt_s = 0;
     int opt, rc;
     static struct option opts[] = {
         {"period", 1, 0, 'p'},
-        {"slice", 1, 0, 's'},
-        {"soft", 1, 0, 't'},
+        {"budget", 1, 0, 'b'},
+        {"soft", 1, 0, 's'},
         {"cpupool", 1, 0, 'c'},
         COMMON_LONG_OPTS,
         {0, 0, 0, 0}
     };
 
-    SWITCH_FOREACH_OPT(opt, "d:p:s:t:c:h", opts, "sched-sedf", 0) {
+    SWITCH_FOREACH_OPT(opt, "d:p:b:s:c:h", opts, "sched-cbs", 0) {
     case 'd':
         dom = optarg;
         break;
@@ -5481,53 +5481,53 @@ int main_sched_sedf(int argc, char **argv)
         period = strtol(optarg, NULL, 10);
         opt_p = 1;
         break;
-    case 's':
-        slice = strtol(optarg, NULL, 10);
-        opt_s = 1;
+    case 'b':
+        budget = strtol(optarg, NULL, 10);
+        opt_b = 1;
         break;
-    case 't':
+    case 's':
         soft = strtol(optarg, NULL, 10);
-        opt_t = 1;
+        opt_s = 1;
         break;
     case 'c':
         cpupool = optarg;
         break;
     }
 
-    if (cpupool && (dom || opt_p || opt_s || opt_t)) {
+    if (cpupool && (dom || opt_p || opt_b || opt_s)) {
         fprintf(stderr, "Specifying a cpupool is not allowed with other "
                 "options.\n");
         return 1;
     }
-    if (!dom && (opt_p || opt_s || opt_t)) {
+    if (!dom && (opt_p || opt_b || opt_s)) {
         fprintf(stderr, "Must specify a domain.\n");
         return 1;
     }
 
     if (!dom) { /* list all domain's credit scheduler info */
-        return -sched_domain_output(LIBXL_SCHEDULER_SEDF,
-                                    sched_sedf_domain_output,
+        return -sched_domain_output(LIBXL_SCHEDULER_CBS,
+                                    sched_cbs_domain_output,
                                     sched_default_pool_output,
                                     cpupool);
     } else {
         uint32_t domid = find_domain(dom);
 
-        if (!opt_p && !opt_s) {
-            /* output sedf scheduler info */
-            sched_sedf_domain_output(-1);
-            return -sched_sedf_domain_output(domid);
-        } else { /* set sedf scheduler paramaters */
+        if (!opt_p && !opt_b) {
+            /* output cbs scheduler info */
+            sched_cbs_domain_output(-1);
+            return -sched_cbs_domain_output(domid);
+        } else { /* set cbs scheduler parameters */
             libxl_domain_sched_params scinfo;
             libxl_domain_sched_params_init(&scinfo);
-            scinfo.sched = LIBXL_SCHEDULER_SEDF;
+            scinfo.sched = LIBXL_SCHEDULER_CBS;
 
             if (opt_p) {
                 scinfo.period = period;
             }
-            if (opt_s) {
-                scinfo.slice = slice;
+            if (opt_b) {
+                scinfo.budget = budget;
             }
-            if (opt_t) {
+            if (opt_s) {
                 scinfo.soft = soft;
             }
             rc = sched_domain_set(domid, &scinfo);
diff --git a/tools/libxl/xl_cmdtable.c b/tools/libxl/xl_cmdtable.c
index 1226fb8..3507468 100755
--- a/tools/libxl/xl_cmdtable.c
+++ b/tools/libxl/xl_cmdtable.c
@@ -261,15 +261,15 @@ struct cmd_spec cmd_table[] = {
       "-w WEIGHT, --weight=WEIGHT     Weight (int)\n"
       "-p CPUPOOL, --cpupool=CPUPOOL  Restrict output to CPUPOOL"
     },
-    { "sched-sedf",
-      &main_sched_sedf, 0, 1,
-      "Get/set sedf scheduler parameters",
+    { "sched-cbs",
+      &main_sched_cbs, 0, 1,
+      "Get/set cbs scheduler parameters",
       "[options]",
       "-d DOMAIN, --domain=DOMAIN     Domain to modify\n"
       "-p MS, --period=MS             Relative deadline(ms)\n"
-      "-s MS, --slice=MS              Worst-case execution time(ms).\n"
-      "                               (slice < period)\n"
-      "-t FLAG, --soft=FLAG           Flag (0 or 1) controls if domain\n"
+      "-b MS, --budget=MS             Constant bandwidth server budget(ms).\n"
+      "                               (budget < period)\n"
+      "-s FLAG, --soft=FLAG           Flag (0 or 1) controls if domain\n"
       "                               can run as a soft task\n"
       "-c CPUPOOL, --cpupool=CPUPOOL  Restrict output to CPUPOOL"
     },
diff --git a/tools/python/README.XendConfig b/tools/python/README.XendConfig
index 338715b..fbf3867 100644
--- a/tools/python/README.XendConfig
+++ b/tools/python/README.XendConfig
@@ -32,8 +32,8 @@ memory_static_max               maxmem
 memory_actual
 memory_dynamic_min
 memory_dynamic_max
-vcpus_policy                    !set_credit/set_sedf
-vcpus_params                    !set_credit/set_sedf
+vcpus_policy                    !set_credit/set_cbs
+vcpus_params                    !set_credit/set_cbs
 vcpus_number                    vcpus
 vcpus_utilisation               
 vcpus_features_required
diff --git a/tools/python/xen/lowlevel/xc/xc.c 
b/tools/python/xen/lowlevel/xc/xc.c
index bf10165..198bfe2 100755
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -1466,45 +1466,45 @@ static PyObject *pyxc_xeninfo(XcObject *self)
 }
 
 
-static PyObject *pyxc_sedf_domain_set(XcObject *self,
+static PyObject *pyxc_cbs_domain_set(XcObject *self,
                                       PyObject *args,
                                       PyObject *kwds)
 {
     uint32_t domid;
-    uint64_t period, slice;
+    uint64_t period, budget;
     uint16_t soft;
 
-    static char *kwd_list[] = { "domid", "period", "slice", "soft",NULL };
+    static char *kwd_list[] = { "domid", "period", "budget", "soft",NULL };
     
     if( !PyArg_ParseTupleAndKeywords(args, kwds, "iLLi", kwd_list, 
-                                     &domid, &period, &slice, &soft) )
+                                     &domid, &period, &budget, &soft) )
 
         return NULL;
-   if ( xc_sedf_domain_set(self->xc_handle, domid, period,
-                           slice, soft) != 0 )
+   if ( xc_cbs_domain_set(self->xc_handle, domid, period,
+                           budget, soft) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
     Py_INCREF(zero);
     return zero;
 }
 
-static PyObject *pyxc_sedf_domain_get(XcObject *self, PyObject *args)
+static PyObject *pyxc_cbs_domain_get(XcObject *self, PyObject *args)
 {
     uint32_t domid;
-    uint64_t period, slice;
+    uint64_t period, budget;
     uint16_t soft;
     
     if(!PyArg_ParseTuple(args, "i", &domid))
         return NULL;
     
-    if (xc_sedf_domain_get(self->xc_handle, domid, &period,
-                           &slice, &soft))
+    if (xc_cbs_domain_get(self->xc_handle, domid, &period,
+                           &budget, &soft))
         return pyxc_error_to_exception(self->xc_handle);
 
     return Py_BuildValue("{s:i,s:L,s:L,s:i}",
-                         "domid",    domid,
+                         "domid",     domid,
                          "period",    period,
-                         "slice",     slice,
+                         "budget",    budget,
                          "soft",      soft);
 }
 
@@ -2535,8 +2535,8 @@ static PyMethodDef pyxc_methods[] = {
       "Get the current scheduler type in use.\n"
       "Returns: [int] sched_id.\n" },    
 
-    { "sedf_domain_set",
-      (PyCFunction)pyxc_sedf_domain_set,
+    { "cbs_domain_set",
+      (PyCFunction)pyxc_cbs_domain_set,
       METH_KEYWORDS, "\n"
       "Set the scheduling parameters for a domain when running with Atropos.\n"
       " dom       [int]:  domain to set\n"
@@ -2545,8 +2545,8 @@ static PyMethodDef pyxc_methods[] = {
       " soft      [int]:  domain is a soft task?\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "sedf_domain_get",
-      (PyCFunction)pyxc_sedf_domain_get,
+    { "cbs_domain_get",
+      (PyCFunction)pyxc_cbs_domain_get,
       METH_VARARGS, "\n"
       "Get the current scheduling parameters for a domain when running with\n"
       "the Atropos scheduler."
@@ -3076,7 +3076,7 @@ PyMODINIT_FUNC initxc(void)
     PyModule_AddObject(m, "Error", xc_error_obj);
 
     /* Expose some libxc constants to Python */
-    PyModule_AddIntConstant(m, "XEN_SCHEDULER_SEDF", XEN_SCHEDULER_SEDF);
+    PyModule_AddIntConstant(m, "XEN_SCHEDULER_CBS", XEN_SCHEDULER_CBS);
     PyModule_AddIntConstant(m, "XEN_SCHEDULER_CREDIT", XEN_SCHEDULER_CREDIT);
     PyModule_AddIntConstant(m, "XEN_SCHEDULER_CREDIT2", XEN_SCHEDULER_CREDIT2);
 
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 2ee4538..5df4825 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -1,8 +1,29 @@
 /******************************************************************************
- * Simple EDF scheduler for xen
+ * Constant Bandwidth Server Scheduler for Xen
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * by DornerWorks Ltd. (C) 2014 Grand Rapids, MI
+ *
+ * Adapted from code by Stephan Diestelhorst (C) 2004 Cambridge University
+ *                       and Mark Williamson (C) 2004 Intel Research Cambridge
  *
- * by Stephan Diestelhorst (C)  2004 Cambridge University
- * based on code by Mark Williamson (C) 2004 Intel Research Cambridge
  */
 
 #include <xen/lib.h>
@@ -14,7 +35,7 @@
 #include <xen/errno.h>
 
 #ifndef NDEBUG
-#define SEDF_STATS
+#define CBS_STATS
 #define CHECK(_p)                                           \
     do {                                                    \
         if ( !(_p) )                                        \
@@ -25,48 +46,49 @@
 #define CHECK(_p) ((void)0)
 #endif
 
-#define SEDF_SOFT_TASK (1)
-#define SEDF_ASLEEP (16)
+#define CBS_SOFT_TASK (1)
+#define CBS_ASLEEP (16)
 
 #define DEFAULT_PERIOD (MILLISECS(20))
-#define DEFAULT_SLICE (MILLISECS(10))
+#define DEFAULT_BUDGET (MILLISECS(10))
 
 #define PERIOD_MAX MILLISECS(10000) /* 10s  */
 #define PERIOD_MIN (MICROSECS(10))  /* 10us */
-#define SLICE_MIN (MICROSECS(5))    /*  5us */
+#define BUDGET_MIN (MICROSECS(5))    /*  5us */
 
-#define EQ(a, b) ((!!(a)) == (!!(b)))
+#define EQ(_A, _B) ((!!(_A)) == (!!(_B)))
 
 
-struct sedf_dom_info {
+struct cbs_dom_info {
     struct domain  *domain;
 };
 
-struct sedf_priv_info {
+struct cbs_priv_info {
     /* lock for the whole pluggable scheduler, nests inside cpupool_lock */
     spinlock_t lock;
 };
 
-struct sedf_vcpu_info {
+struct cbs_vcpu_info {
     struct vcpu *vcpu;
     struct list_head list;
  
-    /* Parameters for EDF */
-    s_time_t  period;  /* = relative deadline */
-    s_time_t  slice;   /* = worst case execution time */
-    /* Note: Server bandwidth = (slice / period) */
 
-    /* Status of domain */
+    /* Parameters for EDF-CBS */
+    s_time_t  period;  /* = Server scheduling period */
+    s_time_t  budget;  /* = Guaranteed minimum CPU time per period */
+    /* Note: Server bandwidth = (budget / period) */
+ 
+    /* Status of vcpu */
     int       status;
     /* Bookkeeping */
     s_time_t  deadl_abs;
     s_time_t  sched_start_abs;
     s_time_t  cputime;
-    /* Times the domain un-/blocked */
+    /* Times the vcpu un-/blocked */
     s_time_t  block_abs;
     s_time_t  unblock_abs;
  
-#ifdef SEDF_STATS
+#ifdef CBS_STATS
     s_time_t  block_time_tot;
     int   block_tot;
     int   short_block_tot;
@@ -78,45 +100,44 @@ struct sedf_vcpu_info {
 #endif
 };
 
-struct sedf_cpu_info {
+struct cbs_cpu_info {
     struct list_head runnableq;
     struct list_head waitq;
-    s_time_t         current_slice_expires;
+    s_time_t         current_budget_expires;
 };
 
-#define SEDF_PRIV(_ops) \
-    ((struct sedf_priv_info *)((_ops)->sched_data))
-#define EDOM_INFO(d)   ((struct sedf_vcpu_info *)((d)->sched_priv))
-#define CPU_INFO(cpu)  \
-    ((struct sedf_cpu_info *)per_cpu(schedule_data, cpu).sched_priv)
-#define LIST(d)        (&EDOM_INFO(d)->list)
-#define RUNQ(cpu)      (&CPU_INFO(cpu)->runnableq)
-#define WAITQ(cpu)     (&CPU_INFO(cpu)->waitq)
-#define IDLETASK(cpu)  (idle_vcpu[cpu])
+#define CBS_PRIV(_ops) \
+    ((struct cbs_priv_info *)((_ops)->sched_data))
+#define CBS_VCPU(_vcpu)   ((struct cbs_vcpu_info *)((_vcpu)->sched_priv))
+#define CBS_PCPU(_cpu)  \
+    ((struct cbs_cpu_info *)per_cpu(schedule_data, _cpu).sched_priv)
+#define LIST(_vcpu)        (&CBS_VCPU(_vcpu)->list)
+#define RUNQ(_cpu)      (&CBS_PCPU(_cpu)->runnableq)
+#define WAITQ(_cpu)     (&CBS_PCPU(_cpu)->waitq)
+#define IDLETASK(_cpu)  (idle_vcpu[_cpu])
 
 #define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
 
-#define DIV_UP(x,y) (((x) + (y) - 1) / y)
+#define DIV_UP(_X, _Y) (((_X) + (_Y) - 1) / _Y)
 
-#define sedf_runnable(edom)  (!(EDOM_INFO(edom)->status & SEDF_ASLEEP))
+#define cbs_runnable(edom)  (!(CBS_VCPU(edom)->status & CBS_ASLEEP))
 
-#define sedf_soft(edom)  (EDOM_INFO(edom)->status & SEDF_SOFT_TASK)
+#define cbs_soft(edom)  (CBS_VCPU(edom)->status & CBS_SOFT_TASK)
 
+static void cbs_dump_cpu_state(const struct scheduler *ops, int cpu);
 
-static void sedf_dump_cpu_state(const struct scheduler *ops, int i);
-
-static inline int __task_on_queue(struct vcpu *d)
+static inline int __task_on_queue(struct vcpu *v)
 {
-    return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d)));
+    return (((LIST(v))->next != NULL) && (LIST(v)->next != LIST(v)));
 }
 
-static inline void __del_from_queue(struct vcpu *d)
+static inline void __del_from_queue(struct vcpu *v)
 {
-    struct list_head *list = LIST(d);
-    ASSERT(__task_on_queue(d));
+    struct list_head *list = LIST(v);
+    ASSERT(__task_on_queue(v));
     list_del(list);
     list->next = NULL;
-    ASSERT(!__task_on_queue(d));
+    ASSERT(!__task_on_queue(v));
 }
 
 typedef int(*list_comparer)(struct list_head* el1, struct list_head* el2);
@@ -135,12 +156,12 @@ static inline void list_insert_sort(
     list_add(element, cur->prev);
 }
 
-#define DOMAIN_COMPARER(name, field, comp1, comp2)                      \
+#define VCPU_COMPARER(name, field, comp1, comp2)                      \
 static int name##_comp(struct list_head* el1, struct list_head* el2)    \
 {                                                                       \
-    struct sedf_vcpu_info *d1, *d2;                                     \
-    d1 = list_entry(el1,struct sedf_vcpu_info, field);                  \
-    d2 = list_entry(el2,struct sedf_vcpu_info, field);                  \
+    struct cbs_vcpu_info *v1, *v2;                                     \
+    v1 = list_entry(el1, struct cbs_vcpu_info, field);                  \
+    v2 = list_entry(el2, struct cbs_vcpu_info, field);                  \
     if ( (comp1) == (comp2) )                                           \
         return 0;                                                       \
     if ( (comp1) < (comp2) )                                            \
@@ -150,11 +171,11 @@ static int name##_comp(struct list_head* el1, struct 
list_head* el2)    \
 }
 
 /*
- * Adds a domain to the queue of processes which wait for the beginning of the
+ * Adds a vcpu to the queue of processes which wait for the beginning of the
  * next period; this list is therefore sortet by this time, which is simply
  * absol. deadline - period.
  */ 
-DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2));
+VCPU_COMPARER(waitq, list, PERIOD_BEGIN(v1), PERIOD_BEGIN(v2));
 static inline void __add_to_waitqueue_sort(struct vcpu *v)
 {
     ASSERT(!__task_on_queue(v));
@@ -163,32 +184,32 @@ static inline void __add_to_waitqueue_sort(struct vcpu *v)
 }
 
 /*
- * Adds a domain to the queue of processes which have started their current
+ * Adds a vcpu to the queue of processes which have started their current
  * period and are runnable (i.e. not blocked, dieing,...). The first element
  * on this list is running on the processor, if the list is empty the idle
  * task will run. As we are implementing EDF, this list is sorted by deadlines.
  */ 
-DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs);
+VCPU_COMPARER(runq, list, v1->deadl_abs, v2->deadl_abs);
 static inline void __add_to_runqueue_sort(struct vcpu *v)
 {
     list_insert_sort(RUNQ(v->processor), LIST(v), runq_comp);
 }
 
 
-static void sedf_insert_vcpu(const struct scheduler *ops, struct vcpu *v)
+static void cbs_insert_vcpu(const struct scheduler *ops, struct vcpu *v)
 {
     if ( is_idle_vcpu(v) )
     {
-        EDOM_INFO(v)->deadl_abs = 0;
-        EDOM_INFO(v)->status &= ~SEDF_ASLEEP;
+        CBS_VCPU(v)->deadl_abs = 0;
+        CBS_VCPU(v)->status &= ~CBS_ASLEEP;
     }
 }
 
-static void *sedf_alloc_vdata(const struct scheduler *ops, struct vcpu *v, 
void *dd)
+static void *cbs_alloc_vdata(const struct scheduler *ops, struct vcpu *v, void 
*dd)
 {
-    struct sedf_vcpu_info *inf;
+    struct cbs_vcpu_info *inf;
 
-    inf = xzalloc(struct sedf_vcpu_info);
+    inf = xzalloc(struct cbs_vcpu_info);
     if ( inf == NULL )
         return NULL;
 
@@ -196,18 +217,18 @@ static void *sedf_alloc_vdata(const struct scheduler 
*ops, struct vcpu *v, void
 
     inf->deadl_abs  = 0;
     inf->cputime    = 0;
-    inf->status     = SEDF_ASLEEP;
+    inf->status     = CBS_ASLEEP;
 
     if (v->domain->domain_id == 0)
     {
-        /* Domain 0, needs a slice to boot the machine */
-        inf->period      = DEFAULT_PERIOD;
-        inf->slice       = DEFAULT_SLICE;
+        /* Domain 0, needs a budget to boot the machine */
+        inf->period = DEFAULT_PERIOD;
+        inf->budget = DEFAULT_BUDGET;
     }
     else
     {
-        inf->period      = DEFAULT_PERIOD;
-        inf->slice       = 0;
+        inf->period = DEFAULT_PERIOD;
+        inf->budget = 0;
     }
 
     INIT_LIST_HEAD(&(inf->list));
@@ -218,11 +239,11 @@ static void *sedf_alloc_vdata(const struct scheduler 
*ops, struct vcpu *v, void
 }
 
 static void *
-sedf_alloc_pdata(const struct scheduler *ops, int cpu)
+cbs_alloc_pdata(const struct scheduler *ops, int cpu)
 {
-    struct sedf_cpu_info *spc;
+    struct cbs_cpu_info *spc;
 
-    spc = xzalloc(struct sedf_cpu_info);
+    spc = xzalloc(struct cbs_cpu_info);
     BUG_ON(spc == NULL);
     INIT_LIST_HEAD(&spc->waitq);
     INIT_LIST_HEAD(&spc->runnableq);
@@ -231,7 +252,7 @@ sedf_alloc_pdata(const struct scheduler *ops, int cpu)
 }
 
 static void
-sedf_free_pdata(const struct scheduler *ops, void *spc, int cpu)
+cbs_free_pdata(const struct scheduler *ops, void *spc, int cpu)
 {
     if ( spc == NULL )
         return;
@@ -239,37 +260,37 @@ sedf_free_pdata(const struct scheduler *ops, void *spc, 
int cpu)
     xfree(spc);
 }
 
-static void sedf_free_vdata(const struct scheduler *ops, void *priv)
+static void cbs_free_vdata(const struct scheduler *ops, void *priv)
 {
     xfree(priv);
 }
 
 static void *
-sedf_alloc_domdata(const struct scheduler *ops, struct domain *d)
+cbs_alloc_domdata(const struct scheduler *ops, struct domain *d)
 {
-    return xzalloc(struct sedf_dom_info);
+    return xzalloc(struct cbs_dom_info);
 }
 
-static int sedf_init_domain(const struct scheduler *ops, struct domain *d)
+static int cbs_init_domain(const struct scheduler *ops, struct domain *d)
 {
-    d->sched_priv = sedf_alloc_domdata(ops, d);
+    d->sched_priv = cbs_alloc_domdata(ops, d);
     if ( d->sched_priv == NULL )
         return -ENOMEM;
 
     return 0;
 }
 
-static void sedf_free_domdata(const struct scheduler *ops, void *data)
+static void cbs_free_domdata(const struct scheduler *ops, void *data)
 {
     xfree(data);
 }
 
-static void sedf_destroy_domain(const struct scheduler *ops, struct domain *d)
+static void cbs_destroy_domain(const struct scheduler *ops, struct domain *d)
 {
-    sedf_free_domdata(ops, d->sched_priv);
+    cbs_free_domdata(ops, d->sched_priv);
 }
 
-static int sedf_pick_cpu(const struct scheduler *ops, struct vcpu *v)
+static int cbs_pick_cpu(const struct scheduler *ops, struct vcpu *v)
 {
     cpumask_t online_affinity;
     cpumask_t *online;
@@ -281,27 +302,27 @@ static int sedf_pick_cpu(const struct scheduler *ops, 
struct vcpu *v)
 }
 
 /*
- * Handles the rescheduling & bookkeeping of domains running in their
- * guaranteed timeslice.
+ * Handles the rescheduling & bookkeeping of vcpus running in their
+ * guaranteed time budget.
  */
-static void desched_edf_dom(s_time_t now, struct vcpu* d)
+static void desched_edf_vcpu(s_time_t now, struct vcpu *v)
 {
-    struct sedf_vcpu_info* inf = EDOM_INFO(d);
+    struct cbs_vcpu_info *inf = CBS_VCPU(v);
 
-    /* Current domain is running in real time mode */
-    ASSERT(__task_on_queue(d));
+    /* Current vcpu is running in real time mode */
+    ASSERT(__task_on_queue(v));
 
-    /* Update the domain's cputime */
+    /* Update the vcpu's cputime */
     inf->cputime += now - inf->sched_start_abs;
 
-    /* Scheduling decisions which don't remove the running domain from
+    /* Scheduling decisions which don't remove the running vcpu from
      * the runq */
-    if ( (inf->cputime < inf->slice) && sedf_runnable(d) )
+    if ( (inf->cputime < inf->budget) && cbs_runnable(v) )
         return;
   
-    __del_from_queue(d);
+    __del_from_queue(v);
 
-#ifdef SEDF_STATS
+#ifdef CBS_STATS
     /* Manage deadline misses */
     if ( unlikely(inf->deadl_abs < now) )
     {
@@ -311,26 +332,27 @@ static void desched_edf_dom(s_time_t now, struct vcpu* d)
 #endif
 
     /* Manage overruns */
-    if ( inf->cputime >= inf->slice )
+    if ( inf->cputime >= inf->budget )
     {
-        inf->cputime -= inf->slice;
+        inf->cputime -= inf->budget;
+
 
         /* Set next deadline */
         inf->deadl_abs += inf->period;
 
-        /* Ensure that the cputime is always less than slice */
-        if ( unlikely(inf->cputime > inf->slice) )
+        /* Ensure that the cputime is always less than budget */
+        if ( unlikely(inf->cputime > inf->budget) )
         {
-#ifdef SEDF_STATS
+#ifdef CBS_STATS
             inf->over_tot++;
             inf->over_time += inf->cputime;
 #endif
 
             /* Make up for the overage by pushing the deadline
                into the future */
-            inf->deadl_abs += ((inf->cputime / inf->slice)
+            inf->deadl_abs += ((inf->cputime / inf->budget)
                                * inf->period) * 2;
-            inf->cputime -= (inf->cputime / inf->slice) * inf->slice;
+            inf->cputime -= (inf->cputime / inf->budget) * inf->budget;
         }
 
         /* Ensure that the start of the next period is in the future */
@@ -340,20 +362,20 @@ static void desched_edf_dom(s_time_t now, struct vcpu* d)
                         inf->period)) * inf->period;
     }
  
-    /* Add a runnable domain to the waitqueue */
-    if ( sedf_runnable(d) )
+    /* Add a runnable vcpu to the appropriate queue */
+    if ( cbs_runnable(v) )
     {
-        if( sedf_soft(d) )
+        if ( cbs_soft(v) )
         {
-            __add_to_runqueue_sort(d);
+            __add_to_runqueue_sort(v);
         }
         else 
         {
-            __add_to_waitqueue_sort(d);
+            __add_to_waitqueue_sort(v);
         }
     }
     
-    ASSERT(EQ(sedf_runnable(d), __task_on_queue(d)));
+    ASSERT(EQ(cbs_runnable(v), __task_on_queue(v)));
 }
 
 
@@ -362,7 +384,7 @@ static void update_queues(
     s_time_t now, struct list_head *runq, struct list_head *waitq)
 {
     struct list_head     *cur, *tmp;
-    struct sedf_vcpu_info *curinf;
+    struct cbs_vcpu_info *curinf;
  
     /*
      * Check for the first elements of the waitqueue, whether their
@@ -370,21 +392,21 @@ static void update_queues(
      */
     list_for_each_safe ( cur, tmp, waitq )
     {
-        curinf = list_entry(cur, struct sedf_vcpu_info, list);
+        curinf = list_entry(cur, struct cbs_vcpu_info, list);
         if ( PERIOD_BEGIN(curinf) > now )
             break;
         __del_from_queue(curinf->vcpu);
         __add_to_runqueue_sort(curinf->vcpu);
     }
  
-    /* Process the runq, find domains that are on the runq that shouldn't */
+    /* Process the runq, find vcpus that are on the runq that shouldn't */
     list_for_each_safe ( cur, tmp, runq )
     {
-        curinf = list_entry(cur,struct sedf_vcpu_info,list);
+        curinf = list_entry(cur, struct cbs_vcpu_info, list);
 
-        if ( unlikely(curinf->slice == 0) )
+        if ( unlikely(curinf->budget == 0) )
         {
-            /* Ignore domains with empty slice */
+            /* Ignore vcpus with empty budget */
             __del_from_queue(curinf->vcpu);
 
             /* Move them to their next period */
@@ -399,17 +421,18 @@ static void update_queues(
             /* Put them back into the queue */
             __add_to_waitqueue_sort(curinf->vcpu);
         }
+
         else
             break;
     }
 }
 
 
-static int sedf_init(struct scheduler *ops)
+static int cbs_init(struct scheduler *ops)
 {
-    struct sedf_priv_info *prv;
+    struct cbs_priv_info *prv;
 
-    prv = xzalloc(struct sedf_priv_info);
+    prv = xzalloc(struct cbs_priv_info);
     if ( prv == NULL )
         return -ENOMEM;
 
@@ -420,11 +443,11 @@ static int sedf_init(struct scheduler *ops)
 }
 
 
-static void sedf_deinit(const struct scheduler *ops)
+static void cbs_deinit(const struct scheduler *ops)
 {
-    struct sedf_priv_info *prv;
+    struct cbs_priv_info *prv;
 
-    prv = SEDF_PRIV(ops);
+    prv = CBS_PRIV(ops);
     if ( prv != NULL )
         xfree(prv);
 }
@@ -433,43 +456,43 @@ static void sedf_deinit(const struct scheduler *ops)
 /*
  * Main scheduling function
  * Reasons for calling this function are:
- * -timeslice for the current period used up
- * -domain on waitqueue has started it's period
- * -and various others ;) in general: determine which domain to run next
+ * -budget for the current server is used up
+ * -vcpu on waitqueue has started its period
+ * -and various others ;) in general: determine which vcpu to run next
  */
-static struct task_slice sedf_do_schedule(
+static struct task_slice cbs_do_schedule(
     const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled)
 {
     int                   cpu      = smp_processor_id();
     struct list_head     *runq     = RUNQ(cpu);
     struct list_head     *waitq    = WAITQ(cpu);
-    struct sedf_vcpu_info *inf     = EDOM_INFO(current);
-    struct sedf_vcpu_info *runinf, *waitinf;
+    struct cbs_vcpu_info *inf      = CBS_VCPU(current);
+    struct cbs_vcpu_info *runinf, *waitinf;
     struct task_slice      ret;
 
     SCHED_STAT_CRANK(schedule);
 
-    /* Idle tasks don't need any of the following stuf */
+    /* Idle tasks don't need any of the following stuff */
     if ( is_idle_vcpu(current) )
         goto check_waitq;
 
     /*
-     * Create local state of the status of the domain, in order to avoid
+     * Create local state of the status of the vcpu, in order to avoid
      * inconsistent state during scheduling decisions, because data for
      * vcpu_runnable is not protected by the scheduling lock!
      */
     if ( !vcpu_runnable(current) )
-        inf->status |= SEDF_ASLEEP;
+        inf->status |= CBS_ASLEEP;
  
-    if ( inf->status & SEDF_ASLEEP )
+    if ( inf->status & CBS_ASLEEP )
         inf->block_abs = now;
 
-    desched_edf_dom(now, current);
+    desched_edf_vcpu(now, current);
  check_waitq:
     update_queues(now, runq, waitq);
 
     /*
-     * Now simply pick the first domain from the runqueue, which has the
+     * Now simply pick the first vcpu from the runqueue, which has the
      * earliest deadline, because the list is sorted
      *
      * Tasklet work (which runs in idle VCPU context) overrides all else.
@@ -484,28 +507,28 @@ static struct task_slice sedf_do_schedule(
     }
     else if ( !list_empty(runq) )
     {
-        runinf   = list_entry(runq->next,struct sedf_vcpu_info,list);
+        runinf   = list_entry(runq->next, struct cbs_vcpu_info, list);
         ret.task = runinf->vcpu;
         if ( !list_empty(waitq) )
         {
             waitinf  = list_entry(waitq->next,
-                                  struct sedf_vcpu_info,list);
+                                  struct cbs_vcpu_info, list);
             /*
-             * Rerun scheduler, when scheduled domain reaches it's
-             * end of slice or the first domain from the waitqueue
+             * Rerun scheduler, when scheduled vcpu consumes
+             * its budget or the first vcpu from the waitqueue
              * gets ready.
              */
-            ret.time = MIN(now + runinf->slice - runinf->cputime,
+            ret.time = MIN(now + runinf->budget - runinf->cputime,
                            PERIOD_BEGIN(waitinf)) - now;
         }
         else
         {
-            ret.time = runinf->slice - runinf->cputime;
+            ret.time = runinf->budget - runinf->cputime;
         }
     }
     else
     {
-        waitinf  = list_entry(waitq->next,struct sedf_vcpu_info, list);
+        waitinf  = list_entry(waitq->next, struct cbs_vcpu_info, list);
 
         ret.task = IDLETASK(cpu);
         ret.time = PERIOD_BEGIN(waitinf) - now;
@@ -521,35 +544,35 @@ static struct task_slice sedf_do_schedule(
 
     ret.migrated = 0;
 
-    EDOM_INFO(ret.task)->sched_start_abs = now;
+    CBS_VCPU(ret.task)->sched_start_abs = now;
     CHECK(ret.time > 0);
-    ASSERT(sedf_runnable(ret.task));
-    CPU_INFO(cpu)->current_slice_expires = now + ret.time;
+    ASSERT(cbs_runnable(ret.task));
+    CBS_PCPU(cpu)->current_budget_expires = now + ret.time;
     return ret;
 }
 
-static void sedf_sleep(const struct scheduler *ops, struct vcpu *d)
+static void cbs_sleep(const struct scheduler *ops, struct vcpu *v)
 {
-    if ( is_idle_vcpu(d) )
+    if ( is_idle_vcpu(v) )
         return;
 
-    EDOM_INFO(d)->status |= SEDF_ASLEEP;
+    CBS_VCPU(v)->status |= CBS_ASLEEP;
  
-    if ( per_cpu(schedule_data, d->processor).curr == d )
+    if ( per_cpu(schedule_data, v->processor).curr == v )
     {
-        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
+        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
     }
     else
     {
-        if ( __task_on_queue(d) )
-            __del_from_queue(d);
+        if ( __task_on_queue(v) )
+            __del_from_queue(v);
     }
 }
 
 /*
- * Compares two domains in the relation of whether the one is allowed to
+ * Compares two vcpus in the relation of whether the one is allowed to
  * interrupt the others execution.
- * It returns true (!=0) if a switch to the other domain is good.
+ * It returns true (!=0) if a switch to the other vcpu is good.
  * Priority scheme is as follows:
  *  EDF: early deadline > late deadline
  */
@@ -557,129 +580,82 @@ static inline int should_switch(struct vcpu *cur,
                                 struct vcpu *other,
                                 s_time_t now)
 {
-    struct sedf_vcpu_info *cur_inf, *other_inf;
-    cur_inf   = EDOM_INFO(cur);
-    other_inf = EDOM_INFO(other);
+    struct cbs_vcpu_info *cur_inf, *other_inf;
+    cur_inf   = CBS_VCPU(cur);
+    other_inf = CBS_VCPU(other);
 
-    /* Always interrupt idle domain. */
+    /* Always interrupt idle vcpu. */
     if ( is_idle_vcpu(cur) )
         return 1;
 
     /* Check whether we need to make an earlier scheduling decision */
     if ( PERIOD_BEGIN(other_inf) < 
-         CPU_INFO(other->processor)->current_slice_expires )
+         CBS_PCPU(other->processor)->current_budget_expires )
         return 1;
 
     return 0;
 }
 
 /*
- * This function wakes up a domain, i.e. moves them into the waitqueue
- * things to mention are: admission control is taking place nowhere at
- * the moment, so we can't be sure, whether it is safe to wake the domain
- * up at all. Anyway, even if it is safe (total cpu usage <=100%) there are
- * some considerations on when to allow the domain to wake up and have it's
- * first deadline...
- * I detected 3 cases, which could describe the possible behaviour of the
- * scheduler,
- * and I'll try to make them more clear:
+ * This function wakes up a vcpu, i.e. moves it into the appropriate queue
  *
- * 1. Very conservative
- *     -when a blocked domain unblocks, it is allowed to start execution at
+ *  For Hard Real-Time vcpus (soft = 0):
+ *     -When a blocked vcpu unblocks, it is allowed to start execution at
  *      the beginning of the next complete period
  *      (D..deadline, R..running, B..blocking/sleeping, U..unblocking/waking up
  *
  *      DRRB_____D__U_____DRRRRR___D________ ... 
  *
- *     -this causes the domain to miss a period (and a deadlline)
- *     -doesn't disturb the schedule at all
- *     -deadlines keep occuring isochronous
- *
- * 2. Conservative Part 1: Short Unblocking
- *     -when a domain unblocks in the same period as it was blocked it
- *      unblocks and may consume the rest of it's original time-slice minus
- *      the time it was blocked
- *      (assume period=9, slice=5)
- *
- *      DRB_UR___DRRRRR___D...
- *
- *     -this also doesn't disturb scheduling, but might lead to the fact, that
- *      the domain can't finish it's workload in the period
- *     -addition: experiments have shown that this may have a HUGE impact on
- *      performance of other domains, becaus it can lead to excessive context
- *      switches
+ *     -This causes the vcpu to miss a period (and a deadline)
+ *     -Doesn't disturb the schedule at all
+ *     -Deadlines keep occurring isochronously
  *
- *    Part2: Long Unblocking
- *    Part 2a
- *     -it is obvious that such accounting of block time, applied when
- *      unblocking is happening in later periods, works fine aswell
- *     -the domain is treated as if it would have been running since the start
- *      of its new period
+ *  For Soft Real-Time vcpus (soft = 1):
+ *     -Deadlines are set and updated according to the Constant Bandwidth 
Server
+ *      rule and vcpus are moved immediately to the run queue.
  *
- *      DRB______D___UR___D... 
- *
- *    Part 2b
- *     -if one needs the full slice in the next period, it is necessary to
- *      treat the unblocking time as the start of the new period, i.e. move
- *      the deadline further back (later)
- *     -this doesn't disturb scheduling as well, because for EDF periods can
- *      be treated as minimal inter-release times and scheduling stays
- *      correct, when deadlines are kept relative to the time the process
- *      unblocks
- *
- *      DRB______D___URRRR___D...<prev [Thread] next>
- *                       (D) <- old deadline was here
- *     -problem: deadlines don't occur isochronous anymore
- *
- * 3. Unconservative (i.e. incorrect)
- *     -to boost the performance of I/O dependent domains it would be possible
- *      to put the domain into the runnable queue immediately, and let it run
- *      for the remainder of the slice of the current period
- *      (or even worse: allocate a new full slice for the domain) 
- *     -either behaviour can lead to missed deadlines in other domains as
- *      opposed to approaches 1,2a,2b
  */
-static void sedf_wake(const struct scheduler *ops, struct vcpu *d)
+static void cbs_wake(const struct scheduler *ops, struct vcpu *v)
 {
     s_time_t              now = NOW();
-    struct sedf_vcpu_info* inf = EDOM_INFO(d);
+    struct cbs_vcpu_info *inf = CBS_VCPU(v);
 
-    if ( unlikely(is_idle_vcpu(d)) )
+    if ( unlikely(is_idle_vcpu(v)) )
         return;
    
-    if ( unlikely(__task_on_queue(d)) )
+    if ( unlikely(__task_on_queue(v)) )
         return;
 
-    ASSERT(!sedf_runnable(d));
-    inf->status &= ~SEDF_ASLEEP;
+    ASSERT(!cbs_runnable(v));
+    inf->status &= ~CBS_ASLEEP;
  
     if ( unlikely(inf->deadl_abs == 0) )
     {
         /* Initial setup of the deadline */
-        inf->deadl_abs = now + inf->slice;
+        inf->deadl_abs = now + inf->budget;
     }
   
-#ifdef SEDF_STATS 
+#ifdef CBS_STATS 
     inf->block_tot++;
 #endif
 
-    if ( sedf_soft(d) )
+    if ( cbs_soft(v) )
     {
         /* Apply CBS rule
          * Where:
-         *      c == Remaining server slice == (inf->slice - cpu_time) 
+         *      c == Remaining server budget == (inf->budget - cputime)
          *      d == Server (vcpu) deadline  == inf->deadl_abs
          *      r == Wake-up time of vcpu    == now
-         *      U == Server (vcpu) bandwidth == (inf->slice / inf->period)
+         *      U == Server (vcpu) bandwidth == (inf->budget / inf->period)
          *
          * if c>=(d-r)*U  --->  
-         *      (inf->slice - cputime) >= (inf->deadl_abs - now) * inf->period
+         *      (inf->budget - cputime) >= (inf->deadl_abs - now) * (inf->budget / inf->period)
          *
-         * If true, push deadline back by one period and refresh slice, else
-         * use current slice and deadline.
+         * If true, push deadline back by one period and refresh budget, else
+         * use current budget and deadline.
          */
-        if((inf->slice - inf->cputime) >= 
-            ((inf->deadl_abs - now) * (inf->slice / inf->period)))
+        if ( (inf->budget - inf->cputime) >=
+             ((inf->deadl_abs - now) * (inf->budget / inf->period)) )
         {
             /* Push back deadline by one period */
             inf->deadl_abs += inf->period;
@@ -688,14 +664,14 @@ static void sedf_wake(const struct scheduler *ops, struct 
vcpu *d)
         
         /* In CBS we don't care if the period has begun,
          * the task doesn't have to wait for its period
-         * because it'll never request more than its slice
+         * because it'll never request more than its budget
          * for any given period.
          */
-        __add_to_runqueue_sort(d);
+        __add_to_runqueue_sort(v);
     }
     else {
         /* Task is a hard task, treat accordingly */
-#ifdef SEDF_STATS
+#ifdef CBS_STATS
         if ( now < inf->deadl_abs )
         {
             /* Short blocking */
@@ -709,12 +685,12 @@ static void sedf_wake(const struct scheduler *ops, struct 
vcpu *d)
 #endif
 
         if ( PERIOD_BEGIN(inf) > now )
-            __add_to_waitqueue_sort(d);
+            __add_to_waitqueue_sort(v);
         else
-            __add_to_runqueue_sort(d);
+            __add_to_runqueue_sort(v);
     }
  
-#ifdef SEDF_STATS
+#ifdef CBS_STATS
     /* Do some statistics here... */
     if ( inf->block_abs != 0 )
     {
@@ -722,74 +698,75 @@ static void sedf_wake(const struct scheduler *ops, struct 
vcpu *d)
     }
 #endif
 
-    ASSERT(__task_on_queue(d));
+    ASSERT(__task_on_queue(v));
     /*
      * Check whether the awakened task needs to invoke the do_schedule
      * routine. Try to avoid unnecessary runs but:
-     * Save approximation: Always switch to scheduler!
+     * Safe approximation: Always switch to scheduler!
      */
-    ASSERT(d->processor >= 0);
-    ASSERT(d->processor < nr_cpu_ids);
-    ASSERT(per_cpu(schedule_data, d->processor).curr);
+    ASSERT(v->processor >= 0);
+    ASSERT(v->processor < nr_cpu_ids);
+    ASSERT(per_cpu(schedule_data, v->processor).curr);
 
-    if ( should_switch(per_cpu(schedule_data, d->processor).curr, d, now) )
-        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
+    if ( should_switch(per_cpu(schedule_data, v->processor).curr, v, now) )
+        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
 }
 
-/* Print a lot of useful information about a domains in the system */
-static void sedf_dump_domain(struct vcpu *d)
+/* Print a lot of useful information about the vcpus in the system */
+static void cbs_dump_vcpu(struct vcpu *v)
 {
-    printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id,
-           d->is_running ? 'T':'F');
+    printk("%i.%i has=%c ", v->domain->domain_id, v->vcpu_id,
+           v->is_running ? 'T':'F');
     printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64,
-           EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs);
+           CBS_VCPU(v)->period, CBS_VCPU(v)->budget, CBS_VCPU(v)->deadl_abs);
     
-#ifdef SEDF_STATS
+#ifdef CBS_STATS
     printk(" m=%u mt=%"PRIu64"o=%u ot=%"PRIu64, 
-           EDOM_INFO(d)->miss_tot, EDOM_INFO(d)->miss_time, 
-           EDOM_INFO(d)->over_tot, EDOM_INFO(d)->over_time);
+           CBS_VCPU(v)->miss_tot, CBS_VCPU(v)->miss_time, 
+           CBS_VCPU(v)->over_tot, CBS_VCPU(v)->over_time);
 
-    if ( EDOM_INFO(d)->block_tot != 0 )
+    if ( CBS_VCPU(v)->block_tot != 0 )
         printk("\n   blks=%u sh=%u (%u%%) "\
                "l=%u (%u%%) avg: b=%"PRIu64,
-               EDOM_INFO(d)->block_tot, EDOM_INFO(d)->short_block_tot,
-               (EDOM_INFO(d)->short_block_tot * 100) / EDOM_INFO(d)->block_tot,
-               EDOM_INFO(d)->long_block_tot,
-               (EDOM_INFO(d)->long_block_tot * 100) / EDOM_INFO(d)->block_tot,
-               (EDOM_INFO(d)->block_time_tot) / EDOM_INFO(d)->block_tot);
+               CBS_VCPU(v)->block_tot, CBS_VCPU(v)->short_block_tot,
+               (CBS_VCPU(v)->short_block_tot * 100) / CBS_VCPU(v)->block_tot,
+               CBS_VCPU(v)->long_block_tot,
+               (CBS_VCPU(v)->long_block_tot * 100) / CBS_VCPU(v)->block_tot,
+               (CBS_VCPU(v)->block_time_tot) / CBS_VCPU(v)->block_tot);
 #endif
     printk("\n");
 }
 
 
-/* Dumps all domains on the specified cpu */
-static void sedf_dump_cpu_state(const struct scheduler *ops, int i)
+/* Dumps all vcpus on the specified cpu */
+static void cbs_dump_cpu_state(const struct scheduler *ops, int cpu)
 {
     struct list_head      *list, *queue, *tmp;
-    struct sedf_vcpu_info *d_inf;
+    struct cbs_vcpu_info *v_inf;
     struct domain         *d;
-    struct vcpu    *ed;
+    struct vcpu    *v;
     int loop = 0;
  
-    printk("now=%"PRIu64"\n",NOW());
-    queue = RUNQ(i);
+    printk("now=%"PRIu64"\n", NOW());
+    queue = RUNQ(cpu);
     printk("RUNQ rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
            (unsigned long) queue->next, (unsigned long) queue->prev);
     list_for_each_safe ( list, tmp, queue )
     {
-        printk("%3d: ",loop++);
-        d_inf = list_entry(list, struct sedf_vcpu_info, list);
-        sedf_dump_domain(d_inf->vcpu);
+        printk("%3d: ", loop++);
+        v_inf = list_entry(list, struct cbs_vcpu_info, list);
+        cbs_dump_vcpu(v_inf->vcpu);
     }
  
-    queue = WAITQ(i); loop = 0;
+    queue = WAITQ(cpu);
+    loop = 0;
     printk("\nWAITQ rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
            (unsigned long) queue->next, (unsigned long) queue->prev);
     list_for_each_safe ( list, tmp, queue )
     {
-        printk("%3d: ",loop++);
-        d_inf = list_entry(list, struct sedf_vcpu_info, list);
-        sedf_dump_domain(d_inf->vcpu);
+        printk("%3d: ", loop++);
+        v_inf = list_entry(list, struct cbs_vcpu_info, list);
+        cbs_dump_vcpu(v_inf->vcpu);
     }
  
     loop = 0;
@@ -798,14 +775,14 @@ static void sedf_dump_cpu_state(const struct scheduler 
*ops, int i)
     rcu_read_lock(&domlist_read_lock);
     for_each_domain ( d )
     {
-        if ( (d->cpupool ? d->cpupool->sched : &sched_sedf_def) != ops )
+        if ( (d->cpupool ? d->cpupool->sched : &sched_cbs_def) != ops )
             continue;
-        for_each_vcpu(d, ed)
+        for_each_vcpu(d, v)
         {
-            if ( !__task_on_queue(ed) && (ed->processor == i) )
+            if ( !__task_on_queue(v) && (v->processor == cpu) )
             {
-                printk("%3d: ",loop++);
-                sedf_dump_domain(ed);
+                printk("%3d: ", loop++);
+                cbs_dump_vcpu(v);
             }
         }
     }
@@ -814,9 +791,9 @@ static void sedf_dump_cpu_state(const struct scheduler 
*ops, int i)
 
 
 /* Set or fetch domain scheduling parameters */
-static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct 
xen_domctl_scheduler_op *op)
+static int cbs_adjust(const struct scheduler *ops, struct domain *d, struct 
xen_domctl_scheduler_op *op)
 {
-    struct sedf_priv_info *prv = SEDF_PRIV(ops);
+    struct cbs_priv_info *prv = CBS_PRIV(ops);
     unsigned long flags;
     s_time_t now = NOW();
     struct vcpu *v;
@@ -825,7 +802,7 @@ static int sedf_adjust(const struct scheduler *ops, struct 
domain *p, struct xen
     /*
      * Serialize against the pluggable scheduler lock to protect from
      * concurrent updates. We need to take the runq lock for the VCPUs
-     * as well, since we are touching slice and period. 
+     * as well, since we are touching budget and period. 
      *
      * As in sched_credit2.c, runq locks nest inside the  pluggable scheduler
      * lock.
@@ -835,7 +812,7 @@ static int sedf_adjust(const struct scheduler *ops, struct 
domain *p, struct xen
     if ( op->cmd == XEN_DOMCTL_SCHEDOP_putinfo )
     {
         /* Check for sane parameters */
-        if ( !op->u.sedf.period )
+        if ( !op->u.cbs.period )
         {
             printk("Period Not set");
             rc = -EINVAL;
@@ -845,51 +822,51 @@ static int sedf_adjust(const struct scheduler *ops, 
struct domain *p, struct xen
         /*
          * Sanity checking
          */
-        if ( (op->u.sedf.period > PERIOD_MAX) ||
-             (op->u.sedf.period < PERIOD_MIN) ||
-             (op->u.sedf.slice  > op->u.sedf.period) ||
-             (op->u.sedf.slice  < SLICE_MIN) )
+        if ( (op->u.cbs.period > PERIOD_MAX) ||
+             (op->u.cbs.period < PERIOD_MIN) ||
+             (op->u.cbs.budget  > op->u.cbs.period) ||
+             (op->u.cbs.budget  < BUDGET_MIN) )
         {
-            printk("Insane Parameters: period: %lu\tbudget: %lu\n", 
op->u.sedf.period, op->u.sedf.slice);
+            printk("Insane Parameters: period: %lu\tbudget: %lu\n", 
op->u.cbs.period, op->u.cbs.budget);
             rc = -EINVAL;
             goto out;
         }
 
         /* Time-driven domains */
-        for_each_vcpu ( p, v )
+        for_each_vcpu ( d, v )
         {
             spinlock_t *lock = vcpu_schedule_lock(v);
 
-            EDOM_INFO(v)->period  = op->u.sedf.period;
-            EDOM_INFO(v)->slice   = op->u.sedf.slice;
-            if(op->u.sedf.soft)
+            CBS_VCPU(v)->period  = op->u.cbs.period;
+            CBS_VCPU(v)->budget  = op->u.cbs.budget;
+            if ( op->u.cbs.soft )
             {
-                EDOM_INFO(v)->status |= SEDF_SOFT_TASK;
+                CBS_VCPU(v)->status |= CBS_SOFT_TASK;
             }
             else
             {
                 /* Correct deadline when switching from a soft to hard vcpu */
-                if( unlikely((EDOM_INFO(v)->deadl_abs - now) >= 
(EDOM_INFO(v)->period * 3)) )
+                if( unlikely((CBS_VCPU(v)->deadl_abs - now) >= 
(CBS_VCPU(v)->period * 3)) )
                 {
-                    EDOM_INFO(v)->deadl_abs = (now - EDOM_INFO(v)->cputime) + 
(2 * EDOM_INFO(v)->period);
+                    CBS_VCPU(v)->deadl_abs = (now - CBS_VCPU(v)->cputime) + (2 
* CBS_VCPU(v)->period);
                 }
                 
-                EDOM_INFO(v)->status &= (~SEDF_SOFT_TASK);
+                CBS_VCPU(v)->status &= (~CBS_SOFT_TASK);
             }
             vcpu_schedule_unlock(lock, v);
         }
     }
     else if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
     {
-        if ( p->vcpu[0] == NULL )
+        if ( d->vcpu[0] == NULL )
         {
             rc = -EINVAL;
             goto out;
         }
 
-        op->u.sedf.period    = EDOM_INFO(p->vcpu[0])->period;
-        op->u.sedf.slice     = EDOM_INFO(p->vcpu[0])->slice;
-        op->u.sedf.soft      = sedf_soft(p->vcpu[0]);
+        op->u.cbs.period    = CBS_VCPU(d->vcpu[0])->period;
+        op->u.cbs.budget    = CBS_VCPU(d->vcpu[0])->budget;
+        op->u.cbs.soft      = cbs_soft(d->vcpu[0]);
     }
 
 out:
@@ -898,35 +875,35 @@ out:
     return rc;
 }
 
-static struct sedf_priv_info _sedf_priv;
+static struct cbs_priv_info _cbs_priv;
 
-const struct scheduler sched_sedf_def = {
-    .name           = "Simple EDF Scheduler",
-    .opt_name       = "sedf",
-    .sched_id       = XEN_SCHEDULER_SEDF,
-    .sched_data     = &_sedf_priv,
+const struct scheduler sched_cbs_def = {
+    .name           = "Constant Bandwidth Server Scheduler",
+    .opt_name       = "cbs",
+    .sched_id       = XEN_SCHEDULER_CBS,
+    .sched_data     = &_cbs_priv,
     
-    .init_domain    = sedf_init_domain,
-    .destroy_domain = sedf_destroy_domain,
-
-    .insert_vcpu    = sedf_insert_vcpu,
-
-    .alloc_vdata    = sedf_alloc_vdata,
-    .free_vdata     = sedf_free_vdata,
-    .alloc_pdata    = sedf_alloc_pdata,
-    .free_pdata     = sedf_free_pdata,
-    .alloc_domdata  = sedf_alloc_domdata,
-    .free_domdata   = sedf_free_domdata,
-
-    .init           = sedf_init,
-    .deinit         = sedf_deinit,
-
-    .do_schedule    = sedf_do_schedule,
-    .pick_cpu       = sedf_pick_cpu,
-    .dump_cpu_state = sedf_dump_cpu_state,
-    .sleep          = sedf_sleep,
-    .wake           = sedf_wake,
-    .adjust         = sedf_adjust,
+    .init_domain    = cbs_init_domain,
+    .destroy_domain = cbs_destroy_domain,
+
+    .insert_vcpu    = cbs_insert_vcpu,
+
+    .alloc_vdata    = cbs_alloc_vdata,
+    .free_vdata     = cbs_free_vdata,
+    .alloc_pdata    = cbs_alloc_pdata,
+    .free_pdata     = cbs_free_pdata,
+    .alloc_domdata  = cbs_alloc_domdata,
+    .free_domdata   = cbs_free_domdata,
+
+    .init           = cbs_init,
+    .deinit         = cbs_deinit,
+
+    .do_schedule    = cbs_do_schedule,
+    .pick_cpu       = cbs_pick_cpu,
+    .dump_cpu_state = cbs_dump_cpu_state,
+    .sleep          = cbs_sleep,
+    .wake           = cbs_wake,
+    .adjust         = cbs_adjust,
 };
 
 /*
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index c174c41..bcb430d 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -65,7 +65,7 @@ DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
 
 static const struct scheduler *schedulers[] = {
-    &sched_sedf_def,
+    &sched_cbs_def,
     &sched_credit_def,
     &sched_credit2_def,
     &sched_arinc653_def,
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 6e143d3..adf9e83 100755
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -317,7 +317,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
 
 /* XEN_DOMCTL_scheduler_op */
 /* Scheduler types. */
-#define XEN_SCHEDULER_SEDF     4
+#define XEN_SCHEDULER_CBS      4
 #define XEN_SCHEDULER_CREDIT   5
 #define XEN_SCHEDULER_CREDIT2  6
 #define XEN_SCHEDULER_ARINC653 7
@@ -328,11 +328,11 @@ struct xen_domctl_scheduler_op {
     uint32_t sched_id;  /* XEN_SCHEDULER_* */
     uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
     union {
-        struct xen_domctl_sched_sedf {
+        struct xen_domctl_sched_cbs {
             uint64_aligned_t period;
-            uint64_aligned_t slice;
+            uint64_aligned_t budget;
             uint32_t soft;
-        } sedf;
+        } cbs;
         struct xen_domctl_sched_credit {
             uint16_t weight;
             uint16_t cap;
diff --git a/xen/include/public/trace.h b/xen/include/public/trace.h
index cfcf4aa..bd8d00b 100644
--- a/xen/include/public/trace.h
+++ b/xen/include/public/trace.h
@@ -75,7 +75,7 @@
 /* Per-scheduler IDs, to identify scheduler specific events */
 #define TRC_SCHED_CSCHED   0
 #define TRC_SCHED_CSCHED2  1
-#define TRC_SCHED_SEDF     2
+#define TRC_SCHED_CBS      2
 #define TRC_SCHED_ARINC653 3
 
 /* Per-scheduler tracing */
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index d95e254..6bdbf47 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -167,7 +167,7 @@ struct scheduler {
     void         (*tick_resume)     (const struct scheduler *, unsigned int);
 };
 
-extern const struct scheduler sched_sedf_def;
+extern const struct scheduler sched_cbs_def;
 extern const struct scheduler sched_credit_def;
 extern const struct scheduler sched_credit2_def;
 extern const struct scheduler sched_arinc653_def;
-- 
1.7.9.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.