
[Xen-devel] [PATCH v4 06/46] xen/sched: switch schedule_data.curr to point at sched_unit



In preparation of core scheduling, let the percpu pointer
schedule_data.curr point to a struct sched_unit instead of the related
vcpu. At the same time, rename the per-vcpu scheduler-specific structs
to per-unit ones.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
V3:
- remove no longer matching comment (Jan Beulich)
---
 xen/common/sched_arinc653.c |   2 +-
 xen/common/sched_credit.c   | 105 ++++++++++++++-------------
 xen/common/sched_credit2.c  | 168 ++++++++++++++++++++++----------------------
 xen/common/sched_null.c     |  46 ++++++------
 xen/common/sched_rt.c       | 118 +++++++++++++++----------------
 xen/common/schedule.c       |   8 +--
 xen/include/xen/sched-if.h  |   2 +-
 7 files changed, 222 insertions(+), 227 deletions(-)
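
For readers skimming the diff: the net effect on callers can be summarised
with a small before/after sketch. This is illustrative only, distilled from
the hunks below, and is not additional code introduced by the patch:

    /* Before: schedule_data.curr, and hence curr_on_cpu(), named a vcpu. */
    if ( curr_on_cpu(cpu) == vc )               /* compare against the vcpu */
        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);

    /* After: schedule_data.curr names the vcpu's sched_unit. */
    if ( curr_on_cpu(cpu) == unit )             /* compare against the unit */
        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);

    /* Where a vcpu is still wanted, e.g. for idle checks, the unit's
     * vcpu_list pointer is dereferenced instead. */
    BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)->vcpu_list));

Accordingly, the schedulers' private per-vcpu structs (csched_vcpu,
csched2_vcpu, null_vcpu, rt_vcpu) become csched_unit, csched2_unit,
null_unit and rt_unit, and their accessors now take a struct sched_unit *
rather than a struct vcpu *.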

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 9faa1c48c4..7bdaf257ce 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -481,7 +481,7 @@ a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
      * If the VCPU being put to sleep is the same one that is currently
      * running, raise a softirq to invoke the scheduler to switch domains.
      */
-    if ( per_cpu(schedule_data, vc->processor).curr == vc )
+    if ( per_cpu(schedule_data, vc->processor).curr == unit )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
 }
 
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index fa73081b3c..cfe3edc14c 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -83,7 +83,7 @@
     ((struct csched_private *)((_ops)->sched_data))
 #define CSCHED_PCPU(_c)     \
     ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
-#define CSCHED_VCPU(_vcpu)  ((struct csched_vcpu *) (_vcpu)->sched_unit->priv)
+#define CSCHED_UNIT(unit)   ((struct csched_unit *) (unit)->priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
 
@@ -160,7 +160,7 @@ struct csched_pcpu {
 /*
  * Virtual CPU
  */
-struct csched_vcpu {
+struct csched_unit {
     struct list_head runq_elem;
     struct list_head active_vcpu_elem;
 
@@ -233,15 +233,15 @@ static void csched_tick(void *_cpu);
 static void csched_acct(void *dummy);
 
 static inline int
-__vcpu_on_runq(struct csched_vcpu *svc)
+__vcpu_on_runq(struct csched_unit *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
 
-static inline struct csched_vcpu *
+static inline struct csched_unit *
 __runq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct csched_vcpu, runq_elem);
+    return list_entry(elem, struct csched_unit, runq_elem);
 }
 
 /* Is the first element of cpu's runq (if any) cpu's idle vcpu? */
@@ -273,7 +273,7 @@ dec_nr_runnable(unsigned int cpu)
 }
 
 static inline void
-__runq_insert(struct csched_vcpu *svc)
+__runq_insert(struct csched_unit *svc)
 {
     unsigned int cpu = svc->vcpu->processor;
     const struct list_head * const runq = RUNQ(cpu);
@@ -283,7 +283,7 @@ __runq_insert(struct csched_vcpu *svc)
 
     list_for_each( iter, runq )
     {
-        const struct csched_vcpu * const iter_svc = __runq_elem(iter);
+        const struct csched_unit * const iter_svc = __runq_elem(iter);
         if ( svc->pri > iter_svc->pri )
             break;
     }
@@ -304,34 +304,34 @@ __runq_insert(struct csched_vcpu *svc)
 }
 
 static inline void
-runq_insert(struct csched_vcpu *svc)
+runq_insert(struct csched_unit *svc)
 {
     __runq_insert(svc);
     inc_nr_runnable(svc->vcpu->processor);
 }
 
 static inline void
-__runq_remove(struct csched_vcpu *svc)
+__runq_remove(struct csched_unit *svc)
 {
     BUG_ON( !__vcpu_on_runq(svc) );
     list_del_init(&svc->runq_elem);
 }
 
 static inline void
-runq_remove(struct csched_vcpu *svc)
+runq_remove(struct csched_unit *svc)
 {
     dec_nr_runnable(svc->vcpu->processor);
     __runq_remove(svc);
 }
 
-static void burn_credits(struct csched_vcpu *svc, s_time_t now)
+static void burn_credits(struct csched_unit *svc, s_time_t now)
 {
     s_time_t delta;
     uint64_t val;
     unsigned int credits;
 
     /* Assert svc is current */
-    ASSERT( svc == CSCHED_VCPU(curr_on_cpu(svc->vcpu->processor)) );
+    ASSERT( svc == CSCHED_UNIT(curr_on_cpu(svc->vcpu->processor)) );
 
     if ( (delta = now - svc->start_time) <= 0 )
         return;
@@ -349,10 +349,10 @@ boolean_param("tickle_one_idle_cpu", opt_tickle_one_idle);
 
 DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
 
-static inline void __runq_tickle(struct csched_vcpu *new)
+static inline void __runq_tickle(struct csched_unit *new)
 {
     unsigned int cpu = new->vcpu->processor;
-    struct csched_vcpu * const cur = CSCHED_VCPU(curr_on_cpu(cpu));
+    struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
     struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
     cpumask_t mask, idle_mask, *online;
     int balance_step, idlers_empty;
@@ -607,7 +607,7 @@ init_pdata(struct csched_private *prv, struct csched_pcpu *spc, int cpu)
     spc->idle_bias = nr_cpu_ids - 1;
 
     /* Start off idling... */
-    BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)));
+    BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)->vcpu_list));
     cpumask_set_cpu(cpu, prv->idlers);
     spc->nr_runnable = 0;
 }
@@ -630,7 +630,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     struct csched_private *prv = CSCHED_PRIV(new_ops);
-    struct csched_vcpu *svc = vdata;
+    struct csched_unit *svc = vdata;
 
     ASSERT(svc && is_idle_vcpu(svc->vcpu));
 
@@ -653,7 +653,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 static inline void
 __csched_vcpu_check(struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(vc->sched_unit);
     struct csched_dom * const sdom = svc->sdom;
 
     BUG_ON( svc->vcpu != vc );
@@ -686,7 +686,7 @@ integer_param("vcpu_migration_delay", vcpu_migration_delay_us);
 
 static inline bool
 __csched_vcpu_is_cache_hot(const struct csched_private *prv,
-                           const struct csched_vcpu *svc)
+                           const struct csched_unit *svc)
 {
     bool hot = prv->vcpu_migr_delay &&
                (NOW() - svc->last_sched_time) < prv->vcpu_migr_delay;
@@ -701,7 +701,7 @@ static inline int
 __csched_vcpu_is_migrateable(const struct csched_private *prv, struct vcpu *vc,
                              int dest_cpu, cpumask_t *mask)
 {
-    const struct csched_vcpu *svc = CSCHED_VCPU(vc);
+    const struct csched_unit *svc = CSCHED_UNIT(vc->sched_unit);
     /*
      * Don't pick up work that's hot on peer PCPU, or that can't (or
      * would prefer not to) run on cpu.
@@ -857,7 +857,7 @@ static struct sched_resource *
 csched_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched_vcpu *svc = CSCHED_VCPU(vc);
+    struct csched_unit *svc = CSCHED_UNIT(unit);
 
     /*
      * We have been called by vcpu_migrate() (in schedule.c), as part
@@ -871,7 +871,7 @@ csched_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 }
 
 static inline void
-__csched_vcpu_acct_start(struct csched_private *prv, struct csched_vcpu *svc)
+__csched_vcpu_acct_start(struct csched_private *prv, struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
     unsigned long flags;
@@ -901,7 +901,7 @@ __csched_vcpu_acct_start(struct csched_private *prv, struct csched_vcpu *svc)
 
 static inline void
 __csched_vcpu_acct_stop_locked(struct csched_private *prv,
-    struct csched_vcpu *svc)
+    struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
 
@@ -926,7 +926,7 @@ __csched_vcpu_acct_stop_locked(struct csched_private *prv,
 static void
 csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(current);
+    struct csched_unit * const svc = CSCHED_UNIT(current->sched_unit);
     const struct scheduler *ops = per_cpu(scheduler, cpu);
 
     ASSERT( current->processor == cpu );
@@ -995,10 +995,10 @@ csched_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
                    void *dd)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched_vcpu *svc;
+    struct csched_unit *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct csched_vcpu);
+    svc = xzalloc(struct csched_unit);
     if ( svc == NULL )
         return NULL;
 
@@ -1017,7 +1017,7 @@ static void
 csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched_vcpu *svc = unit->priv;
+    struct csched_unit *svc = unit->priv;
     spinlock_t *lock;
 
     BUG_ON( is_idle_vcpu(vc) );
@@ -1043,7 +1043,7 @@ csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 static void
 csched_free_udata(const struct scheduler *ops, void *priv)
 {
-    struct csched_vcpu *svc = priv;
+    struct csched_unit *svc = priv;
 
     BUG_ON( !list_empty(&svc->runq_elem) );
 
@@ -1054,8 +1054,7 @@ static void
 csched_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct vcpu *vc = unit->vcpu_list;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
     struct csched_dom * const sdom = svc->sdom;
 
     SCHED_STAT_CRANK(vcpu_remove);
@@ -1082,14 +1081,14 @@ static void
 csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
     unsigned int cpu = vc->processor;
 
     SCHED_STAT_CRANK(vcpu_sleep);
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( curr_on_cpu(cpu) == vc )
+    if ( curr_on_cpu(cpu) == unit )
     {
         /*
          * We are about to tickle cpu, so we should clear its bit in idlers.
@@ -1107,12 +1106,12 @@ static void
 csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
     bool_t migrating;
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( unlikely(curr_on_cpu(vc->processor) == vc) )
+    if ( unlikely(curr_on_cpu(vc->processor) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         return;
@@ -1168,8 +1167,8 @@ csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 static void
 csched_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct vcpu *vc = unit->vcpu_list;
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
 
     /* Let the scheduler know that this vcpu is trying to yield */
     set_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags);
@@ -1224,8 +1222,7 @@ static void
 csched_aff_cntl(const struct scheduler *ops, struct sched_unit *unit,
                 const cpumask_t *hard, const cpumask_t *soft)
 {
-    struct vcpu *v = unit->vcpu_list;
-    struct csched_vcpu *svc = CSCHED_VCPU(v);
+    struct csched_unit *svc = CSCHED_UNIT(unit);
 
     if ( !hard )
         return;
@@ -1328,7 +1325,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
 {
     struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
     struct list_head *runq, *elem, *next, *last_under;
-    struct csched_vcpu *svc_elem;
+    struct csched_unit *svc_elem;
     spinlock_t *lock;
     unsigned long flags;
     int sort_epoch;
@@ -1374,7 +1371,7 @@ csched_acct(void* dummy)
     unsigned long flags;
     struct list_head *iter_vcpu, *next_vcpu;
     struct list_head *iter_sdom, *next_sdom;
-    struct csched_vcpu *svc;
+    struct csched_unit *svc;
     struct csched_dom *sdom;
     uint32_t credit_total;
     uint32_t weight_total;
@@ -1497,7 +1494,7 @@ csched_acct(void* dummy)
 
         list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
         {
-            svc = list_entry(iter_vcpu, struct csched_vcpu, active_vcpu_elem);
+            svc = list_entry(iter_vcpu, struct csched_unit, active_vcpu_elem);
             BUG_ON( sdom != svc->sdom );
 
             /* Increment credit */
@@ -1601,12 +1598,12 @@ csched_tick(void *_cpu)
     set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us) );
 }
 
-static struct csched_vcpu *
+static struct csched_unit *
 csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
 {
     const struct csched_private * const prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
     const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
-    struct csched_vcpu *speer;
+    struct csched_unit *speer;
     struct list_head *iter;
     struct vcpu *vc;
 
@@ -1616,7 +1613,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
      * Don't steal from an idle CPU's runq because it's about to
      * pick up work from it itself.
      */
-    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu))) )
+    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu)->vcpu_list)) )
         goto out;
 
     list_for_each( iter, &peer_pcpu->runq )
@@ -1678,12 +1675,12 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
     return NULL;
 }
 
-static struct csched_vcpu *
+static struct csched_unit *
 csched_load_balance(struct csched_private *prv, int cpu,
-    struct csched_vcpu *snext, bool_t *stolen)
+    struct csched_unit *snext, bool_t *stolen)
 {
     struct cpupool *c = per_cpu(cpupool, cpu);
-    struct csched_vcpu *speer;
+    struct csched_unit *speer;
     cpumask_t workers;
     cpumask_t *online;
     int peer_cpu, first_cpu, peer_node, bstep;
@@ -1832,9 +1829,9 @@ csched_schedule(
 {
     const int cpu = smp_processor_id();
     struct list_head * const runq = RUNQ(cpu);
-    struct csched_vcpu * const scurr = CSCHED_VCPU(current);
+    struct csched_unit * const scurr = CSCHED_UNIT(current->sched_unit);
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct csched_vcpu *snext;
+    struct csched_unit *snext;
     struct task_slice ret;
     s_time_t runtime, tslice;
 
@@ -1951,7 +1948,7 @@ csched_schedule(
     if ( tasklet_work_scheduled )
     {
         TRACE_0D(TRC_CSCHED_SCHED_TASKLET);
-        snext = CSCHED_VCPU(idle_vcpu[cpu]);
+        snext = CSCHED_UNIT(idle_vcpu[cpu]->sched_unit);
         snext->pri = CSCHED_PRI_TS_BOOST;
     }
 
@@ -2003,7 +2000,7 @@ out:
 }
 
 static void
-csched_dump_vcpu(struct csched_vcpu *svc)
+csched_dump_vcpu(struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
 
@@ -2039,7 +2036,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     struct list_head *runq, *iter;
     struct csched_private *prv = CSCHED_PRIV(ops);
     struct csched_pcpu *spc;
-    struct csched_vcpu *svc;
+    struct csched_unit *svc;
     spinlock_t *lock;
     unsigned long flags;
     int loop;
@@ -2063,7 +2060,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
            CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu). */
-    svc = CSCHED_VCPU(curr_on_cpu(cpu));
+    svc = CSCHED_UNIT(curr_on_cpu(cpu));
     if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         printk("\trun: ");
@@ -2132,10 +2129,10 @@ csched_dump(const struct scheduler *ops)
 
         list_for_each( iter_svc, &sdom->active_vcpu )
         {
-            struct csched_vcpu *svc;
+            struct csched_unit *svc;
             spinlock_t *lock;
 
-            svc = list_entry(iter_svc, struct csched_vcpu, active_vcpu_elem);
+            svc = list_entry(iter_svc, struct csched_unit, active_vcpu_elem);
             lock = vcpu_schedule_lock(svc->vcpu);
 
             printk("\t%3d: ", ++loop);
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 37192e6713..afeb70b845 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -176,7 +176,7 @@
  *     load balancing;
  *  + serializes runqueue operations (removing and inserting vcpus);
  *  + protects runqueue-wide data in csched2_runqueue_data;
- *  + protects vcpu parameters in csched2_vcpu for the vcpu in the
+ *  + protects vcpu parameters in csched2_unit for the vcpu in the
  *    runqueue.
  *
  * - Private scheduler lock
@@ -512,7 +512,7 @@ struct csched2_pcpu {
 /*
  * Virtual CPU
  */
-struct csched2_vcpu {
+struct csched2_unit {
     struct csched2_dom *sdom;          /* Up-pointer to domain                */
     struct vcpu *vcpu;                 /* Up-pointer, to vcpu                 */
     struct csched2_runqueue_data *rqd; /* Up-pointer to the runqueue          */
@@ -571,9 +571,9 @@ static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
     return per_cpu(schedule_data, cpu).sched_priv;
 }
 
-static inline struct csched2_vcpu *csched2_vcpu(const struct vcpu *v)
+static inline struct csched2_unit *csched2_unit(const struct sched_unit *unit)
 {
-    return v->sched_unit->priv;
+    return unit->priv;
 }
 
 static inline struct csched2_dom *csched2_dom(const struct domain *d)
@@ -595,7 +595,7 @@ static inline struct csched2_runqueue_data *c2rqd(const struct scheduler *ops,
 }
 
 /* Does the domain of this vCPU have a cap? */
-static inline bool has_cap(const struct csched2_vcpu *svc)
+static inline bool has_cap(const struct csched2_unit *svc)
 {
     return svc->budget != STIME_MAX;
 }
@@ -689,7 +689,7 @@ void smt_idle_mask_clear(unsigned int cpu, cpumask_t *mask)
  * Of course, 1, 2 and 3 makes sense only if svc has a soft affinity. Also
  * note that at least 5 is guaranteed to _always_ return at least one pcpu.
  */
-static int get_fallback_cpu(struct csched2_vcpu *svc)
+static int get_fallback_cpu(struct csched2_unit *svc)
 {
     struct vcpu *v = svc->vcpu;
     unsigned int bs;
@@ -774,7 +774,7 @@ static int get_fallback_cpu(struct csched2_vcpu *svc)
  * FIXME: Do pre-calculated division?
  */
 static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
-                          struct csched2_vcpu *svc)
+                          struct csched2_unit *svc)
 {
     uint64_t val = time * rqd->max_weight + svc->residual;
 
@@ -782,7 +782,7 @@ static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
     svc->credit -= val;
 }
 
-static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_vcpu *svc)
+static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_unit *svc)
 {
     return credit * svc->weight / rqd->max_weight;
 }
@@ -791,14 +791,14 @@ static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct c
  * Runqueue related code.
  */
 
-static inline int vcpu_on_runq(struct csched2_vcpu *svc)
+static inline int vcpu_on_runq(struct csched2_unit *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
 
-static inline struct csched2_vcpu * runq_elem(struct list_head *elem)
+static inline struct csched2_unit * runq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct csched2_vcpu, runq_elem);
+    return list_entry(elem, struct csched2_unit, runq_elem);
 }
 
 static void activate_runqueue(struct csched2_private *prv, int rqi)
@@ -916,7 +916,7 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
 
         list_for_each( iter, &rqd->svc )
         {
-            struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
+            struct csched2_unit * svc = list_entry(iter, struct csched2_unit, rqd_elem);
 
             if ( svc->weight > max_weight )
                 max_weight = svc->weight;
@@ -941,7 +941,7 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
 
 /* Add and remove from runqueue assignment (not active run queue) */
 static void
-_runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
+_runq_assign(struct csched2_unit *svc, struct csched2_runqueue_data *rqd)
 {
 
     svc->rqd = rqd;
@@ -971,7 +971,7 @@ _runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
 static void
 runq_assign(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_vcpu *svc = vc->sched_unit->priv;
+    struct csched2_unit *svc = vc->sched_unit->priv;
 
     ASSERT(svc->rqd == NULL);
 
@@ -979,7 +979,7 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-_runq_deassign(struct csched2_vcpu *svc)
+_runq_deassign(struct csched2_unit *svc)
 {
     struct csched2_runqueue_data *rqd = svc->rqd;
 
@@ -998,7 +998,7 @@ _runq_deassign(struct csched2_vcpu *svc)
 static void
 runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_vcpu *svc = vc->sched_unit->priv;
+    struct csched2_unit *svc = vc->sched_unit->priv;
 
     ASSERT(svc->rqd == c2rqd(ops, vc->processor));
 
@@ -1200,7 +1200,7 @@ update_runq_load(const struct scheduler *ops,
 
 static void
 update_svc_load(const struct scheduler *ops,
-                struct csched2_vcpu *svc, int change, s_time_t now)
+                struct csched2_unit *svc, int change, s_time_t now)
 {
     struct csched2_private *prv = csched2_priv(ops);
     s_time_t delta, vcpu_load;
@@ -1260,7 +1260,7 @@ update_svc_load(const struct scheduler *ops,
 static void
 update_load(const struct scheduler *ops,
             struct csched2_runqueue_data *rqd,
-            struct csched2_vcpu *svc, int change, s_time_t now)
+            struct csched2_unit *svc, int change, s_time_t now)
 {
     trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0,  NULL);
 
@@ -1270,7 +1270,7 @@ update_load(const struct scheduler *ops,
 }
 
 static void
-runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct csched2_unit *svc)
 {
     struct list_head *iter;
     unsigned int cpu = svc->vcpu->processor;
@@ -1289,7 +1289,7 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
 
     list_for_each( iter, runq )
     {
-        struct csched2_vcpu * iter_svc = runq_elem(iter);
+        struct csched2_unit * iter_svc = runq_elem(iter);
 
         if ( svc->credit > iter_svc->credit )
             break;
@@ -1313,13 +1313,13 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
     }
 }
 
-static inline void runq_remove(struct csched2_vcpu *svc)
+static inline void runq_remove(struct csched2_unit *svc)
 {
     ASSERT(vcpu_on_runq(svc));
     list_del_init(&svc->runq_elem);
 }
 
-void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, s_time_t);
+void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_unit *, s_time_t);
 
 static inline void
 tickle_cpu(unsigned int cpu, struct csched2_runqueue_data *rqd)
@@ -1335,7 +1335,7 @@ tickle_cpu(unsigned int cpu, struct csched2_runqueue_data *rqd)
  * whether or not it already run for more than the ratelimit, to which we
  * apply some tolerance).
  */
-static inline bool is_preemptable(const struct csched2_vcpu *svc,
+static inline bool is_preemptable(const struct csched2_unit *svc,
                                     s_time_t now, s_time_t ratelimit)
 {
     if ( ratelimit <= CSCHED2_RATELIMIT_TICKLE_TOLERANCE )
@@ -1361,10 +1361,10 @@ static inline bool is_preemptable(const struct csched2_vcpu *svc,
  * Within the same class, the highest difference of credit.
  */
 static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
-                             struct csched2_vcpu *new, unsigned int cpu)
+                             struct csched2_unit *new, unsigned int cpu)
 {
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
-    struct csched2_vcpu * cur = csched2_vcpu(curr_on_cpu(cpu));
+    struct csched2_unit * cur = csched2_unit(curr_on_cpu(cpu));
     struct csched2_private *prv = csched2_priv(ops);
     s_time_t score;
 
@@ -1433,7 +1433,7 @@ static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
  * pick up some work, so it would be wrong to consider it idle.
  */
 static void
-runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
+runq_tickle(const struct scheduler *ops, struct csched2_unit *new, s_time_t now)
 {
     int i, ipid = -1;
     s_time_t max = 0;
@@ -1588,7 +1588,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
         return;
     }
 
-    ASSERT(!is_idle_vcpu(curr_on_cpu(ipid)));
+    ASSERT(!is_idle_vcpu(curr_on_cpu(ipid)->vcpu_list));
     SCHED_STAT_CRANK(tickled_busy_cpu);
  tickle:
     BUG_ON(ipid == -1);
@@ -1615,7 +1615,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
  * Credit-related code
  */
 static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
-                         struct csched2_vcpu *snext)
+                         struct csched2_unit *snext)
 {
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
     struct list_head *iter;
@@ -1645,10 +1645,10 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
     list_for_each( iter, &rqd->svc )
     {
         unsigned int svc_cpu;
-        struct csched2_vcpu * svc;
+        struct csched2_unit * svc;
         int start_credit;
 
-        svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
+        svc = list_entry(iter, struct csched2_unit, rqd_elem);
         svc_cpu = svc->vcpu->processor;
 
         ASSERT(!is_idle_vcpu(svc->vcpu));
@@ -1658,7 +1658,7 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
          * If svc is running, it is our responsibility to make sure, here,
          * that the credit it has spent so far get accounted.
          */
-        if ( svc->vcpu == curr_on_cpu(svc_cpu) )
+        if ( svc->vcpu == curr_on_cpu(svc_cpu)->vcpu_list )
         {
             burn_credits(rqd, svc, now);
             /*
@@ -1710,11 +1710,11 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
 }
 
 void burn_credits(struct csched2_runqueue_data *rqd,
-                  struct csched2_vcpu *svc, s_time_t now)
+                  struct csched2_unit *svc, s_time_t now)
 {
     s_time_t delta;
 
-    ASSERT(svc == csched2_vcpu(curr_on_cpu(svc->vcpu->processor)));
+    ASSERT(svc == csched2_unit(curr_on_cpu(svc->vcpu->processor)));
 
     if ( unlikely(is_idle_vcpu(svc->vcpu)) )
     {
@@ -1764,7 +1764,7 @@ void burn_credits(struct csched2_runqueue_data *rqd,
  * Budget-related code.
  */
 
-static void park_vcpu(struct csched2_vcpu *svc)
+static void park_vcpu(struct csched2_unit *svc)
 {
     struct vcpu *v = svc->vcpu;
 
@@ -1793,7 +1793,7 @@ static void park_vcpu(struct csched2_vcpu *svc)
     list_add(&svc->parked_elem, &svc->sdom->parked_vcpus);
 }
 
-static bool vcpu_grab_budget(struct csched2_vcpu *svc)
+static bool vcpu_grab_budget(struct csched2_unit *svc)
 {
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
@@ -1840,7 +1840,7 @@ static bool vcpu_grab_budget(struct csched2_vcpu *svc)
 }
 
 static void
-vcpu_return_budget(struct csched2_vcpu *svc, struct list_head *parked)
+vcpu_return_budget(struct csched2_unit *svc, struct list_head *parked)
 {
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
@@ -1883,7 +1883,7 @@ vcpu_return_budget(struct csched2_vcpu *svc, struct list_head *parked)
 static void
 unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus)
 {
-    struct csched2_vcpu *svc, *tmp;
+    struct csched2_unit *svc, *tmp;
     spinlock_t *lock;
 
     list_for_each_entry_safe(svc, tmp, vcpus, parked_elem)
@@ -2005,7 +2005,7 @@ static void replenish_domain_budget(void* data)
 static inline void
 csched2_vcpu_check(struct vcpu *vc)
 {
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(vc->sched_unit);
     struct csched2_dom * const sdom = svc->sdom;
 
     BUG_ON( svc->vcpu != vc );
@@ -2031,10 +2031,10 @@ csched2_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
                     void *dd)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched2_vcpu *svc;
+    struct csched2_unit *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct csched2_vcpu);
+    svc = xzalloc(struct csched2_unit);
     if ( svc == NULL )
         return NULL;
 
@@ -2075,12 +2075,12 @@ static void
 csched2_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
 
     ASSERT(!is_idle_vcpu(vc));
     SCHED_STAT_CRANK(vcpu_sleep);
 
-    if ( curr_on_cpu(vc->processor) == vc )
+    if ( curr_on_cpu(vc->processor) == unit )
     {
         tickle_cpu(vc->processor, svc->rqd);
     }
@@ -2098,7 +2098,7 @@ static void
 csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     unsigned int cpu = vc->processor;
     s_time_t now;
 
@@ -2106,7 +2106,7 @@ csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 
     ASSERT(!is_idle_vcpu(vc));
 
-    if ( unlikely(curr_on_cpu(cpu) == vc) )
+    if ( unlikely(curr_on_cpu(cpu) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         goto out;
@@ -2153,8 +2153,7 @@ out:
 static void
 csched2_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct vcpu *v = unit->vcpu_list;
-    struct csched2_vcpu * const svc = csched2_vcpu(v);
+    struct csched2_unit * const svc = csched2_unit(unit);
 
     __set_bit(__CSFLAG_vcpu_yield, &svc->flags);
 }
@@ -2163,7 +2162,7 @@ static void
 csched2_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     spinlock_t *lock = vcpu_schedule_lock_irq(vc);
     s_time_t now = NOW();
     LIST_HEAD(were_parked);
@@ -2209,7 +2208,7 @@ csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
     struct vcpu *vc = unit->vcpu_list;
     int i, min_rqi = -1, min_s_rqi = -1;
     unsigned int new_cpu, cpu = vc->processor;
-    struct csched2_vcpu *svc = csched2_vcpu(vc);
+    struct csched2_unit *svc = csched2_unit(unit);
     s_time_t min_avgload = MAX_LOAD, min_s_avgload = MAX_LOAD;
     bool has_soft;
 
@@ -2431,15 +2430,15 @@ csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 typedef struct {
     /* NB: Modified by consider() */
     s_time_t load_delta;
-    struct csched2_vcpu * best_push_svc, *best_pull_svc;
+    struct csched2_unit * best_push_svc, *best_pull_svc;
     /* NB: Read by consider() */
     struct csched2_runqueue_data *lrqd;
     struct csched2_runqueue_data *orqd;                  
 } balance_state_t;
 
 static void consider(balance_state_t *st, 
-                     struct csched2_vcpu *push_svc,
-                     struct csched2_vcpu *pull_svc)
+                     struct csched2_unit *push_svc,
+                     struct csched2_unit *pull_svc)
 {
     s_time_t l_load, o_load, delta;
 
@@ -2472,8 +2471,8 @@ static void consider(balance_state_t *st,
 
 
 static void migrate(const struct scheduler *ops,
-                    struct csched2_vcpu *svc, 
-                    struct csched2_runqueue_data *trqd, 
+                    struct csched2_unit *svc,
+                    struct csched2_runqueue_data *trqd,
                     s_time_t now)
 {
     int cpu = svc->vcpu->processor;
@@ -2542,7 +2541,7 @@ static void migrate(const struct scheduler *ops,
  *  - svc is not already flagged to migrate,
  *  - if svc is allowed to run on at least one of the pcpus of rqd.
  */
-static bool vcpu_is_migrateable(struct csched2_vcpu *svc,
+static bool vcpu_is_migrateable(struct csched2_unit *svc,
                                   struct csched2_runqueue_data *rqd)
 {
     struct vcpu *v = svc->vcpu;
@@ -2692,7 +2691,7 @@ retry:
     /* Reuse load delta (as we're trying to minimize it) */
     list_for_each( push_iter, &st.lrqd->svc )
     {
-        struct csched2_vcpu * push_svc = list_entry(push_iter, struct csched2_vcpu, rqd_elem);
+        struct csched2_unit * push_svc = list_entry(push_iter, struct csched2_unit, rqd_elem);
 
         update_svc_load(ops, push_svc, 0, now);
 
@@ -2701,7 +2700,7 @@ retry:
 
         list_for_each( pull_iter, &st.orqd->svc )
         {
-            struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
+            struct csched2_unit * pull_svc = list_entry(pull_iter, struct csched2_unit, rqd_elem);
             
             if ( !inner_load_updated )
                 update_svc_load(ops, pull_svc, 0, now);
@@ -2720,7 +2719,7 @@ retry:
 
     list_for_each( pull_iter, &st.orqd->svc )
     {
-        struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
+        struct csched2_unit * pull_svc = list_entry(pull_iter, struct csched2_unit, rqd_elem);
         
         if ( !vcpu_is_migrateable(pull_svc, st.lrqd) )
             continue;
@@ -2747,7 +2746,7 @@ csched2_unit_migrate(
 {
     struct vcpu *vc = unit->vcpu_list;
     struct domain *d = vc->domain;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     struct csched2_runqueue_data *trqd;
     s_time_t now = NOW();
 
@@ -2848,7 +2847,7 @@ csched2_dom_cntl(
             /* Update weights for vcpus, and max_weight for runqueues on which they reside */
             for_each_vcpu ( d, v )
             {
-                struct csched2_vcpu *svc = csched2_vcpu(v);
+                struct csched2_unit *svc = csched2_unit(v->sched_unit);
                 spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
 
                 ASSERT(svc->rqd == c2rqd(ops, svc->vcpu->processor));
@@ -2862,7 +2861,7 @@ csched2_dom_cntl(
         /* Cap */
         if ( op->u.credit2.cap != 0 )
         {
-            struct csched2_vcpu *svc;
+            struct csched2_unit *svc;
             spinlock_t *lock;
 
             /* Cap is only valid if it's below 100 * nr_of_vCPUS */
@@ -2886,7 +2885,7 @@ csched2_dom_cntl(
              */
             for_each_vcpu ( d, v )
             {
-                svc = csched2_vcpu(v);
+                svc = csched2_unit(v->sched_unit);
                 lock = vcpu_schedule_lock(svc->vcpu);
                 /*
                  * Too small quotas would in theory cause a lot of overhead,
@@ -2929,14 +2928,14 @@ csched2_dom_cntl(
                  */
                 for_each_vcpu ( d, v )
                 {
-                    svc = csched2_vcpu(v);
+                    svc = csched2_unit(v->sched_unit);
                     lock = vcpu_schedule_lock(svc->vcpu);
                     if ( v->is_running )
                     {
                         unsigned int cpu = v->processor;
                         struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
 
-                        ASSERT(curr_on_cpu(cpu) == v);
+                        ASSERT(curr_on_cpu(cpu)->vcpu_list == v);
 
                         /*
                          * We are triggering a reschedule on the vCPU's
@@ -2976,7 +2975,7 @@ csched2_dom_cntl(
             /* Disable budget accounting for all the vCPUs. */
             for_each_vcpu ( d, v )
             {
-                struct csched2_vcpu *svc = csched2_vcpu(v);
+                struct csched2_unit *svc = csched2_unit(v->sched_unit);
                 spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
 
                 svc->budget = STIME_MAX;
@@ -3013,8 +3012,7 @@ static void
 csched2_aff_cntl(const struct scheduler *ops, struct sched_unit *unit,
                  const cpumask_t *hard, const cpumask_t *soft)
 {
-    struct vcpu *v = unit->vcpu_list;
-    struct csched2_vcpu *svc = csched2_vcpu(v);
+    struct csched2_unit *svc = csched2_unit(unit);
 
     if ( !hard )
         return;
@@ -3114,7 +3112,7 @@ static void
 csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched2_vcpu *svc = unit->priv;
+    struct csched2_unit *svc = unit->priv;
     struct csched2_dom * const sdom = svc->sdom;
     spinlock_t *lock;
 
@@ -3146,7 +3144,7 @@ csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 static void
 csched2_free_udata(const struct scheduler *ops, void *priv)
 {
-    struct csched2_vcpu *svc = priv;
+    struct csched2_unit *svc = priv;
 
     xfree(svc);
 }
@@ -3155,7 +3153,7 @@ static void
 csched2_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct csched2_vcpu * const svc = csched2_vcpu(vc);
+    struct csched2_unit * const svc = csched2_unit(unit);
     spinlock_t *lock;
 
     ASSERT(!is_idle_vcpu(vc));
@@ -3176,7 +3174,7 @@ csched2_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 /* How long should we let this vcpu run for? */
 static s_time_t
 csched2_runtime(const struct scheduler *ops, int cpu,
-                struct csched2_vcpu *snext, s_time_t now)
+                struct csched2_unit *snext, s_time_t now)
 {
     s_time_t time, min_time;
     int rt_credit; /* Proposed runtime measured in credits */
@@ -3221,7 +3219,7 @@ csched2_runtime(const struct scheduler *ops, int cpu,
      */
     if ( ! list_empty(runq) )
     {
-        struct csched2_vcpu *swait = runq_elem(runq->next);
+        struct csched2_unit *swait = runq_elem(runq->next);
 
         if ( ! is_idle_vcpu(swait->vcpu)
              && swait->credit > 0 )
@@ -3272,14 +3270,14 @@ csched2_runtime(const struct scheduler *ops, int cpu,
 /*
  * Find a candidate.
  */
-static struct csched2_vcpu *
+static struct csched2_unit *
 runq_candidate(struct csched2_runqueue_data *rqd,
-               struct csched2_vcpu *scurr,
+               struct csched2_unit *scurr,
                int cpu, s_time_t now,
                unsigned int *skipped)
 {
     struct list_head *iter, *temp;
-    struct csched2_vcpu *snext = NULL;
+    struct csched2_unit *snext = NULL;
     struct csched2_private *prv = csched2_priv(per_cpu(scheduler, cpu));
     bool yield = false, soft_aff_preempt = false;
 
@@ -3360,12 +3358,12 @@ runq_candidate(struct csched2_runqueue_data *rqd,
     if ( vcpu_runnable(scurr->vcpu) && !soft_aff_preempt )
         snext = scurr;
     else
-        snext = csched2_vcpu(idle_vcpu[cpu]);
+        snext = csched2_unit(idle_vcpu[cpu]->sched_unit);
 
  check_runq:
     list_for_each_safe( iter, temp, &rqd->runq )
     {
-        struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, runq_elem);
+        struct csched2_unit * svc = list_entry(iter, struct csched2_unit, runq_elem);
 
         if ( unlikely(tb_init_done) )
         {
@@ -3464,8 +3462,8 @@ csched2_schedule(
 {
     const int cpu = smp_processor_id();
     struct csched2_runqueue_data *rqd;
-    struct csched2_vcpu * const scurr = csched2_vcpu(current);
-    struct csched2_vcpu *snext = NULL;
+    struct csched2_unit * const scurr = csched2_unit(current->sched_unit);
+    struct csched2_unit *snext = NULL;
     unsigned int skipped_vcpus = 0;
     struct task_slice ret;
     bool tickled;
@@ -3541,7 +3539,7 @@ csched2_schedule(
     {
         __clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);
         trace_var(TRC_CSCHED2_SCHED_TASKLET, 1, 0, NULL);
-        snext = csched2_vcpu(idle_vcpu[cpu]);
+        snext = csched2_unit(idle_vcpu[cpu]->sched_unit);
     }
     else
         snext = runq_candidate(rqd, scurr, cpu, now, &skipped_vcpus);
@@ -3644,7 +3642,7 @@ csched2_schedule(
 }
 
 static void
-csched2_dump_vcpu(struct csched2_private *prv, struct csched2_vcpu *svc)
+csched2_dump_vcpu(struct csched2_private *prv, struct csched2_unit *svc)
 {
     printk("[%i.%i] flags=%x cpu=%i",
             svc->vcpu->domain->domain_id,
@@ -3668,7 +3666,7 @@ static inline void
 dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct csched2_private *prv = csched2_priv(ops);
-    struct csched2_vcpu *svc;
+    struct csched2_unit *svc;
 
     printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
            cpu, c2r(cpu),
@@ -3676,7 +3674,7 @@ dump_pcpu(const struct scheduler *ops, int cpu)
            CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
-    svc = csched2_vcpu(curr_on_cpu(cpu));
+    svc = csched2_unit(curr_on_cpu(cpu));
     if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         printk("\trun: ");
@@ -3749,7 +3747,7 @@ csched2_dump(const struct scheduler *ops)
 
         for_each_vcpu( sdom->dom, v )
         {
-            struct csched2_vcpu * const svc = csched2_vcpu(v);
+            struct csched2_unit * const svc = csched2_unit(v->sched_unit);
             spinlock_t *lock;
 
             lock = vcpu_schedule_lock(svc->vcpu);
@@ -3778,7 +3776,7 @@ csched2_dump(const struct scheduler *ops)
         printk("RUNQ:\n");
         list_for_each( iter, runq )
         {
-            struct csched2_vcpu *svc = runq_elem(iter);
+            struct csched2_unit *svc = runq_elem(iter);
 
             if ( svc )
             {
@@ -3882,7 +3880,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                      void *pdata, void *vdata)
 {
     struct csched2_private *prv = csched2_priv(new_ops);
-    struct csched2_vcpu *svc = vdata;
+    struct csched2_unit *svc = vdata;
     unsigned rqi;
 
     ASSERT(pdata && svc && is_idle_vcpu(svc->vcpu));
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index cb400f55d0..3619774318 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -93,7 +93,7 @@ DEFINE_PER_CPU(struct null_pcpu, npc);
 /*
  * Virtual CPU
  */
-struct null_vcpu {
+struct null_unit {
     struct list_head waitq_elem;
     struct vcpu *vcpu;
 };
@@ -114,9 +114,9 @@ static inline struct null_private *null_priv(const struct scheduler *ops)
     return ops->sched_data;
 }
 
-static inline struct null_vcpu *null_vcpu(const struct vcpu *v)
+static inline struct null_unit *null_unit(const struct sched_unit *unit)
 {
-    return v->sched_unit->priv;
+    return unit->priv;
 }
 
 static inline bool vcpu_check_affinity(struct vcpu *v, unsigned int cpu,
@@ -189,9 +189,9 @@ static void *null_alloc_udata(const struct scheduler *ops,
                               struct sched_unit *unit, void *dd)
 {
     struct vcpu *v = unit->vcpu_list;
-    struct null_vcpu *nvc;
+    struct null_unit *nvc;
 
-    nvc = xzalloc(struct null_vcpu);
+    nvc = xzalloc(struct null_unit);
     if ( nvc == NULL )
         return NULL;
 
@@ -205,7 +205,7 @@ static void *null_alloc_udata(const struct scheduler *ops,
 
 static void null_free_udata(const struct scheduler *ops, void *priv)
 {
-    struct null_vcpu *nvc = priv;
+    struct null_unit *nvc = priv;
 
     xfree(nvc);
 }
@@ -362,9 +362,9 @@ static bool vcpu_deassign(struct null_private *prv, struct vcpu *v)
 {
     unsigned int bs;
     unsigned int cpu = v->processor;
-    struct null_vcpu *wvc;
+    struct null_unit *wvc;
 
-    ASSERT(list_empty(&null_vcpu(v)->waitq_elem));
+    ASSERT(list_empty(&null_unit(v->sched_unit)->waitq_elem));
     ASSERT(per_cpu(npc, v->processor).vcpu == v);
     ASSERT(!cpumask_test_cpu(v->processor, &prv->cpus_free));
 
@@ -421,7 +421,7 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
 {
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     struct null_private *prv = null_priv(new_ops);
-    struct null_vcpu *nvc = vdata;
+    struct null_unit *nvc = vdata;
 
     ASSERT(nvc && is_idle_vcpu(nvc->vcpu));
 
@@ -444,7 +444,7 @@ static void null_unit_insert(const struct scheduler *ops,
 {
     struct vcpu *v = unit->vcpu_list;
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc = null_vcpu(v);
+    struct null_unit *nvc = null_unit(unit);
     unsigned int cpu;
     spinlock_t *lock;
 
@@ -508,7 +508,7 @@ static void null_unit_remove(const struct scheduler *ops,
 {
     struct vcpu *v = unit->vcpu_list;
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc = null_vcpu(v);
+    struct null_unit *nvc = null_unit(unit);
     spinlock_t *lock;
 
     ASSERT(!is_idle_vcpu(v));
@@ -546,12 +546,12 @@ static void null_unit_wake(const struct scheduler *ops,
 {
     struct vcpu *v = unit->vcpu_list;
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc = null_vcpu(v);
+    struct null_unit *nvc = null_unit(unit);
     unsigned int cpu = v->processor;
 
     ASSERT(!is_idle_vcpu(v));
 
-    if ( unlikely(curr_on_cpu(cpu) == v) )
+    if ( unlikely(curr_on_cpu(cpu) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         return;
@@ -631,7 +631,7 @@ static void null_unit_sleep(const struct scheduler *ops,
      */
     if ( unlikely(!is_vcpu_online(v)) )
     {
-        struct null_vcpu *nvc = null_vcpu(v);
+        struct null_unit *nvc = null_unit(unit);
 
         if ( unlikely(!list_empty(&nvc->waitq_elem)) )
         {
@@ -644,7 +644,7 @@ static void null_unit_sleep(const struct scheduler *ops,
     }
 
     /* If v is not assigned to a pCPU, or is not running, no need to bother */
-    if ( likely(!tickled && curr_on_cpu(cpu) == v) )
+    if ( likely(!tickled && curr_on_cpu(cpu) == unit) )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
 
     SCHED_STAT_CRANK(vcpu_sleep);
@@ -662,7 +662,7 @@ static void null_unit_migrate(const struct scheduler *ops,
 {
     struct vcpu *v = unit->vcpu_list;
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc = null_vcpu(v);
+    struct null_unit *nvc = null_unit(unit);
 
     ASSERT(!is_idle_vcpu(v));
 
@@ -758,7 +758,7 @@ static void null_unit_migrate(const struct scheduler *ops,
 #ifndef NDEBUG
 static inline void null_vcpu_check(struct vcpu *v)
 {
-    struct null_vcpu * const nvc = null_vcpu(v);
+    struct null_unit * const nvc = null_unit(v->sched_unit);
     struct null_dom * const ndom = v->domain->sched_priv;
 
     BUG_ON(nvc->vcpu != v);
@@ -788,7 +788,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
     unsigned int bs;
     const unsigned int cpu = smp_processor_id();
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *wvc;
+    struct null_unit *wvc;
     struct task_slice ret;
 
     SCHED_STAT_CRANK(schedule);
@@ -874,7 +874,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
     return ret;
 }
 
-static inline void dump_vcpu(struct null_private *prv, struct null_vcpu *nvc)
+static inline void dump_vcpu(struct null_private *prv, struct null_unit *nvc)
 {
     printk("[%i.%i] pcpu=%d", nvc->vcpu->domain->domain_id,
             nvc->vcpu->vcpu_id, list_empty(&nvc->waitq_elem) ?
@@ -884,7 +884,7 @@ static inline void dump_vcpu(struct null_private *prv, struct null_vcpu *nvc)
 static void null_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct null_private *prv = null_priv(ops);
-    struct null_vcpu *nvc;
+    struct null_unit *nvc;
     spinlock_t *lock;
     unsigned long flags;
 
@@ -898,7 +898,7 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     printk("\n");
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
-    nvc = null_vcpu(curr_on_cpu(cpu));
+    nvc = null_unit(curr_on_cpu(cpu));
     if ( nvc && !is_idle_vcpu(nvc->vcpu) )
     {
         printk("\trun: ");
@@ -932,7 +932,7 @@ static void null_dump(const struct scheduler *ops)
         printk("\tDomain: %d\n", ndom->dom->domain_id);
         for_each_vcpu( ndom->dom, v )
         {
-            struct null_vcpu * const nvc = null_vcpu(v);
+            struct null_unit * const nvc = null_unit(v->sched_unit);
             spinlock_t *lock;
 
             lock = vcpu_schedule_lock(nvc->vcpu);
@@ -950,7 +950,7 @@ static void null_dump(const struct scheduler *ops)
     spin_lock(&prv->waitq_lock);
     list_for_each( iter, &prv->waitq )
     {
-        struct null_vcpu *nvc = list_entry(iter, struct null_vcpu, waitq_elem);
+        struct null_unit *nvc = list_entry(iter, struct null_unit, waitq_elem);
 
         if ( loop++ != 0 )
             printk(", ");
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 6ca792e643..57da55d90f 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -195,7 +195,7 @@ struct rt_private {
 /*
  * Virtual CPU
  */
-struct rt_vcpu {
+struct rt_unit {
     struct list_head q_elem;     /* on the runq/depletedq list */
     struct list_head replq_elem; /* on the replenishment events list */
 
@@ -233,9 +233,9 @@ static inline struct rt_private *rt_priv(const struct scheduler *ops)
     return ops->sched_data;
 }
 
-static inline struct rt_vcpu *rt_vcpu(const struct vcpu *vcpu)
+static inline struct rt_unit *rt_unit(const struct sched_unit *unit)
 {
-    return vcpu->sched_unit->priv;
+    return unit->priv;
 }
 
 static inline struct list_head *rt_runq(const struct scheduler *ops)
@@ -253,7 +253,7 @@ static inline struct list_head *rt_replq(const struct scheduler *ops)
     return &rt_priv(ops)->replq;
 }
 
-static inline bool has_extratime(const struct rt_vcpu *svc)
+static inline bool has_extratime(const struct rt_unit *svc)
 {
     return svc->flags & RTDS_extratime;
 }
@@ -263,25 +263,25 @@ static inline bool has_extratime(const struct rt_vcpu *svc)
  * and the replenishment events queue.
  */
 static int
-vcpu_on_q(const struct rt_vcpu *svc)
+vcpu_on_q(const struct rt_unit *svc)
 {
    return !list_empty(&svc->q_elem);
 }
 
-static struct rt_vcpu *
+static struct rt_unit *
 q_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct rt_vcpu, q_elem);
+    return list_entry(elem, struct rt_unit, q_elem);
 }
 
-static struct rt_vcpu *
+static struct rt_unit *
 replq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct rt_vcpu, replq_elem);
+    return list_entry(elem, struct rt_unit, replq_elem);
 }
 
 static int
-vcpu_on_replq(const struct rt_vcpu *svc)
+vcpu_on_replq(const struct rt_unit *svc)
 {
     return !list_empty(&svc->replq_elem);
 }
@@ -291,7 +291,7 @@ vcpu_on_replq(const struct rt_vcpu *svc)
  * Otherwise, return value < 0
  */
 static s_time_t
-compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2)
+compare_vcpu_priority(const struct rt_unit *v1, const struct rt_unit *v2)
 {
     int prio = v2->priority_level - v1->priority_level;
 
@@ -305,7 +305,7 @@ compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2)
  * Debug related code, dump vcpu/cpu information
  */
 static void
-rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
+rt_dump_vcpu(const struct scheduler *ops, const struct rt_unit *svc)
 {
     cpumask_t *cpupool_mask, *mask;
 
@@ -351,13 +351,13 @@ static void
 rt_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     unsigned long flags;
 
     spin_lock_irqsave(&prv->lock, flags);
     printk("CPU[%02d]\n", cpu);
     /* current VCPU (nothing to say if that's the idle vcpu). */
-    svc = rt_vcpu(curr_on_cpu(cpu));
+    svc = rt_unit(curr_on_cpu(cpu));
     if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         rt_dump_vcpu(ops, svc);
@@ -370,7 +370,7 @@ rt_dump(const struct scheduler *ops)
 {
     struct list_head *runq, *depletedq, *replq, *iter;
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     struct rt_dom *sdom;
     unsigned long flags;
 
@@ -414,7 +414,7 @@ rt_dump(const struct scheduler *ops)
 
         for_each_vcpu ( sdom->dom, v )
         {
-            svc = rt_vcpu(v);
+            svc = rt_unit(v->sched_unit);
             rt_dump_vcpu(ops, svc);
         }
     }
@@ -428,7 +428,7 @@ rt_dump(const struct scheduler *ops)
  * it needs to be updated to the deadline of the current period
  */
 static void
-rt_update_deadline(s_time_t now, struct rt_vcpu *svc)
+rt_update_deadline(s_time_t now, struct rt_unit *svc)
 {
     ASSERT(now >= svc->cur_deadline);
     ASSERT(svc->period != 0);
@@ -499,8 +499,8 @@ deadline_queue_remove(struct list_head *queue, struct list_head *elem)
 }
 
 static inline bool
-deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
-                      struct rt_vcpu *svc, struct list_head *elem,
+deadline_queue_insert(struct rt_unit * (*qelem)(struct list_head *),
+                      struct rt_unit *svc, struct list_head *elem,
                       struct list_head *queue)
 {
     struct list_head *iter;
@@ -508,7 +508,7 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
 
     list_for_each ( iter, queue )
     {
-        struct rt_vcpu * iter_svc = (*qelem)(iter);
+        struct rt_unit * iter_svc = (*qelem)(iter);
         if ( compare_vcpu_priority(svc, iter_svc) > 0 )
             break;
         pos++;
@@ -522,14 +522,14 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
   deadline_queue_insert(&replq_elem, ##__VA_ARGS__)
 
 static inline void
-q_remove(struct rt_vcpu *svc)
+q_remove(struct rt_unit *svc)
 {
     ASSERT( vcpu_on_q(svc) );
     list_del_init(&svc->q_elem);
 }
 
 static inline void
-replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_remove(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct rt_private *prv = rt_priv(ops);
     struct list_head *replq = rt_replq(ops);
@@ -546,7 +546,7 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
          */
         if ( !list_empty(replq) )
         {
-            struct rt_vcpu *svc_next = replq_elem(replq->next);
+            struct rt_unit *svc_next = replq_elem(replq->next);
             set_timer(&prv->repl_timer, svc_next->cur_deadline);
         }
         else
@@ -560,7 +560,7 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
  * Insert svc without budget in DepletedQ unsorted;
  */
 static void
-runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct rt_private *prv = rt_priv(ops);
     struct list_head *runq = rt_runq(ops);
@@ -578,7 +578,7 @@ runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
 }
 
 static void
-replq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_insert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct list_head *replq = rt_replq(ops);
     struct rt_private *prv = rt_priv(ops);
@@ -600,10 +600,10 @@ replq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
  * changed.
  */
 static void
-replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct list_head *replq = rt_replq(ops);
-    struct rt_vcpu *rearm_svc = svc;
+    struct rt_unit *rearm_svc = svc;
     bool_t rearm = 0;
 
     ASSERT( vcpu_on_replq(svc) );
@@ -734,7 +734,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                 void *pdata, void *vdata)
 {
     struct rt_private *prv = rt_priv(new_ops);
-    struct rt_vcpu *svc = vdata;
+    struct rt_unit *svc = vdata;
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vcpu));
 
@@ -841,10 +841,10 @@ static void *
 rt_alloc_udata(const struct scheduler *ops, struct sched_unit *unit, void *dd)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct rt_vcpu);
+    svc = xzalloc(struct rt_unit);
     if ( svc == NULL )
         return NULL;
 
@@ -869,7 +869,7 @@ rt_alloc_udata(const struct scheduler *ops, struct sched_unit *unit, void *dd)
 static void
 rt_free_udata(const struct scheduler *ops, void *priv)
 {
-    struct rt_vcpu *svc = priv;
+    struct rt_unit *svc = priv;
 
     xfree(svc);
 }
@@ -885,7 +885,7 @@ static void
 rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct rt_vcpu *svc = rt_vcpu(vc);
+    struct rt_unit *svc = rt_unit(unit);
     s_time_t now;
     spinlock_t *lock;
 
@@ -914,13 +914,13 @@ rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 }
 
 /*
- * Remove rt_vcpu svc from the old scheduler in source cpupool.
+ * Remove rt_unit svc from the old scheduler in source cpupool.
  */
 static void
 rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct rt_vcpu * const svc = rt_vcpu(vc);
+    struct rt_unit * const svc = rt_unit(unit);
     struct rt_dom * const sdom = svc->sdom;
     spinlock_t *lock;
 
@@ -942,7 +942,7 @@ rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
  * Burn budget in nanosecond granularity
  */
 static void
-burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
+burn_budget(const struct scheduler *ops, struct rt_unit *svc, s_time_t now)
 {
     s_time_t delta;
 
@@ -1006,13 +1006,13 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
  * RunQ is sorted. Pick first one within cpumask. If no one, return NULL
  * lock is grabbed before calling this function
  */
-static struct rt_vcpu *
+static struct rt_unit *
 runq_pick(const struct scheduler *ops, const cpumask_t *mask)
 {
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter;
-    struct rt_vcpu *svc = NULL;
-    struct rt_vcpu *iter_svc = NULL;
+    struct rt_unit *svc = NULL;
+    struct rt_unit *iter_svc = NULL;
     cpumask_t cpu_common;
     cpumask_t *online;
 
@@ -1063,8 +1063,8 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
 {
     const int cpu = smp_processor_id();
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *const scurr = rt_vcpu(current);
-    struct rt_vcpu *snext = NULL;
+    struct rt_unit *const scurr = rt_unit(current->sched_unit);
+    struct rt_unit *snext = NULL;
     struct task_slice ret = { .migrated = 0 };
 
     /* TRACE */
@@ -1090,13 +1090,13 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     if ( tasklet_work_scheduled )
     {
         trace_var(TRC_RTDS_SCHED_TASKLET, 1, 0,  NULL);
-        snext = rt_vcpu(idle_vcpu[cpu]);
+        snext = rt_unit(idle_vcpu[cpu]->sched_unit);
     }
     else
     {
         snext = runq_pick(ops, cpumask_of(cpu));
         if ( snext == NULL )
-            snext = rt_vcpu(idle_vcpu[cpu]);
+            snext = rt_unit(idle_vcpu[cpu]->sched_unit);
 
         /* if scurr has higher priority and budget, still pick scurr */
         if ( !is_idle_vcpu(current) &&
@@ -1142,12 +1142,12 @@ static void
 rt_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct rt_vcpu * const svc = rt_vcpu(vc);
+    struct rt_unit * const svc = rt_unit(unit);
 
     BUG_ON( is_idle_vcpu(vc) );
     SCHED_STAT_CRANK(vcpu_sleep);
 
-    if ( curr_on_cpu(vc->processor) == vc )
+    if ( curr_on_cpu(vc->processor) == unit )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
     else if ( vcpu_on_q(svc) )
     {
@@ -1177,11 +1177,11 @@ rt_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
  * lock is grabbed before calling this function
  */
 static void
-runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
+runq_tickle(const struct scheduler *ops, struct rt_unit *new)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *latest_deadline_vcpu = NULL; /* lowest priority */
-    struct rt_vcpu *iter_svc;
+    struct rt_unit *latest_deadline_vcpu = NULL; /* lowest priority */
+    struct rt_unit *iter_svc;
     struct vcpu *iter_vc;
     int cpu = 0, cpu_to_tickle = 0;
     cpumask_t not_tickled;
@@ -1202,14 +1202,14 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
     cpu = cpumask_test_or_cycle(new->vcpu->processor, &not_tickled);
     while ( cpu!= nr_cpu_ids )
     {
-        iter_vc = curr_on_cpu(cpu);
+        iter_vc = curr_on_cpu(cpu)->vcpu_list;
         if ( is_idle_vcpu(iter_vc) )
         {
             SCHED_STAT_CRANK(tickled_idle_cpu);
             cpu_to_tickle = cpu;
             goto out;
         }
-        iter_svc = rt_vcpu(iter_vc);
+        iter_svc = rt_unit(iter_vc->sched_unit);
         if ( latest_deadline_vcpu == NULL ||
              compare_vcpu_priority(iter_svc, latest_deadline_vcpu) < 0 )
             latest_deadline_vcpu = iter_svc;
@@ -1258,13 +1258,13 @@ static void
 rt_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct rt_vcpu * const svc = rt_vcpu(vc);
+    struct rt_unit * const svc = rt_unit(unit);
     s_time_t now;
     bool_t missed;
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( unlikely(curr_on_cpu(vc->processor) == vc) )
+    if ( unlikely(curr_on_cpu(vc->processor) == unit) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         return;
@@ -1329,7 +1329,7 @@ static void
 rt_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
-    struct rt_vcpu *svc = rt_vcpu(vc);
+    struct rt_unit *svc = rt_unit(unit);
     spinlock_t *lock = vcpu_schedule_lock_irq(vc);
 
     __clear_bit(__RTDS_scheduled, &svc->flags);
@@ -1360,7 +1360,7 @@ rt_dom_cntl(
     struct xen_domctl_scheduler_op *op)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     struct vcpu *v;
     unsigned long flags;
     int rc = 0;
@@ -1384,7 +1384,7 @@ rt_dom_cntl(
         spin_lock_irqsave(&prv->lock, flags);
         for_each_vcpu ( d, v )
         {
-            svc = rt_vcpu(v);
+            svc = rt_unit(v->sched_unit);
             svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */
             svc->budget = MICROSECS(op->u.rtds.budget);
         }
@@ -1410,7 +1410,7 @@ rt_dom_cntl(
             if ( op->cmd == XEN_DOMCTL_SCHEDOP_getvcpuinfo )
             {
                 spin_lock_irqsave(&prv->lock, flags);
-                svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+                svc = rt_unit(d->vcpu[local_sched.vcpuid]->sched_unit);
                 local_sched.u.rtds.budget = svc->budget / MICROSECS(1);
                 local_sched.u.rtds.period = svc->period / MICROSECS(1);
                 if ( has_extratime(svc) )
@@ -1438,7 +1438,7 @@ rt_dom_cntl(
                 }
 
                 spin_lock_irqsave(&prv->lock, flags);
-                svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+                svc = rt_unit(d->vcpu[local_sched.vcpuid]->sched_unit);
                 svc->period = period;
                 svc->budget = budget;
                 if ( local_sched.u.rtds.flags & XEN_DOMCTL_SCHEDRT_extra )
@@ -1471,7 +1471,7 @@ static void repl_timer_handler(void *data){
     struct list_head *replq = rt_replq(ops);
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter, *tmp;
-    struct rt_vcpu *svc;
+    struct rt_unit *svc;
     LIST_HEAD(tmp_replq);
 
     spin_lock_irq(&prv->lock);
@@ -1513,10 +1513,10 @@ static void repl_timer_handler(void *data){
     {
         svc = replq_elem(iter);
 
-        if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu &&
+        if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu->sched_unit &&
              !list_empty(runq) )
         {
-            struct rt_vcpu *next_on_runq = q_elem(runq->next);
+            struct rt_unit *next_on_runq = q_elem(runq->next);
 
             if ( compare_vcpu_priority(svc, next_on_runq) < 0 )
                 runq_tickle(ops, next_on_runq);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 8bca32f5c4..6d6d8a234f 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -394,7 +394,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
     {
-        per_cpu(schedule_data, v->processor).curr = v;
+        per_cpu(schedule_data, v->processor).curr = unit;
         v->is_running = 1;
     }
     else
@@ -1607,7 +1607,7 @@ static void schedule(void)
 
     next = next_slice.task;
 
-    sd->curr = next;
+    sd->curr = next->sched_unit;
 
     if ( next_slice.time >= 0 ) /* -ve means no limit */
         set_timer(&sd->s_timer, now + next_slice.time);
@@ -1749,7 +1749,7 @@ static int cpu_schedule_up(unsigned int cpu)
      * allocated.
      */
 
-    sd->curr = idle_vcpu[cpu];
+    sd->curr = idle_vcpu[cpu]->sched_unit;
 
     sd->sched_priv = NULL;
 
@@ -1917,7 +1917,7 @@ void __init scheduler_init(void)
     idle_domain->max_vcpus = nr_cpu_ids;
     if ( vcpu_create(idle_domain, 0, 0) == NULL )
         BUG();
-    this_cpu(schedule_data).curr = idle_vcpu[0];
+    this_cpu(schedule_data).curr = idle_vcpu[0]->sched_unit;
 }
 
 /*
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 4f61f65288..4b817347d5 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -36,7 +36,7 @@ extern int sched_ratelimit_us;
 struct schedule_data {
     spinlock_t         *schedule_lock,
                        _lock;
-    struct vcpu        *curr;           /* current task                    */
+    struct sched_unit  *curr;
     void               *sched_priv;
     struct timer        s_timer;        /* scheduling timer                */
     atomic_t            urgent_count;   /* how many urgent vcpus           */
-- 
2.16.4

