
[Xen-devel] [PATCH v4 16/46] xen/sched: make credit scheduler vcpu agnostic.



Switch the credit scheduler completely from vcpu to sched_unit usage.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
 xen/common/sched_credit.c | 503 +++++++++++++++++++++++-----------------------
 1 file changed, 250 insertions(+), 253 deletions(-)
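
The hunks below are almost entirely one mechanical conversion: struct
csched_unit now carries a sched_unit up-pointer instead of a vcpu one, and
state that used to be reached through the vCPU is reached through the unit
and its accessors. As a purely illustrative sketch of that pattern (not part
of the patch itself; it only reuses names visible in the hunks below):

    struct csched_unit {
        struct list_head runq_elem;
        struct sched_unit *unit;      /* up-pointer; was: struct vcpu *vcpu */
        /* ... */
    };

    /* Old: derive the pCPU from the vCPU directly.               */
    /*     unsigned int cpu = svc->vcpu->processor;               */

    /* New: go through the unit up-pointer and its accessor.      */
    unsigned int cpu = sched_unit_master(svc->unit);

The same swap repeats for the helpers: is_idle_vcpu() becomes is_idle_unit(),
vcpu_runnable() becomes unit_runnable(), and vc->domain / vc->vcpu_id become
unit->domain / unit->unit_id in the trace points.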

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 9404ccfd90..e710b3c6fa 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -70,10 +70,10 @@
  * inconsistent set of locks. Therefore atomic-safe bit operations must
  * be used for accessing it.
  */
-#define CSCHED_FLAG_VCPU_PARKED    0x0  /* VCPU over capped credits */
-#define CSCHED_FLAG_VCPU_YIELD     0x1  /* VCPU yielding */
-#define CSCHED_FLAG_VCPU_MIGRATING 0x2  /* VCPU may have moved to a new pcpu */
-#define CSCHED_FLAG_VCPU_PINNED    0x4  /* VCPU can run only on 1 pcpu */
+#define CSCHED_FLAG_UNIT_PARKED    0x0  /* UNIT over capped credits */
+#define CSCHED_FLAG_UNIT_YIELD     0x1  /* UNIT yielding */
+#define CSCHED_FLAG_UNIT_MIGRATING 0x2  /* UNIT may have moved to a new pcpu */
+#define CSCHED_FLAG_UNIT_PINNED    0x4  /* UNIT can run only on 1 pcpu */
 
 
 /*
@@ -91,7 +91,7 @@
 /*
  * CSCHED_STATS
  *
- * Manage very basic per-vCPU counters and stats.
+ * Manage very basic per-unit counters and stats.
  *
  * Useful for debugging live systems. The stats are displayed
  * with runq dumps ('r' on the Xen console).
@@ -100,23 +100,23 @@
 
 #define CSCHED_STATS
 
-#define SCHED_VCPU_STATS_RESET(_V)                      \
+#define SCHED_UNIT_STATS_RESET(_V)                      \
     do                                                  \
     {                                                   \
         memset(&(_V)->stats, 0, sizeof((_V)->stats));   \
     } while ( 0 )
 
-#define SCHED_VCPU_STAT_CRANK(_V, _X)       (((_V)->stats._X)++)
+#define SCHED_UNIT_STAT_CRANK(_V, _X)       (((_V)->stats._X)++)
 
-#define SCHED_VCPU_STAT_SET(_V, _X, _Y)     (((_V)->stats._X) = (_Y))
+#define SCHED_UNIT_STAT_SET(_V, _X, _Y)     (((_V)->stats._X) = (_Y))
 
 #else /* !SCHED_STATS */
 
 #undef CSCHED_STATS
 
-#define SCHED_VCPU_STATS_RESET(_V)         do {} while ( 0 )
-#define SCHED_VCPU_STAT_CRANK(_V, _X)      do {} while ( 0 )
-#define SCHED_VCPU_STAT_SET(_V, _X, _Y)    do {} while ( 0 )
+#define SCHED_UNIT_STATS_RESET(_V)         do {} while ( 0 )
+#define SCHED_UNIT_STAT_CRANK(_V, _X)      do {} while ( 0 )
+#define SCHED_UNIT_STAT_SET(_V, _X, _Y)    do {} while ( 0 )
 
 #endif /* SCHED_STATS */
 
@@ -128,7 +128,7 @@
 #define TRC_CSCHED_SCHED_TASKLET TRC_SCHED_CLASS_EVT(CSCHED, 1)
 #define TRC_CSCHED_ACCOUNT_START TRC_SCHED_CLASS_EVT(CSCHED, 2)
 #define TRC_CSCHED_ACCOUNT_STOP  TRC_SCHED_CLASS_EVT(CSCHED, 3)
-#define TRC_CSCHED_STOLEN_VCPU   TRC_SCHED_CLASS_EVT(CSCHED, 4)
+#define TRC_CSCHED_STOLEN_UNIT   TRC_SCHED_CLASS_EVT(CSCHED, 4)
 #define TRC_CSCHED_PICKED_CPU    TRC_SCHED_CLASS_EVT(CSCHED, 5)
 #define TRC_CSCHED_TICKLE        TRC_SCHED_CLASS_EVT(CSCHED, 6)
 #define TRC_CSCHED_BOOST_START   TRC_SCHED_CLASS_EVT(CSCHED, 7)
@@ -158,15 +158,15 @@ struct csched_pcpu {
 };
 
 /*
- * Virtual CPU
+ * Virtual UNIT
  */
 struct csched_unit {
     struct list_head runq_elem;
-    struct list_head active_vcpu_elem;
+    struct list_head active_unit_elem;
 
     /* Up-pointers */
     struct csched_dom *sdom;
-    struct vcpu *vcpu;
+    struct sched_unit *unit;
 
     s_time_t start_time;   /* When we were scheduled (used for credit) */
     unsigned flags;
@@ -194,10 +194,10 @@ struct csched_unit {
  * Domain
  */
 struct csched_dom {
-    struct list_head active_vcpu;
+    struct list_head active_unit;
     struct list_head active_sdom_elem;
     struct domain *dom;
-    uint16_t active_vcpu_count;
+    uint16_t active_unit_count;
     uint16_t weight;
     uint16_t cap;
 };
@@ -217,7 +217,7 @@ struct csched_private {
 
     /* Period of master and tick in milliseconds */
     unsigned int tick_period_us, ticks_per_tslice;
-    s_time_t ratelimit, tslice, vcpu_migr_delay;
+    s_time_t ratelimit, tslice, unit_migr_delay;
 
     struct list_head active_sdom;
     uint32_t weight;
@@ -233,7 +233,7 @@ static void csched_tick(void *_cpu);
 static void csched_acct(void *dummy);
 
 static inline int
-__vcpu_on_runq(struct csched_unit *svc)
+__unit_on_runq(struct csched_unit *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
@@ -244,7 +244,7 @@ __runq_elem(struct list_head *elem)
     return list_entry(elem, struct csched_unit, runq_elem);
 }
 
-/* Is the first element of cpu's runq (if any) cpu's idle vcpu? */
+/* Is the first element of cpu's runq (if any) cpu's idle unit? */
 static inline bool_t is_runq_idle(unsigned int cpu)
 {
     /*
@@ -253,7 +253,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
     ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     return list_empty(RUNQ(cpu)) ||
-           is_idle_vcpu(__runq_elem(RUNQ(cpu)->next)->vcpu);
+           is_idle_unit(__runq_elem(RUNQ(cpu)->next)->unit);
 }
 
 static inline void
@@ -275,11 +275,11 @@ dec_nr_runnable(unsigned int cpu)
 static inline void
 __runq_insert(struct csched_unit *svc)
 {
-    unsigned int cpu = svc->vcpu->processor;
+    unsigned int cpu = sched_unit_master(svc->unit);
     const struct list_head * const runq = RUNQ(cpu);
     struct list_head *iter;
 
-    BUG_ON( __vcpu_on_runq(svc) );
+    BUG_ON( __unit_on_runq(svc) );
 
     list_for_each( iter, runq )
     {
@@ -288,10 +288,10 @@ __runq_insert(struct csched_unit *svc)
             break;
     }
 
-    /* If the vcpu yielded, try to put it behind one lower-priority
-     * runnable vcpu if we can.  The next runq_sort will bring it forward
+    /* If the unit yielded, try to put it behind one lower-priority
+     * runnable unit if we can.  The next runq_sort will bring it forward
      * within 30ms if the queue too long. */
-    if ( test_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags)
+    if ( test_bit(CSCHED_FLAG_UNIT_YIELD, &svc->flags)
          && __runq_elem(iter)->pri > CSCHED_PRI_IDLE )
     {
         iter=iter->next;
@@ -307,20 +307,20 @@ static inline void
 runq_insert(struct csched_unit *svc)
 {
     __runq_insert(svc);
-    inc_nr_runnable(svc->vcpu->processor);
+    inc_nr_runnable(sched_unit_master(svc->unit));
 }
 
 static inline void
 __runq_remove(struct csched_unit *svc)
 {
-    BUG_ON( !__vcpu_on_runq(svc) );
+    BUG_ON( !__unit_on_runq(svc) );
     list_del_init(&svc->runq_elem);
 }
 
 static inline void
 runq_remove(struct csched_unit *svc)
 {
-    dec_nr_runnable(svc->vcpu->processor);
+    dec_nr_runnable(sched_unit_master(svc->unit));
     __runq_remove(svc);
 }
 
@@ -331,7 +331,7 @@ static void burn_credits(struct csched_unit *svc, s_time_t now)
     unsigned int credits;
 
     /* Assert svc is current */
-    ASSERT( svc == CSCHED_UNIT(curr_on_cpu(svc->vcpu->processor)) );
+    ASSERT( svc == CSCHED_UNIT(curr_on_cpu(sched_unit_master(svc->unit))) );
 
     if ( (delta = now - svc->start_time) <= 0 )
         return;
@@ -351,8 +351,8 @@ DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
 
 static inline void __runq_tickle(struct csched_unit *new)
 {
-    unsigned int cpu = new->vcpu->processor;
-    struct sched_unit *unit = new->vcpu->sched_unit;
+    unsigned int cpu = sched_unit_master(new->unit);
+    struct sched_unit *unit = new->unit;
     struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
     struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
     cpumask_t mask, idle_mask, *online;
@@ -366,16 +366,16 @@ static inline void __runq_tickle(struct csched_unit *new)
     idlers_empty = cpumask_empty(&idle_mask);
 
     /*
-     * Exclusive pinning is when a vcpu has hard-affinity with only one
-     * cpu, and there is no other vcpu that has hard-affinity with that
+     * Exclusive pinning is when a unit has hard-affinity with only one
+     * cpu, and there is no other unit that has hard-affinity with that
      * same cpu. This is infrequent, but if it happens, is for achieving
      * the most possible determinism, and least possible overhead for
-     * the vcpus in question.
+     * the units in question.
      *
      * Try to identify the vast majority of these situations, and deal
      * with them quickly.
      */
-    if ( unlikely(test_bit(CSCHED_FLAG_VCPU_PINNED, &new->flags) &&
+    if ( unlikely(test_bit(CSCHED_FLAG_UNIT_PINNED, &new->flags) &&
                   cpumask_test_cpu(cpu, &idle_mask)) )
     {
         ASSERT(cpumask_cycle(cpu, unit->cpu_hard_affinity) == cpu);
@@ -386,7 +386,7 @@ static inline void __runq_tickle(struct csched_unit *new)
 
     /*
      * If the pcpu is idle, or there are no idlers and the new
-     * vcpu is a higher priority than the old vcpu, run it here.
+     * unit is a higher priority than the old unit, run it here.
      *
      * If there are idle cpus, first try to find one suitable to run
      * new, so we can avoid preempting cur.  If we cannot find a
@@ -405,7 +405,7 @@ static inline void __runq_tickle(struct csched_unit *new)
     else if ( !idlers_empty )
     {
         /*
-         * Soft and hard affinity balancing loop. For vcpus without
+         * Soft and hard affinity balancing loop. For units without
          * a useful soft affinity, consider hard affinity only.
          */
         for_each_affinity_balance_step( balance_step )
@@ -448,10 +448,10 @@ static inline void __runq_tickle(struct csched_unit *new)
             {
                 if ( cpumask_intersects(unit->cpu_hard_affinity, &idle_mask) )
                 {
-                    SCHED_VCPU_STAT_CRANK(cur, kicked_away);
-                    SCHED_VCPU_STAT_CRANK(cur, migrate_r);
+                    SCHED_UNIT_STAT_CRANK(cur, kicked_away);
+                    SCHED_UNIT_STAT_CRANK(cur, migrate_r);
                     SCHED_STAT_CRANK(migrate_kicked_away);
-                    set_bit(_VPF_migrating, &cur->vcpu->pause_flags);
+                    sched_set_pause_flags_atomic(cur->unit, _VPF_migrating);
                 }
                 /* Tickle cpu anyway, to let new preempt cur. */
                 SCHED_STAT_CRANK(tickled_busy_cpu);
@@ -607,7 +607,7 @@ init_pdata(struct csched_private *prv, struct csched_pcpu *spc, int cpu)
     spc->idle_bias = nr_cpu_ids - 1;
 
     /* Start off idling... */
-    BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)->vcpu_list));
+    BUG_ON(!is_idle_unit(curr_on_cpu(cpu)));
     cpumask_set_cpu(cpu, prv->idlers);
     spc->nr_runnable = 0;
 }
@@ -632,9 +632,9 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     struct csched_private *prv = CSCHED_PRIV(new_ops);
     struct csched_unit *svc = vdata;
 
-    ASSERT(svc && is_idle_vcpu(svc->vcpu));
+    ASSERT(svc && is_idle_unit(svc->unit));
 
-    idle_vcpu[cpu]->sched_unit->priv = vdata;
+    sched_idle_unit(cpu)->priv = vdata;
 
     /*
      * We are holding the runqueue lock already (it's been taken in
@@ -651,33 +651,33 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 
 #ifndef NDEBUG
 static inline void
-__csched_vcpu_check(struct vcpu *vc)
+__csched_unit_check(struct sched_unit *unit)
 {
-    struct csched_unit * const svc = CSCHED_UNIT(vc->sched_unit);
+    struct csched_unit * const svc = CSCHED_UNIT(unit);
     struct csched_dom * const sdom = svc->sdom;
 
-    BUG_ON( svc->vcpu != vc );
-    BUG_ON( sdom != CSCHED_DOM(vc->domain) );
+    BUG_ON( svc->unit != unit );
+    BUG_ON( sdom != CSCHED_DOM(unit->domain) );
     if ( sdom )
     {
-        BUG_ON( is_idle_vcpu(vc) );
-        BUG_ON( sdom->dom != vc->domain );
+        BUG_ON( is_idle_unit(unit) );
+        BUG_ON( sdom->dom != unit->domain );
     }
     else
     {
-        BUG_ON( !is_idle_vcpu(vc) );
+        BUG_ON( !is_idle_unit(unit) );
     }
 
     SCHED_STAT_CRANK(unit_check);
 }
-#define CSCHED_VCPU_CHECK(_vc)  (__csched_vcpu_check(_vc))
+#define CSCHED_UNIT_CHECK(unit)  (__csched_unit_check(unit))
 #else
-#define CSCHED_VCPU_CHECK(_vc)
+#define CSCHED_UNIT_CHECK(unit)
 #endif
 
 /*
- * Delay, in microseconds, between migrations of a VCPU between PCPUs.
- * This prevents rapid fluttering of a VCPU between CPUs, and reduces the
+ * Delay, in microseconds, between migrations of a UNIT between PCPUs.
+ * This prevents rapid fluttering of a UNIT between CPUs, and reduces the
  * implicit overheads such as cache-warming. 1ms (1000) has been measured
  * as a good value.
  */
@@ -688,8 +688,8 @@ static inline bool
 __csched_vcpu_is_cache_hot(const struct csched_private *prv,
                            const struct csched_unit *svc)
 {
-    bool hot = prv->vcpu_migr_delay &&
-               (NOW() - svc->last_sched_time) < prv->vcpu_migr_delay;
+    bool hot = prv->unit_migr_delay &&
+               (NOW() - svc->last_sched_time) < prv->unit_migr_delay;
 
     if ( hot )
         SCHED_STAT_CRANK(unit_hot);
@@ -698,37 +698,39 @@ __csched_vcpu_is_cache_hot(const struct csched_private *prv,
 }
 
 static inline int
-__csched_vcpu_is_migrateable(const struct csched_private *prv, struct vcpu *vc,
+__csched_unit_is_migrateable(const struct csched_private *prv,
+                             struct sched_unit *unit,
                              int dest_cpu, cpumask_t *mask)
 {
-    const struct csched_unit *svc = CSCHED_UNIT(vc->sched_unit);
+    const struct csched_unit *svc = CSCHED_UNIT(unit);
     /*
      * Don't pick up work that's hot on peer PCPU, or that can't (or
      * would prefer not to) run on cpu.
      *
-     * The caller is supposed to have already checked that vc is also
+     * The caller is supposed to have already checked that unit is also
      * not running.
      */
-    ASSERT(!vc->sched_unit->is_running);
+    ASSERT(!unit->is_running);
 
     return !__csched_vcpu_is_cache_hot(prv, svc) &&
            cpumask_test_cpu(dest_cpu, mask);
 }
 
 static int
-_csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
+_csched_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit,
+                 bool_t commit)
 {
-    /* We must always use vc->procssor's scratch space */
-    cpumask_t *cpus = cpumask_scratch_cpu(vc->processor);
+    int cpu = sched_unit_master(unit);
+    /* We must always use cpu's scratch space */
+    cpumask_t *cpus = cpumask_scratch_cpu(cpu);
     cpumask_t idlers;
-    cpumask_t *online = cpupool_domain_cpumask(vc->domain);
+    cpumask_t *online = cpupool_domain_cpumask(unit->domain);
     struct csched_pcpu *spc = NULL;
-    int cpu = vc->processor;
     int balance_step;
 
     for_each_affinity_balance_step( balance_step )
     {
-        affinity_balance_cpumask(vc->sched_unit, balance_step, cpus);
+        affinity_balance_cpumask(unit, balance_step, cpus);
         cpumask_and(cpus, online, cpus);
         /*
          * We want to pick up a pcpu among the ones that are online and
@@ -747,12 +749,13 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          * balancing step all together.
          */
         if ( balance_step == BALANCE_SOFT_AFFINITY &&
-             (!has_soft_affinity(vc->sched_unit) || cpumask_empty(cpus)) )
+             (!has_soft_affinity(unit) || cpumask_empty(cpus)) )
             continue;
 
         /* If present, prefer vc's current processor */
-        cpu = cpumask_test_cpu(vc->processor, cpus)
-                ? vc->processor : cpumask_cycle(vc->processor, cpus);
+        cpu = cpumask_test_cpu(sched_unit_master(unit), cpus)
+                ? sched_unit_master(unit)
+                : cpumask_cycle(sched_unit_master(unit), cpus);
         ASSERT(cpumask_test_cpu(cpu, cpus));
 
         /*
@@ -764,15 +767,15 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          * We give preference to the idle execution vehicle with the most
          * idling neighbours in its grouping. This distributes work across
          * distinct cores first and guarantees we don't do something stupid
-         * like run two VCPUs on co-hyperthreads while there are idle cores
+         * like run two UNITs on co-hyperthreads while there are idle cores
          * or sockets.
          *
          * Notice that, when computing the "idleness" of cpu, we may want to
-         * discount vc. That is, iff vc is the currently running and the only
-         * runnable vcpu on cpu, we add cpu to the idlers.
+         * discount unit. That is, iff unit is the currently running and the
+         * only runnable unit on cpu, we add cpu to the idlers.
          */
         cpumask_and(&idlers, &cpu_online_map, CSCHED_PRIV(ops)->idlers);
-        if ( vc->processor == cpu && is_runq_idle(cpu) )
+        if ( sched_unit_master(unit) == cpu && is_runq_idle(cpu) )
             __cpumask_set_cpu(cpu, &idlers);
         cpumask_and(cpus, &idlers, cpus);
 
@@ -782,7 +785,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          * CPU, as we just &&-ed it with idlers). In fact, if we are on SMT, and
          * cpu points to a busy thread with an idle sibling, both the threads
          * will be considered the same, from the "idleness" calculation point
-         * of view", preventing vcpu from being moved to the thread that is
+         * of view", preventing unit from being moved to the thread that is
          * actually idle.
          *
          * Notice that cpumask_test_cpu() is quicker than cpumask_empty(), so
@@ -848,7 +851,8 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
     if ( commit && spc )
        spc->idle_bias = cpu;
 
-    TRACE_3D(TRC_CSCHED_PICKED_CPU, vc->domain->domain_id, vc->vcpu_id, cpu);
+    TRACE_3D(TRC_CSCHED_PICKED_CPU, unit->domain->domain_id, unit->unit_id,
+             cpu);
 
     return cpu;
 }
@@ -856,7 +860,6 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
 static struct sched_resource *
 csched_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
 {
-    struct vcpu *vc = unit->vcpu_list;
     struct csched_unit *svc = CSCHED_UNIT(unit);
 
     /*
@@ -866,26 +869,26 @@ csched_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
      * csched_unit_wake() (still called from vcpu_migrate()) we won't
      * get boosted, which we don't deserve as we are "only" migrating.
      */
-    set_bit(CSCHED_FLAG_VCPU_MIGRATING, &svc->flags);
-    return get_sched_res(_csched_cpu_pick(ops, vc, 1));
+    set_bit(CSCHED_FLAG_UNIT_MIGRATING, &svc->flags);
+    return get_sched_res(_csched_cpu_pick(ops, unit, 1));
 }
 
 static inline void
-__csched_vcpu_acct_start(struct csched_private *prv, struct csched_unit *svc)
+__csched_unit_acct_start(struct csched_private *prv, struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
     unsigned long flags;
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    if ( list_empty(&svc->active_vcpu_elem) )
+    if ( list_empty(&svc->active_unit_elem) )
     {
-        SCHED_VCPU_STAT_CRANK(svc, state_active);
+        SCHED_UNIT_STAT_CRANK(svc, state_active);
         SCHED_STAT_CRANK(acct_unit_active);
 
-        sdom->active_vcpu_count++;
-        list_add(&svc->active_vcpu_elem, &sdom->active_vcpu);
-        /* Make weight per-vcpu */
+        sdom->active_unit_count++;
+        list_add(&svc->active_unit_elem, &sdom->active_unit);
+        /* Make weight per-unit */
         prv->weight += sdom->weight;
         if ( list_empty(&sdom->active_sdom_elem) )
         {
@@ -894,56 +897,56 @@ __csched_vcpu_acct_start(struct csched_private *prv, struct csched_unit *svc)
     }
 
     TRACE_3D(TRC_CSCHED_ACCOUNT_START, sdom->dom->domain_id,
-             svc->vcpu->vcpu_id, sdom->active_vcpu_count);
+             svc->unit->unit_id, sdom->active_unit_count);
 
     spin_unlock_irqrestore(&prv->lock, flags);
 }
 
 static inline void
-__csched_vcpu_acct_stop_locked(struct csched_private *prv,
+__csched_unit_acct_stop_locked(struct csched_private *prv,
     struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
 
-    BUG_ON( list_empty(&svc->active_vcpu_elem) );
+    BUG_ON( list_empty(&svc->active_unit_elem) );
 
-    SCHED_VCPU_STAT_CRANK(svc, state_idle);
+    SCHED_UNIT_STAT_CRANK(svc, state_idle);
     SCHED_STAT_CRANK(acct_unit_idle);
 
     BUG_ON( prv->weight < sdom->weight );
-    sdom->active_vcpu_count--;
-    list_del_init(&svc->active_vcpu_elem);
+    sdom->active_unit_count--;
+    list_del_init(&svc->active_unit_elem);
     prv->weight -= sdom->weight;
-    if ( list_empty(&sdom->active_vcpu) )
+    if ( list_empty(&sdom->active_unit) )
     {
         list_del_init(&sdom->active_sdom_elem);
     }
 
     TRACE_3D(TRC_CSCHED_ACCOUNT_STOP, sdom->dom->domain_id,
-             svc->vcpu->vcpu_id, sdom->active_vcpu_count);
+             svc->unit->unit_id, sdom->active_unit_count);
 }
 
 static void
-csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
+csched_unit_acct(struct csched_private *prv, unsigned int cpu)
 {
     struct sched_unit *currunit = current->sched_unit;
     struct csched_unit * const svc = CSCHED_UNIT(currunit);
     const struct scheduler *ops = per_cpu(scheduler, cpu);
 
-    ASSERT( current->processor == cpu );
+    ASSERT( sched_unit_master(currunit) == cpu );
     ASSERT( svc->sdom != NULL );
-    ASSERT( !is_idle_vcpu(svc->vcpu) );
+    ASSERT( !is_idle_unit(svc->unit) );
 
     /*
-     * If this VCPU's priority was boosted when it last awoke, reset it.
-     * If the VCPU is found here, then it's consuming a non-negligeable
+     * If this UNIT's priority was boosted when it last awoke, reset it.
+     * If the UNIT is found here, then it's consuming a non-negligeable
      * amount of CPU resources and should no longer be boosted.
      */
     if ( svc->pri == CSCHED_PRI_TS_BOOST )
     {
         svc->pri = CSCHED_PRI_TS_UNDER;
         TRACE_2D(TRC_CSCHED_BOOST_END, svc->sdom->dom->domain_id,
-                 svc->vcpu->vcpu_id);
+                 svc->unit->unit_id);
     }
 
     /*
@@ -952,12 +955,12 @@ csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
     burn_credits(svc, NOW());
 
     /*
-     * Put this VCPU and domain back on the active list if it was
+     * Put this UNIT and domain back on the active list if it was
      * idling.
      */
-    if ( list_empty(&svc->active_vcpu_elem) )
+    if ( list_empty(&svc->active_unit_elem) )
     {
-        __csched_vcpu_acct_start(prv, svc);
+        __csched_unit_acct_start(prv, svc);
     }
     else
     {
@@ -970,15 +973,15 @@ csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
          * migrating it to run elsewhere (see multi-core and multi-thread
          * support in csched_res_pick()).
          */
-        new_cpu = _csched_cpu_pick(ops, current, 0);
+        new_cpu = _csched_cpu_pick(ops, currunit, 0);
 
         unit_schedule_unlock_irqrestore(lock, flags, currunit);
 
         if ( new_cpu != cpu )
         {
-            SCHED_VCPU_STAT_CRANK(svc, migrate_r);
+            SCHED_UNIT_STAT_CRANK(svc, migrate_r);
             SCHED_STAT_CRANK(migrate_running);
-            set_bit(_VPF_migrating, &current->pause_flags);
+            sched_set_pause_flags_atomic(currunit, _VPF_migrating);
             /*
              * As we are about to tickle cpu, we should clear its bit in
              * idlers. But, if we are here, it means there is someone running
@@ -995,21 +998,20 @@ static void *
 csched_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
                    void *dd)
 {
-    struct vcpu *vc = unit->vcpu_list;
     struct csched_unit *svc;
 
-    /* Allocate per-VCPU info */
+    /* Allocate per-UNIT info */
     svc = xzalloc(struct csched_unit);
     if ( svc == NULL )
         return NULL;
 
     INIT_LIST_HEAD(&svc->runq_elem);
-    INIT_LIST_HEAD(&svc->active_vcpu_elem);
+    INIT_LIST_HEAD(&svc->active_unit_elem);
     svc->sdom = dd;
-    svc->vcpu = vc;
-    svc->pri = is_idle_domain(vc->domain) ?
+    svc->unit = unit;
+    svc->pri = is_idle_unit(unit) ?
         CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER;
-    SCHED_VCPU_STATS_RESET(svc);
+    SCHED_UNIT_STATS_RESET(svc);
     SCHED_STAT_CRANK(unit_alloc);
     return svc;
 }
@@ -1017,24 +1019,21 @@ csched_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
 static void
 csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct vcpu *vc = unit->vcpu_list;
     struct csched_unit *svc = unit->priv;
     spinlock_t *lock;
 
-    BUG_ON( is_idle_vcpu(vc) );
+    BUG_ON( is_idle_unit(unit) );
 
     /* csched_res_pick() looks in vc->processor's runq, so we need the lock. */
     lock = unit_schedule_lock_irq(unit);
 
-    unit->res = csched_res_pick(ops, unit);
-    vc->processor = unit->res->master_cpu;
+    sched_set_res(unit, csched_res_pick(ops, unit));
 
     spin_unlock_irq(lock);
 
     lock = unit_schedule_lock_irq(unit);
 
-    if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) &&
-         !vc->sched_unit->is_running )
+    if ( !__unit_on_runq(svc) && unit_runnable(unit) && !unit->is_running )
         runq_insert(svc);
 
     unit_schedule_unlock_irq(lock, unit);
@@ -1061,18 +1060,18 @@ csched_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 
     SCHED_STAT_CRANK(unit_remove);
 
-    ASSERT(!__vcpu_on_runq(svc));
+    ASSERT(!__unit_on_runq(svc));
 
-    if ( test_and_clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
+    if ( test_and_clear_bit(CSCHED_FLAG_UNIT_PARKED, &svc->flags) )
     {
         SCHED_STAT_CRANK(unit_unpark);
-        vcpu_unpause(svc->vcpu);
+        vcpu_unpause(svc->unit->vcpu_list);
     }
 
     spin_lock_irq(&prv->lock);
 
-    if ( !list_empty(&svc->active_vcpu_elem) )
-        __csched_vcpu_acct_stop_locked(prv, svc);
+    if ( !list_empty(&svc->active_unit_elem) )
+        __csched_unit_acct_stop_locked(prv, svc);
 
     spin_unlock_irq(&prv->lock);
 
@@ -1082,86 +1081,85 @@ csched_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 static void
 csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct vcpu *vc = unit->vcpu_list;
     struct csched_unit * const svc = CSCHED_UNIT(unit);
-    unsigned int cpu = vc->processor;
+    unsigned int cpu = sched_unit_master(unit);
 
     SCHED_STAT_CRANK(unit_sleep);
 
-    BUG_ON( is_idle_vcpu(vc) );
+    BUG_ON( is_idle_unit(unit) );
 
     if ( curr_on_cpu(cpu) == unit )
     {
         /*
          * We are about to tickle cpu, so we should clear its bit in idlers.
-         * But, we are here because vc is going to sleep while running on cpu,
+         * But, we are here because unit is going to sleep while running on cpu,
          * so the bit must be zero already.
          */
         ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers));
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     }
-    else if ( __vcpu_on_runq(svc) )
+    else if ( __unit_on_runq(svc) )
         runq_remove(svc);
 }
 
 static void
 csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct vcpu *vc = unit->vcpu_list;
     struct csched_unit * const svc = CSCHED_UNIT(unit);
     bool_t migrating;
 
-    BUG_ON( is_idle_vcpu(vc) );
+    BUG_ON( is_idle_unit(unit) );
 
-    if ( unlikely(curr_on_cpu(vc->processor) == unit) )
+    if ( unlikely(curr_on_cpu(sched_unit_master(unit)) == unit) )
     {
         SCHED_STAT_CRANK(unit_wake_running);
         return;
     }
-    if ( unlikely(__vcpu_on_runq(svc)) )
+    if ( unlikely(__unit_on_runq(svc)) )
     {
         SCHED_STAT_CRANK(unit_wake_onrunq);
         return;
     }
 
-    if ( likely(vcpu_runnable(vc)) )
+    if ( likely(unit_runnable(unit)) )
         SCHED_STAT_CRANK(unit_wake_runnable);
     else
         SCHED_STAT_CRANK(unit_wake_not_runnable);
 
     /*
-     * We temporarly boost the priority of awaking VCPUs!
+     * We temporarily boost the priority of awaking UNITs!
      *
-     * If this VCPU consumes a non negligeable amount of CPU, it
+     * If this UNIT consumes a non negligible amount of CPU, it
      * will eventually find itself in the credit accounting code
      * path where its priority will be reset to normal.
      *
-     * If on the other hand the VCPU consumes little CPU and is
+     * If on the other hand the UNIT consumes little CPU and is
      * blocking and awoken a lot (doing I/O for example), its
      * priority will remain boosted, optimizing it's wake-to-run
      * latencies.
      *
-     * This allows wake-to-run latency sensitive VCPUs to preempt
-     * more CPU resource intensive VCPUs without impacting overall 
+     * This allows wake-to-run latency sensitive UNITs to preempt
+     * more CPU resource intensive UNITs without impacting overall
      * system fairness.
      *
      * There are two cases, when we don't want to boost:
-     *  - VCPUs that are waking up after a migration, rather than
+     *  - UNITs that are waking up after a migration, rather than
      *    after having block;
-     *  - VCPUs of capped domains unpausing after earning credits
+     *  - UNITs of capped domains unpausing after earning credits
      *    they had overspent.
      */
-    migrating = test_and_clear_bit(CSCHED_FLAG_VCPU_MIGRATING, &svc->flags);
+    migrating = test_and_clear_bit(CSCHED_FLAG_UNIT_MIGRATING, &svc->flags);
 
     if ( !migrating && svc->pri == CSCHED_PRI_TS_UNDER &&
-         !test_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
+         !test_bit(CSCHED_FLAG_UNIT_PARKED, &svc->flags) )
     {
-        TRACE_2D(TRC_CSCHED_BOOST_START, vc->domain->domain_id, vc->vcpu_id);
+        TRACE_2D(TRC_CSCHED_BOOST_START, unit->domain->domain_id,
+                 unit->unit_id);
         SCHED_STAT_CRANK(unit_boost);
         svc->pri = CSCHED_PRI_TS_BOOST;
     }
 
-    /* Put the VCPU on the runq and tickle CPUs */
+    /* Put the UNIT on the runq and tickle CPUs */
     runq_insert(svc);
     __runq_tickle(svc);
 }
@@ -1172,7 +1170,7 @@ csched_unit_yield(const struct scheduler *ops, struct sched_unit *unit)
     struct csched_unit * const svc = CSCHED_UNIT(unit);
 
     /* Let the scheduler know that this vcpu is trying to yield */
-    set_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags);
+    set_bit(CSCHED_FLAG_UNIT_YIELD, &svc->flags);
 }
 
 static int
@@ -1201,8 +1199,8 @@ csched_dom_cntl(
         {
             if ( !list_empty(&sdom->active_sdom_elem) )
             {
-                prv->weight -= sdom->weight * sdom->active_vcpu_count;
-                prv->weight += op->u.credit.weight * sdom->active_vcpu_count;
+                prv->weight -= sdom->weight * sdom->active_unit_count;
+                prv->weight += op->u.credit.weight * sdom->active_unit_count;
             }
             sdom->weight = op->u.credit.weight;
         }
@@ -1231,9 +1229,9 @@ csched_aff_cntl(const struct scheduler *ops, struct sched_unit *unit,
 
     /* Are we becoming exclusively pinned? */
     if ( cpumask_weight(hard) == 1 )
-        set_bit(CSCHED_FLAG_VCPU_PINNED, &svc->flags);
+        set_bit(CSCHED_FLAG_UNIT_PINNED, &svc->flags);
     else
-        clear_bit(CSCHED_FLAG_VCPU_PINNED, &svc->flags);
+        clear_bit(CSCHED_FLAG_UNIT_PINNED, &svc->flags);
 }
 
 static inline void
@@ -1276,14 +1274,14 @@ csched_sys_cntl(const struct scheduler *ops,
         else if ( prv->ratelimit && !params->ratelimit_us )
             printk(XENLOG_INFO "Disabling context switch rate limiting\n");
         prv->ratelimit = MICROSECS(params->ratelimit_us);
-        prv->vcpu_migr_delay = MICROSECS(params->vcpu_migr_delay_us);
+        prv->unit_migr_delay = MICROSECS(params->vcpu_migr_delay_us);
         spin_unlock_irqrestore(&prv->lock, flags);
 
         /* FALLTHRU */
     case XEN_SYSCTL_SCHEDOP_getinfo:
         params->tslice_ms = prv->tslice / MILLISECS(1);
         params->ratelimit_us = prv->ratelimit / MICROSECS(1);
-        params->vcpu_migr_delay_us = prv->vcpu_migr_delay / MICROSECS(1);
+        params->vcpu_migr_delay_us = prv->unit_migr_delay / MICROSECS(1);
         rc = 0;
         break;
     }
@@ -1301,7 +1299,7 @@ csched_alloc_domdata(const struct scheduler *ops, struct domain *dom)
         return ERR_PTR(-ENOMEM);
 
     /* Initialize credit and weight */
-    INIT_LIST_HEAD(&sdom->active_vcpu);
+    INIT_LIST_HEAD(&sdom->active_unit);
     INIT_LIST_HEAD(&sdom->active_sdom_elem);
     sdom->dom = dom;
     sdom->weight = CSCHED_DEFAULT_WEIGHT;
@@ -1318,7 +1316,7 @@ csched_free_domdata(const struct scheduler *ops, void *data)
 /*
  * This is a O(n) optimized sort of the runq.
  *
- * Time-share VCPUs can only be one of two priorities, UNDER or OVER. We walk
+ * Time-share UNITs can only be one of two priorities, UNDER or OVER. We walk
  * through the runq and move up any UNDERs that are preceded by OVERS. We
  * remember the last UNDER to make the move up operation O(1).
  */
@@ -1371,7 +1369,7 @@ csched_acct(void* dummy)
 {
     struct csched_private *prv = dummy;
     unsigned long flags;
-    struct list_head *iter_vcpu, *next_vcpu;
+    struct list_head *iter_unit, *next_unit;
     struct list_head *iter_sdom, *next_sdom;
     struct csched_unit *svc;
     struct csched_dom *sdom;
@@ -1418,26 +1416,26 @@ csched_acct(void* dummy)
         sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem);
 
         BUG_ON( is_idle_domain(sdom->dom) );
-        BUG_ON( sdom->active_vcpu_count == 0 );
+        BUG_ON( sdom->active_unit_count == 0 );
         BUG_ON( sdom->weight == 0 );
-        BUG_ON( (sdom->weight * sdom->active_vcpu_count) > weight_left );
+        BUG_ON( (sdom->weight * sdom->active_unit_count) > weight_left );
 
-        weight_left -= ( sdom->weight * sdom->active_vcpu_count );
+        weight_left -= ( sdom->weight * sdom->active_unit_count );
 
         /*
          * A domain's fair share is computed using its weight in competition
          * with that of all other active domains.
          *
-         * At most, a domain can use credits to run all its active VCPUs
+         * At most, a domain can use credits to run all its active UNITs
          * for one full accounting period. We allow a domain to earn more
          * only when the system-wide credit balance is negative.
          */
-        credit_peak = sdom->active_vcpu_count * prv->credits_per_tslice;
+        credit_peak = sdom->active_unit_count * prv->credits_per_tslice;
         if ( prv->credit_balance < 0 )
         {
             credit_peak += ( ( -prv->credit_balance
                                * sdom->weight
-                               * sdom->active_vcpu_count) +
+                               * sdom->active_unit_count) +
                              (weight_total - 1)
                            ) / weight_total;
         }
@@ -1448,14 +1446,14 @@ csched_acct(void* dummy)
             if ( credit_cap < credit_peak )
                 credit_peak = credit_cap;
 
-            /* FIXME -- set cap per-vcpu as well...? */
-            credit_cap = ( credit_cap + ( sdom->active_vcpu_count - 1 )
-                         ) / sdom->active_vcpu_count;
+            /* FIXME -- set cap per-unit as well...? */
+            credit_cap = ( credit_cap + ( sdom->active_unit_count - 1 )
+                         ) / sdom->active_unit_count;
         }
 
         credit_fair = ( ( credit_total
                           * sdom->weight
-                          * sdom->active_vcpu_count )
+                          * sdom->active_unit_count )
                         + (weight_total - 1)
                       ) / weight_total;
 
@@ -1489,14 +1487,14 @@ csched_acct(void* dummy)
             credit_fair = credit_peak;
         }
 
-        /* Compute fair share per VCPU */
-        credit_fair = ( credit_fair + ( sdom->active_vcpu_count - 1 )
-                      ) / sdom->active_vcpu_count;
+        /* Compute fair share per UNIT */
+        credit_fair = ( credit_fair + ( sdom->active_unit_count - 1 )
+                      ) / sdom->active_unit_count;
 
 
-        list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
+        list_for_each_safe( iter_unit, next_unit, &sdom->active_unit )
         {
-            svc = list_entry(iter_vcpu, struct csched_unit, active_vcpu_elem);
+            svc = list_entry(iter_unit, struct csched_unit, active_unit_elem);
             BUG_ON( sdom != svc->sdom );
 
             /* Increment credit */
@@ -1504,20 +1502,20 @@ csched_acct(void* dummy)
             credit = atomic_read(&svc->credit);
 
             /*
-             * Recompute priority or, if VCPU is idling, remove it from
+             * Recompute priority or, if UNIT is idling, remove it from
              * the active list.
              */
             if ( credit < 0 )
             {
                 svc->pri = CSCHED_PRI_TS_OVER;
 
-                /* Park running VCPUs of capped-out domains */
+                /* Park running UNITs of capped-out domains */
                 if ( sdom->cap != 0U &&
                      credit < -credit_cap &&
-                     !test_and_set_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
+                     !test_and_set_bit(CSCHED_FLAG_UNIT_PARKED, &svc->flags) )
                 {
                     SCHED_STAT_CRANK(unit_park);
-                    vcpu_pause_nosync(svc->vcpu);
+                    vcpu_pause_nosync(svc->unit->vcpu_list);
                 }
 
                 /* Lower bound on credits */
@@ -1533,22 +1531,22 @@ csched_acct(void* dummy)
                 svc->pri = CSCHED_PRI_TS_UNDER;
 
                 /* Unpark any capped domains whose credits go positive */
-                if ( test_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
+                if ( test_bit(CSCHED_FLAG_UNIT_PARKED, &svc->flags) )
                 {
                     /*
                      * It's important to unset the flag AFTER the unpause()
-                     * call to make sure the VCPU's priority is not boosted
+                     * call to make sure the UNIT's priority is not boosted
                      * if it is woken up here.
                      */
                     SCHED_STAT_CRANK(unit_unpark);
-                    vcpu_unpause(svc->vcpu);
-                    clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags);
+                    vcpu_unpause(svc->unit->vcpu_list);
+                    clear_bit(CSCHED_FLAG_UNIT_PARKED, &svc->flags);
                 }
 
-                /* Upper bound on credits means VCPU stops earning */
+                /* Upper bound on credits means UNIT stops earning */
                 if ( credit > prv->credits_per_tslice )
                 {
-                    __csched_vcpu_acct_stop_locked(prv, svc);
+                    __csched_unit_acct_stop_locked(prv, svc);
                     /* Divide credits in half, so that when it starts
                      * accounting again, it starts a little bit "ahead" */
                     credit /= 2;
@@ -1556,8 +1554,8 @@ csched_acct(void* dummy)
                 }
             }
 
-            SCHED_VCPU_STAT_SET(svc, credit_last, credit);
-            SCHED_VCPU_STAT_SET(svc, credit_incr, credit_fair);
+            SCHED_UNIT_STAT_SET(svc, credit_last, credit);
+            SCHED_UNIT_STAT_SET(svc, credit_incr, credit_fair);
             credit_balance += credit;
         }
     }
@@ -1583,10 +1581,10 @@ csched_tick(void *_cpu)
     spc->tick++;
 
     /*
-     * Accounting for running VCPU
+     * Accounting for running UNIT
      */
-    if ( !is_idle_vcpu(current) )
-        csched_vcpu_acct(prv, cpu);
+    if ( !is_idle_unit(current->sched_unit) )
+        csched_unit_acct(prv, cpu);
 
     /*
      * Check if runq needs to be sorted
@@ -1607,7 +1605,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
     const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
     struct csched_unit *speer;
     struct list_head *iter;
-    struct vcpu *vc;
+    struct sched_unit *unit;
 
     ASSERT(peer_pcpu != NULL);
 
@@ -1615,7 +1613,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
      * Don't steal from an idle CPU's runq because it's about to
      * pick up work from it itself.
      */
-    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu)->vcpu_list)) )
+    if ( unlikely(is_idle_unit(curr_on_cpu(peer_cpu))) )
         goto out;
 
     list_for_each( iter, &peer_pcpu->runq )
@@ -1623,46 +1621,44 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
         speer = __runq_elem(iter);
 
         /*
-         * If next available VCPU here is not of strictly higher
+         * If next available UNIT here is not of strictly higher
          * priority than ours, this PCPU is useless to us.
          */
         if ( speer->pri <= pri )
             break;
 
-        /* Is this VCPU runnable on our PCPU? */
-        vc = speer->vcpu;
-        BUG_ON( is_idle_vcpu(vc) );
+        /* Is this UNIT runnable on our PCPU? */
+        unit = speer->unit;
+        BUG_ON( is_idle_unit(unit) );
 
         /*
-         * If the vcpu is still in peer_cpu's scheduling tail, or if it
+         * If the unit is still in peer_cpu's scheduling tail, or if it
          * has no useful soft affinity, skip it.
          *
          * In fact, what we want is to check if we have any "soft-affine
          * work" to steal, before starting to look at "hard-affine work".
          *
-         * Notice that, if not even one vCPU on this runq has a useful
+         * Notice that, if not even one unit on this runq has a useful
          * soft affinity, we could have avoid considering this runq for
          * a soft balancing step in the first place. This, for instance,
          * can be implemented by taking note of on what runq there are
-         * vCPUs with useful soft affinities in some sort of bitmap
+         * units with useful soft affinities in some sort of bitmap
          * or counter.
          */
-        if ( vc->sched_unit->is_running ||
-             (balance_step == BALANCE_SOFT_AFFINITY &&
-              !has_soft_affinity(vc->sched_unit)) )
+        if ( unit->is_running || (balance_step == BALANCE_SOFT_AFFINITY &&
+                                  !has_soft_affinity(unit)) )
             continue;
 
-        affinity_balance_cpumask(vc->sched_unit, balance_step, cpumask_scratch);
-        if ( __csched_vcpu_is_migrateable(prv, vc, cpu, cpumask_scratch) )
+        affinity_balance_cpumask(unit, balance_step, cpumask_scratch);
+        if ( __csched_unit_is_migrateable(prv, unit, cpu, cpumask_scratch) )
         {
             /* We got a candidate. Grab it! */
-            TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
-                     vc->domain->domain_id, vc->vcpu_id);
-            SCHED_VCPU_STAT_CRANK(speer, migrate_q);
+            TRACE_3D(TRC_CSCHED_STOLEN_UNIT, peer_cpu,
+                     unit->domain->domain_id, unit->unit_id);
+            SCHED_UNIT_STAT_CRANK(speer, migrate_q);
             SCHED_STAT_CRANK(migrate_queued);
-            WARN_ON(vc->is_urgent);
             runq_remove(speer);
-            sched_set_res(vc->sched_unit, get_sched_res(cpu));
+            sched_set_res(unit, get_sched_res(cpu));
             /*
              * speer will start executing directly on cpu, without having to
              * go through runq_insert(). So we must update the runnable count
@@ -1688,7 +1684,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
     int peer_cpu, first_cpu, peer_node, bstep;
     int node = cpu_to_node(cpu);
 
-    BUG_ON( cpu != snext->vcpu->processor );
+    BUG_ON( cpu != sched_unit_master(snext->unit) );
     online = cpupool_online_cpumask(c);
 
     /*
@@ -1717,7 +1713,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
         /*
          * We peek at the non-idling CPUs in a node-wise fashion. In fact,
          * it is more likely that we find some affine work on our same
-         * node, not to mention that migrating vcpus within the same node
+         * node, not to mention that migrating units within the same node
          * could well expected to be cheaper than across-nodes (memory
          * stays local, there might be some node-wide cache[s], etc.).
          */
@@ -1738,7 +1734,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
                 spinlock_t *lock;
 
                 /*
-                 * If there is only one runnable vCPU on peer_cpu, it means
+                 * If there is only one runnable unit on peer_cpu, it means
                  * there's no one to be stolen in its runqueue, so skip it.
                  *
                  * Checking this without holding the lock is racy... But that's
@@ -1751,13 +1747,13 @@ csched_load_balance(struct csched_private *prv, int cpu,
                  *   And we can avoid that by re-checking nr_runnable after
                  *   having grabbed the lock, if we want;
                  * - if we race with inc_nr_runnable(), we skip a pCPU that may
-                 *   have runnable vCPUs in its runqueue, but that's not a
+                 *   have runnable units in its runqueue, but that's not a
                  *   problem because:
                  *   + if racing with csched_unit_insert() or csched_unit_wake(),
-                 *     __runq_tickle() will be called afterwords, so the vCPU
+                 *     __runq_tickle() will be called afterwords, so the unit
                  *     won't get stuck in the runqueue for too long;
-                 *   + if racing with csched_runq_steal(), it may be that a
-                 *     vCPU that we could have picked up, stays in a runqueue
+                 *   + if racing with csched_runq_steal(), it may be that an
+                 *     unit that we could have picked up, stays in a runqueue
                  *     until someone else tries to steal it again. But this is
                  *     no worse than what can happen already (without this
                  *     optimization), it the pCPU would schedule right after we
@@ -1792,7 +1788,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
                     csched_runq_steal(peer_cpu, cpu, snext->pri, bstep) : NULL;
                 pcpu_schedule_unlock(lock, peer_cpu);
 
-                /* As soon as one vcpu is found, balancing ends */
+                /* As soon as one unit is found, balancing ends */
                 if ( speer != NULL )
                 {
                     *stolen = 1;
@@ -1831,14 +1827,15 @@ csched_schedule(
 {
     const int cpu = smp_processor_id();
     struct list_head * const runq = RUNQ(cpu);
-    struct csched_unit * const scurr = CSCHED_UNIT(current->sched_unit);
+    struct sched_unit *unit = current->sched_unit;
+    struct csched_unit * const scurr = CSCHED_UNIT(unit);
     struct csched_private *prv = CSCHED_PRIV(ops);
     struct csched_unit *snext;
     struct task_slice ret;
     s_time_t runtime, tslice;
 
     SCHED_STAT_CRANK(schedule);
-    CSCHED_VCPU_CHECK(current);
+    CSCHED_UNIT_CHECK(unit);
 
     /*
      * Here in Credit1 code, we usually just call TRACE_nD() helpers, and
@@ -1852,31 +1849,31 @@ csched_schedule(
         } d;
         d.cpu = cpu;
         d.tasklet = tasklet_work_scheduled;
-        d.idle = is_idle_vcpu(current);
+        d.idle = is_idle_unit(unit);
         __trace_var(TRC_CSCHED_SCHEDULE, 1, sizeof(d),
                     (unsigned char *)&d);
     }
 
-    runtime = now - current->sched_unit->state_entry_time;
+    runtime = now - unit->state_entry_time;
     if ( runtime < 0 ) /* Does this ever happen? */
         runtime = 0;
 
-    if ( !is_idle_vcpu(scurr->vcpu) )
+    if ( !is_idle_unit(unit) )
     {
-        /* Update credits of a non-idle VCPU. */
+        /* Update credits of a non-idle UNIT. */
         burn_credits(scurr, now);
         scurr->start_time -= now;
         scurr->last_sched_time = now;
     }
     else
     {
-        /* Re-instate a boosted idle VCPU as normal-idle. */
+        /* Re-instate a boosted idle UNIT as normal-idle. */
         scurr->pri = CSCHED_PRI_IDLE;
     }
 
     /* Choices, choices:
-     * - If we have a tasklet, we need to run the idle vcpu no matter what.
-     * - If sched rate limiting is in effect, and the current vcpu has
+     * - If we have a tasklet, we need to run the idle unit no matter what.
+     * - If sched rate limiting is in effect, and the current unit has
      *   run for less than that amount of time, continue the current one,
      *   but with a shorter timeslice and return it immediately
      * - Otherwise, chose the one with the highest priority (which may
@@ -1894,11 +1891,11 @@ csched_schedule(
      * In fact, it may be the case that scurr is about to spin, and there's
      * no point forcing it to do so until rate limiting expires.
      */
-    if ( !test_bit(CSCHED_FLAG_VCPU_YIELD, &scurr->flags)
+    if ( !test_bit(CSCHED_FLAG_UNIT_YIELD, &scurr->flags)
          && !tasklet_work_scheduled
          && prv->ratelimit
-         && vcpu_runnable(current)
-         && !is_idle_vcpu(current)
+         && unit_runnable(unit)
+         && !is_idle_unit(unit)
          && runtime < prv->ratelimit )
     {
         snext = scurr;
@@ -1916,11 +1913,11 @@ csched_schedule(
         if ( unlikely(tb_init_done) )
         {
             struct {
-                unsigned vcpu:16, dom:16;
+                unsigned unit:16, dom:16;
                 unsigned runtime;
             } d;
-            d.dom = scurr->vcpu->domain->domain_id;
-            d.vcpu = scurr->vcpu->vcpu_id;
+            d.dom = unit->domain->domain_id;
+            d.unit = unit->unit_id;
             d.runtime = runtime;
             __trace_var(TRC_CSCHED_RATELIMIT, 1, sizeof(d),
                         (unsigned char *)&d);
@@ -1932,13 +1929,13 @@ csched_schedule(
     tslice = prv->tslice;
 
     /*
-     * Select next runnable local VCPU (ie top of local runq)
+     * Select next runnable local UNIT (ie top of local runq)
      */
-    if ( vcpu_runnable(current) )
+    if ( unit_runnable(unit) )
         __runq_insert(scurr);
     else
     {
-        BUG_ON( is_idle_vcpu(current) || list_empty(runq) );
+        BUG_ON( is_idle_unit(unit) || list_empty(runq) );
         /* Current has blocked. Update the runnable counter for this cpu. */
         dec_nr_runnable(cpu);
     }
@@ -1946,23 +1943,23 @@ csched_schedule(
     snext = __runq_elem(runq->next);
     ret.migrated = 0;
 
-    /* Tasklet work (which runs in idle VCPU context) overrides all else. */
+    /* Tasklet work (which runs in idle UNIT context) overrides all else. */
     if ( tasklet_work_scheduled )
     {
         TRACE_0D(TRC_CSCHED_SCHED_TASKLET);
-        snext = CSCHED_UNIT(idle_vcpu[cpu]->sched_unit);
+        snext = CSCHED_UNIT(sched_idle_unit(cpu));
         snext->pri = CSCHED_PRI_TS_BOOST;
     }
 
     /*
      * Clear YIELD flag before scheduling out
      */
-    clear_bit(CSCHED_FLAG_VCPU_YIELD, &scurr->flags);
+    clear_bit(CSCHED_FLAG_UNIT_YIELD, &scurr->flags);
 
     /*
      * SMP Load balance:
      *
-     * If the next highest priority local runnable VCPU has already eaten
+     * If the next highest priority local runnable UNIT has already eaten
      * through its credits, look on other PCPUs to see if we have more
      * urgent work... If not, csched_load_balance() will return snext, but
      * already removed from the runq.
@@ -1986,32 +1983,32 @@ csched_schedule(
         cpumask_clear_cpu(cpu, prv->idlers);
     }
 
-    if ( !is_idle_vcpu(snext->vcpu) )
+    if ( !is_idle_unit(snext->unit) )
         snext->start_time += now;
 
 out:
     /*
      * Return task to run next...
      */
-    ret.time = (is_idle_vcpu(snext->vcpu) ?
+    ret.time = (is_idle_unit(snext->unit) ?
                 -1 : tslice);
-    ret.task = snext->vcpu->sched_unit;
+    ret.task = snext->unit;
 
-    CSCHED_VCPU_CHECK(ret.task->vcpu_list);
+    CSCHED_UNIT_CHECK(ret.task);
     return ret;
 }
 
 static void
-csched_dump_vcpu(struct csched_unit *svc)
+csched_dump_unit(struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
 
     printk("[%i.%i] pri=%i flags=%x cpu=%i",
-            svc->vcpu->domain->domain_id,
-            svc->vcpu->vcpu_id,
+            svc->unit->domain->domain_id,
+            svc->unit->unit_id,
             svc->pri,
             svc->flags,
-            svc->vcpu->processor);
+            sched_unit_master(svc->unit));
 
     if ( sdom )
     {
@@ -2045,7 +2042,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
 
     /*
      * We need both locks:
-     * - csched_dump_vcpu() wants to access domains' scheduling
+     * - csched_dump_unit() wants to access domains' scheduling
      *   parameters, which are protected by the private scheduler lock;
      * - we scan through the runqueue, so we need the proper runqueue
      *   lock (the one of the runqueue of this cpu).
@@ -2061,12 +2058,12 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
            CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
            CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
 
-    /* current VCPU (nothing to say if that's the idle vcpu). */
+    /* current UNIT (nothing to say if that's the idle unit). */
     svc = CSCHED_UNIT(curr_on_cpu(cpu));
-    if ( svc && !is_idle_vcpu(svc->vcpu) )
+    if ( svc && !is_idle_unit(svc->unit) )
     {
         printk("\trun: ");
-        csched_dump_vcpu(svc);
+        csched_dump_unit(svc);
     }
 
     loop = 0;
@@ -2076,7 +2073,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
         if ( svc )
         {
             printk("\t%3d: ", ++loop);
-            csched_dump_vcpu(svc);
+            csched_dump_unit(svc);
         }
     }
 
@@ -2118,29 +2115,29 @@ csched_dump(const struct scheduler *ops)
            prv->ratelimit / MICROSECS(1),
            CSCHED_CREDITS_PER_MSEC,
            prv->ticks_per_tslice,
-           prv->vcpu_migr_delay/ MICROSECS(1));
+           prv->unit_migr_delay/ MICROSECS(1));
 
     printk("idlers: %*pb\n", CPUMASK_PR(prv->idlers));
 
-    printk("active vcpus:\n");
+    printk("active units:\n");
     loop = 0;
     list_for_each( iter_sdom, &prv->active_sdom )
     {
         struct csched_dom *sdom;
         sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem);
 
-        list_for_each( iter_svc, &sdom->active_vcpu )
+        list_for_each( iter_svc, &sdom->active_unit )
         {
             struct csched_unit *svc;
             spinlock_t *lock;
 
-            svc = list_entry(iter_svc, struct csched_unit, active_vcpu_elem);
-            lock = unit_schedule_lock(svc->vcpu->sched_unit);
+            svc = list_entry(iter_svc, struct csched_unit, active_unit_elem);
+            lock = unit_schedule_lock(svc->unit);
 
             printk("\t%3d: ", ++loop);
-            csched_dump_vcpu(svc);
+            csched_dump_unit(svc);
 
-            unit_schedule_unlock(lock, svc->vcpu->sched_unit);
+            unit_schedule_unlock(lock, svc->unit);
         }
     }
 
@@ -2214,7 +2211,7 @@ csched_init(struct scheduler *ops)
     else
         prv->ratelimit = MICROSECS(sched_ratelimit_us);
 
-    prv->vcpu_migr_delay = MICROSECS(vcpu_migration_delay_us);
+    prv->unit_migr_delay = MICROSECS(vcpu_migration_delay_us);
 
     return 0;
 }
-- 
2.16.4

