
[Xen-devel] [PATCH RFC 15/49] xen/sched: move per cpu scheduler private data into struct sched_resource



Move the per-cpu scheduler private data currently kept in struct
schedule_data into struct sched_resource, which is allocated per cpu,
and drop the schedule_data per-cpu variable. Schedulers now reach this
data via per_cpu(sched_res, cpu) instead of per_cpu(schedule_data, cpu).

This prepares for supporting larger scheduling granularities, e.g. core
scheduling.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/common/sched_arinc653.c   |  6 ++---
 xen/common/sched_credit.c     | 14 +++++------
 xen/common/sched_credit2.c    | 24 +++++++++----------
 xen/common/sched_null.c       |  8 +++----
 xen/common/sched_rt.c         | 12 +++++-----
 xen/common/schedule.c         | 56 +++++++++++++++++++++----------------------
 xen/include/asm-x86/cpuidle.h |  2 +-
 xen/include/xen/sched-if.h    | 20 +++++++---------
 8 files changed, 68 insertions(+), 74 deletions(-)
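
Illustrative sketch (reviewer note only, not part of the commit): the
following self-contained C program models how per-CPU scheduler state is
reached after this patch: one dynamically allocated struct sched_resource
per CPU, carrying the former schedule_data fields such as sched_priv. The
fixed NR_CPUS and the plain pointer array are assumptions standing in for
Xen's per_cpu() machinery, used only to keep the sketch compilable on its
own.

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_CPUS 4   /* stand-in; Xen sizes this at runtime */

    /* Reduced model of the merged structure from sched-if.h. */
    struct sched_resource {
        void     *sched_priv;   /* scheduler private per-CPU data   */
        unsigned  processor;    /* CPU this resource belongs to     */
    };

    /* Stand-in for DEFINE_PER_CPU(struct sched_resource *, sched_res). */
    static struct sched_resource *sched_res[NR_CPUS];

    /* Roughly what the patched cpu_schedule_up() does: allocate the
     * resource and publish it through the per-cpu pointer. */
    static int cpu_schedule_up(unsigned int cpu)
    {
        struct sched_resource *sd = malloc(sizeof(*sd));

        if ( sd == NULL )
            return -1;
        sd->processor = cpu;
        sd->sched_priv = NULL;  /* filled in later by the scheduler */
        sched_res[cpu] = sd;
        return 0;
    }

    int main(void)
    {
        for ( unsigned int cpu = 0; cpu < NR_CPUS; cpu++ )
        {
            if ( cpu_schedule_up(cpu) )
                return 1;
            /* Accesses that used to read per_cpu(schedule_data, cpu).x
             * now read per_cpu(sched_res, cpu)->x, e.g. sched_priv: */
            printf("cpu %u: res=%p priv=%p\n", cpu,
                   (void *)sched_res[cpu], sched_res[cpu]->sched_priv);
        }
        return 0;
    }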

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 5701baf337..9dc1ff6a73 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -475,7 +475,7 @@ a653sched_item_sleep(const struct scheduler *ops, struct sched_item *item)
      * If the VCPU being put to sleep is the same one that is currently
      * running, raise a softirq to invoke the scheduler to switch domains.
      */
-    if ( per_cpu(schedule_data, vc->processor).curr == item )
+    if ( per_cpu(sched_res, vc->processor)->curr == item )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
 }
 
@@ -642,7 +642,7 @@ static void
 a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                   void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
     arinc653_vcpu_t *svc = vdata;
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vc));
@@ -650,7 +650,7 @@ a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     idle_vcpu[cpu]->sched_item->priv = vdata;
 
     per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
+    per_cpu(sched_res, cpu)->sched_priv = NULL; /* no pdata */
 
     /*
      * (Re?)route the lock to its default location. We actually do not use
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 6552d4c087..e8369b3648 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -82,7 +82,7 @@
 #define CSCHED_PRIV(_ops)   \
     ((struct csched_private *)((_ops)->sched_data))
 #define CSCHED_PCPU(_c)     \
-    ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
+    ((struct csched_pcpu *)per_cpu(sched_res, _c)->sched_priv)
 #define CSCHED_ITEM(item)   ((struct csched_item *) (item)->priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
@@ -248,7 +248,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
     /*
      * We're peeking at cpu's runq, we must hold the proper lock.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     return list_empty(RUNQ(cpu)) ||
            is_idle_vcpu(__runq_elem(RUNQ(cpu)->next)->vcpu);
@@ -257,7 +257,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
 static inline void
 inc_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
     CSCHED_PCPU(cpu)->nr_runnable++;
 
 }
@@ -265,7 +265,7 @@ inc_nr_runnable(unsigned int cpu)
 static inline void
 dec_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
     ASSERT(CSCHED_PCPU(cpu)->nr_runnable >= 1);
     CSCHED_PCPU(cpu)->nr_runnable--;
 }
@@ -615,7 +615,7 @@ csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     unsigned long flags;
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
 
     /*
      * This is called either during during boot, resume or hotplug, in
@@ -635,7 +635,7 @@ static void
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                     void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
     struct csched_private *prv = CSCHED_PRIV(new_ops);
     struct csched_item *svc = vdata;
 
@@ -654,7 +654,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     spin_unlock(&prv->lock);
 
     per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
+    per_cpu(sched_res, cpu)->sched_priv = pdata;
 
     /*
      * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 5a3a0babab..df0e7282ce 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -567,7 +567,7 @@ static inline struct csched2_private *csched2_priv(const struct scheduler *ops)
 
 static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
 {
-    return per_cpu(schedule_data, cpu).sched_priv;
+    return per_cpu(sched_res, cpu)->sched_priv;
 }
 
 static inline struct csched2_item *csched2_item(const struct sched_item *item)
@@ -1276,7 +1276,7 @@ runq_insert(const struct scheduler *ops, struct csched2_item *svc)
     struct list_head * runq = &c2rqd(ops, cpu)->runq;
     int pos = 0;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     ASSERT(!vcpu_on_runq(svc));
     ASSERT(c2r(cpu) == c2r(svc->vcpu->processor));
@@ -1797,7 +1797,7 @@ static bool vcpu_grab_budget(struct csched2_item *svc)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     if ( svc->budget > 0 )
         return true;
@@ -1844,7 +1844,7 @@ vcpu_return_budget(struct csched2_item *svc, struct list_head *parked)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
     ASSERT(list_empty(parked));
 
     /* budget_lock nests inside runqueue lock. */
@@ -2101,7 +2101,7 @@ csched2_item_wake(const struct scheduler *ops, struct sched_item *item)
     unsigned int cpu = vc->processor;
     s_time_t now;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     ASSERT(!is_idle_vcpu(vc));
 
@@ -2229,7 +2229,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_item *item)
      * just grab the prv lock.  Instead, we'll have to trylock, and
      * do something else reasonable if we fail.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     if ( !read_trylock(&prv->lock) )
     {
@@ -2569,7 +2569,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
      * on either side may be empty).
      */
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
     st.lrqd = c2rqd(ops, cpu);
 
     update_runq_load(ops, st.lrqd, 0, now);
@@ -3475,7 +3475,7 @@ csched2_schedule(
     rqd = c2rqd(ops, cpu);
     BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
 
@@ -3864,7 +3864,7 @@ csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 
     rqi = init_pdata(prv, pdata, cpu);
     /* Move the scheduler lock to the new runq lock. */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
+    per_cpu(sched_res, cpu)->schedule_lock = &prv->rqd[rqi].lock;
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
     spin_unlock(old_lock);
@@ -3903,10 +3903,10 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * this scheduler, and so it's safe to have taken it /before/ our
      * private global lock.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->rqd[rqi].lock);
+    ASSERT(per_cpu(sched_res, cpu)->schedule_lock != &prv->rqd[rqi].lock);
 
     per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
+    per_cpu(sched_res, cpu)->sched_priv = pdata;
 
     /*
      * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
@@ -3914,7 +3914,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * taking it, find all the initializations we've done above in place.
      */
     smp_mb();
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
+    per_cpu(sched_res, cpu)->schedule_lock = &prv->rqd[rqi].lock;
 
     write_unlock(&prv->lock);
 }
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index f7a2650c48..a9cfa163b9 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -168,7 +168,7 @@ static void init_pdata(struct null_private *prv, unsigned int cpu)
 static void null_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     struct null_private *prv = null_priv(ops);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
 
     /* alloc_pdata is not implemented, so we want this to be NULL. */
     ASSERT(!pdata);
@@ -277,7 +277,7 @@ pick_res(struct null_private *prv, struct sched_item *item)
     unsigned int cpu = v->processor, new_cpu;
     cpumask_t *cpus = cpupool_domain_cpumask(v->domain);
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, cpu)->schedule_lock));
 
     for_each_affinity_balance_step( bs )
     {
@@ -388,7 +388,7 @@ static void vcpu_deassign(struct null_private *prv, struct vcpu *v,
 static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                               void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
     struct null_private *prv = null_priv(new_ops);
     struct null_item *nvc = vdata;
 
@@ -406,7 +406,7 @@ static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     init_pdata(prv, cpu);
 
     per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
+    per_cpu(sched_res, cpu)->sched_priv = pdata;
 
     /*
      * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index a3cd00f765..0019646b52 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -75,7 +75,7 @@
 /*
  * Locking:
  * A global system lock is used to protect the RunQ and DepletedQ.
- * The global lock is referenced by schedule_data.schedule_lock
+ * The global lock is referenced by sched_res->schedule_lock
  * from all physical cpus.
  *
  * The lock is already grabbed when calling wake/sleep/schedule/ functions
@@ -176,7 +176,7 @@ static void repl_timer_handler(void *data);
 
 /*
  * System-wide private data, include global RunQueue/DepletedQ
- * Global lock is referenced by schedule_data.schedule_lock from all
+ * Global lock is referenced by sched_res->schedule_lock from all
  * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
  */
 struct rt_private {
@@ -723,7 +723,7 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
     }
 
     /* Move the scheduler lock to our global runqueue lock.  */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+    per_cpu(sched_res, cpu)->schedule_lock = &prv->lock;
 
     /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
     spin_unlock_irqrestore(old_lock, flags);
@@ -745,7 +745,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * another scheduler, but that is how things need to be, for
      * preventing races.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->lock);
+    ASSERT(per_cpu(sched_res, cpu)->schedule_lock != &prv->lock);
 
     /*
      * If we are the absolute first cpu being switched toward this
@@ -763,7 +763,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 
     idle_vcpu[cpu]->sched_item->priv = vdata;
     per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
+    per_cpu(sched_res, cpu)->sched_priv = NULL; /* no pdata */
 
     /*
      * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
@@ -771,7 +771,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * taking it, find all the initializations we've done above in place.
      */
     smp_mb();
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+    per_cpu(sched_res, cpu)->schedule_lock = &prv->lock;
 }
 
 static void
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 90eb915e4e..a9a9f2b691 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -61,7 +61,6 @@ static void vcpu_singleshot_timer_fn(void *data);
 static void poll_timer_fn(void *data);
 
 /* This is global for now so that private implementations can reach it */
-DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
 DEFINE_PER_CPU(struct sched_resource *, sched_res);
 
@@ -161,7 +160,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              !test_bit(v->vcpu_id, v->domain->poll_mask) )
         {
             v->is_urgent = 0;
-            atomic_dec(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_dec(&per_cpu(sched_res, v->processor)->urgent_count);
         }
     }
     else
@@ -170,7 +169,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              unlikely(test_bit(v->vcpu_id, v->domain->poll_mask)) )
         {
             v->is_urgent = 1;
-            atomic_inc(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_inc(&per_cpu(sched_res, v->processor)->urgent_count);
         }
     }
 }
@@ -181,7 +180,7 @@ static inline void vcpu_runstate_change(
     s_time_t delta;
 
     ASSERT(v->runstate.state != new_state);
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, v->processor)->schedule_lock));
 
     vcpu_urgent_count_update(v);
 
@@ -338,7 +337,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
     {
-        per_cpu(schedule_data, v->processor).curr = item;
+        per_cpu(sched_res, v->processor)->curr = item;
         v->is_running = 1;
     }
     else
@@ -463,7 +462,7 @@ void sched_destroy_vcpu(struct vcpu *v)
     kill_timer(&v->singleshot_timer);
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
-        atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
+        atomic_dec(&per_cpu(sched_res, v->processor)->urgent_count);
     SCHED_OP(vcpu_scheduler(v), remove_item, item);
     SCHED_OP(vcpu_scheduler(v), free_vdata, item->priv);
     sched_free_item(item);
@@ -510,7 +509,7 @@ void sched_destroy_domain(struct domain *d)
 
 void vcpu_sleep_nosync_locked(struct vcpu *v)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(sched_res, v->processor)->schedule_lock));
 
     if ( likely(!vcpu_runnable(v)) )
     {
@@ -605,8 +604,8 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
      */
     if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
     {
-        atomic_inc(&per_cpu(schedule_data, new_cpu).urgent_count);
-        atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
+        atomic_inc(&per_cpu(sched_res, new_cpu)->urgent_count);
+        atomic_dec(&per_cpu(sched_res, old_cpu)->urgent_count);
     }
 
     /*
@@ -678,20 +677,20 @@ static void vcpu_migrate_finish(struct vcpu *v)
          * are not correct any longer after evaluating old and new cpu holding
          * the locks.
          */
-        old_lock = per_cpu(schedule_data, old_cpu).schedule_lock;
-        new_lock = per_cpu(schedule_data, new_cpu).schedule_lock;
+        old_lock = per_cpu(sched_res, old_cpu)->schedule_lock;
+        new_lock = per_cpu(sched_res, new_cpu)->schedule_lock;
 
         sched_spin_lock_double(old_lock, new_lock, &flags);
 
         old_cpu = v->processor;
-        if ( old_lock == per_cpu(schedule_data, old_cpu).schedule_lock )
+        if ( old_lock == per_cpu(sched_res, old_cpu)->schedule_lock )
         {
             /*
              * If we selected a CPU on the previosu iteration, check if it
              * remains suitable for running this vCPU.
              */
             if ( pick_called &&
-                 (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+                 (new_lock == per_cpu(sched_res, new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
@@ -699,7 +698,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
             /* Select a new CPU. */
             new_cpu = SCHED_OP(vcpu_scheduler(v), pick_resource,
                                v->sched_item)->processor;
-            if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+            if ( (new_lock == per_cpu(sched_res, new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
             pick_called = 1;
@@ -1492,7 +1491,7 @@ static void schedule(void)
     struct scheduler     *sched;
     unsigned long        *tasklet_work = &this_cpu(tasklet_work_to_do);
     bool_t                tasklet_work_scheduled = 0;
-    struct schedule_data *sd;
+    struct sched_resource *sd;
     spinlock_t           *lock;
     struct task_slice     next_slice;
     int cpu = smp_processor_id();
@@ -1501,7 +1500,7 @@ static void schedule(void)
 
     SCHED_STAT_CRANK(sched_run);
 
-    sd = &this_cpu(schedule_data);
+    sd = this_cpu(sched_res);
 
     /* Update tasklet scheduling status. */
     switch ( *tasklet_work )
@@ -1643,15 +1642,14 @@ static void poll_timer_fn(void *data)
 
 static int cpu_schedule_up(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd;
     void *sched_priv;
-    struct sched_resource *res;
 
-    res = xmalloc(struct sched_resource);
-    if ( res == NULL )
+    sd = xmalloc(struct sched_resource);
+    if ( sd == NULL )
         return -ENOMEM;
-    res->processor = cpu;
-    per_cpu(sched_res, cpu) = res;
+    sd->processor = cpu;
+    per_cpu(sched_res, cpu) = sd;
 
     per_cpu(scheduler, cpu) = &ops;
     spin_lock_init(&sd->_lock);
@@ -1707,7 +1705,7 @@ static int cpu_schedule_up(unsigned int cpu)
 
 static void cpu_schedule_down(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
     struct scheduler *sched = per_cpu(scheduler, cpu);
 
     SCHED_OP(sched, free_pdata, sd->sched_priv, cpu);
@@ -1727,7 +1725,7 @@ static int cpu_schedule_callback(
 {
     unsigned int cpu = (unsigned long)hcpu;
     struct scheduler *sched = per_cpu(scheduler, cpu);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = per_cpu(sched_res, cpu);
     int rc = 0;
 
     /*
@@ -1860,10 +1858,10 @@ void __init scheduler_init(void)
     idle_domain->max_vcpus = nr_cpu_ids;
     if ( vcpu_create(idle_domain, 0, 0) == NULL )
         BUG();
-    this_cpu(schedule_data).curr = idle_vcpu[0]->sched_item;
-    this_cpu(schedule_data).sched_priv = SCHED_OP(&ops, alloc_pdata, 0);
-    BUG_ON(IS_ERR(this_cpu(schedule_data).sched_priv));
-    SCHED_OP(&ops, init_pdata, this_cpu(schedule_data).sched_priv, 0);
+    this_cpu(sched_res)->curr = idle_vcpu[0]->sched_item;
+    this_cpu(sched_res)->sched_priv = SCHED_OP(&ops, alloc_pdata, 0);
+    BUG_ON(IS_ERR(this_cpu(sched_res)->sched_priv));
+    SCHED_OP(&ops, init_pdata, this_cpu(sched_res)->sched_priv, 0);
 }
 
 /*
@@ -1943,7 +1941,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     old_lock = pcpu_schedule_lock_irq(cpu);
 
     vpriv_old = idle->sched_item->priv;
-    ppriv_old = per_cpu(schedule_data, cpu).sched_priv;
+    ppriv_old = per_cpu(sched_res, cpu)->sched_priv;
     SCHED_OP(new_ops, switch_sched, cpu, ppriv, vpriv);
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
diff --git a/xen/include/asm-x86/cpuidle.h b/xen/include/asm-x86/cpuidle.h
index 08da01803f..f520145752 100644
--- a/xen/include/asm-x86/cpuidle.h
+++ b/xen/include/asm-x86/cpuidle.h
@@ -33,7 +33,7 @@ void update_last_cx_stat(struct acpi_processor_power *,
  */
 static inline int sched_has_urgent_vcpu(void)
 {
-    return atomic_read(&this_cpu(schedule_data).urgent_count);
+    return atomic_read(&this_cpu(sched_res)->urgent_count);
 }
 
 #endif /* __X86_ASM_CPUIDLE_H__ */
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 85b77dafdc..4bc053e9f7 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -33,22 +33,18 @@ extern int sched_ratelimit_us;
  * For cache betterness, keep the actual lock in the same cache area
  * as the rest of the struct.  Just have the scheduler point to the
  * one it wants (This may be the one right in front of it).*/
-struct schedule_data {
+struct sched_resource {
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_item  *curr;           /* current task                    */
     void               *sched_priv;
     struct timer        s_timer;        /* scheduling timer                */
     atomic_t            urgent_count;   /* how many urgent vcpus           */
+    unsigned            processor;
 };
 
-#define curr_on_cpu(c)    (per_cpu(schedule_data, c).curr)
-
-struct sched_resource {
-    unsigned     processor;
-};
+#define curr_on_cpu(c)    (per_cpu(sched_res, c)->curr)
 
-DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);
@@ -83,7 +79,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 { \
     for ( ; ; ) \
     { \
-        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+        spinlock_t *lock = per_cpu(sched_res, cpu)->schedule_lock; \
         /* \
          * v->processor may change when grabbing the lock; but \
          * per_cpu(v->processor) may also change, if changing cpu pool \
@@ -93,7 +89,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
          * lock may be the same; this will succeed in that case. \
          */ \
         spin_lock##irq(lock, ## arg); \
-        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+        if ( likely(lock == per_cpu(sched_res, cpu)->schedule_lock) ) \
             return lock; \
         spin_unlock##irq(lock, ## arg); \
     } \
@@ -103,7 +99,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
                                                EXTRA_TYPE(arg), param) \
 { \
-    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+    ASSERT(lock == per_cpu(sched_res, cpu)->schedule_lock); \
     spin_unlock##irq(lock, ## arg); \
 }
 
@@ -132,11 +128,11 @@ sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
 
 static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
 {
-    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
+    spinlock_t *lock = per_cpu(sched_res, cpu)->schedule_lock;
 
     if ( !spin_trylock(lock) )
         return NULL;
-    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+    if ( lock == per_cpu(sched_res, cpu)->schedule_lock )
         return lock;
     spin_unlock(lock);
     return NULL;
-- 
2.16.4

