
[Xen-devel] [PATCH 03/60] xen/sched: let sched_switch_sched() return new lock address



Instead of setting the scheduler percpu lock address in each of the
switch_sched instances of the different schedulers, do that in
schedule_cpu_switch(), which is the single caller of that function.
For that purpose, let sched_switch_sched() just return the new lock
address.

This allows setting the new struct scheduler and struct schedule_data
values in the percpu area in schedule_cpu_switch() instead of in the
schedulers, too.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
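Illustrative note (not part of the patch): below is a minimal, standalone C
sketch of the calling convention this change introduces - the switch_sched
hook only prepares its own state and returns the lock address, and the single
caller installs it as the last step. The identifiers sched_data and
toy_switch_sched are made up for the example and are not the real Xen names;
see the schedule.c hunk further down for the actual code.

#include <stdio.h>

typedef int spinlock_t;               /* stand-in for Xen's spinlock_t */

struct sched_data {
    spinlock_t *schedule_lock;        /* pointer other CPUs dereference */
    spinlock_t  _lock;                /* default per-CPU lock */
    void       *sched_priv;           /* scheduler-private pdata */
};

/* With this patch, a switch_sched hook only sets up its private state and
 * returns the lock to be used; it no longer writes the per-CPU
 * scheduler/lock fields itself. */
static spinlock_t *toy_switch_sched(struct sched_data *sd, void *pdata)
{
    (void)pdata;                      /* scheduler-specific init would use it */
    return &sd->_lock;                /* e.g. route back to the default lock */
}

int main(void)
{
    struct sched_data sd = { .schedule_lock = &sd._lock };
    void *ppriv = NULL;

    /* The single caller (schedule_cpu_switch() in the real code) installs
     * the new pdata and, as the very last step, the returned lock; Xen
     * puts an smp_mb() between the two stores so that whoever takes the
     * new lock finds the initialisations done before it was published. */
    spinlock_t *new_lock = toy_switch_sched(&sd, ppriv);

    sd.sched_priv = ppriv;
    /* smp_mb() here in the real code */
    sd.schedule_lock = new_lock;

    printf("schedule_lock now points at %p\n", (void *)sd.schedule_lock);
    return 0;
}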
 xen/common/sched_arinc653.c | 14 ++------------
 xen/common/sched_credit.c   | 13 ++-----------
 xen/common/sched_credit2.c  | 15 +++------------
 xen/common/sched_null.c     | 16 ++++------------
 xen/common/sched_rt.c       | 12 ++----------
 xen/common/schedule.c       | 18 +++++++++++++++---
 xen/include/xen/sched-if.h  |  9 +++++----
 7 files changed, 33 insertions(+), 64 deletions(-)

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index a4c6d00b81..72b988ea5f 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -630,7 +630,7 @@ a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
  * @param pdata     scheduler specific PCPU data (we don't have any)
  * @param vdata     scheduler specific VCPU data of the idle vcpu
  */
-static void
+static spinlock_t *
 a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                   void *pdata, void *vdata)
 {
@@ -641,17 +641,7 @@ a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 
     idle_vcpu[cpu]->sched_priv = vdata;
 
-    per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
-
-    /*
-     * (Re?)route the lock to its default location. We actually do not use
-     * it, but if we leave it pointing to where it does now (i.e., the
-     * runqueue lock for this PCPU in the default scheduler), we'd be
-     * causing unnecessary contention on that lock (in cases where it is
-     * shared among multiple PCPUs, like in Credit2 and RTDS).
-     */
-    sd->schedule_lock = &sd->_lock;
+    return &sd->_lock;
 }
 
 /**
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 7b7facbace..621c408ed0 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -631,7 +631,7 @@ csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 }
 
 /* Change the scheduler of cpu to us (Credit). */
-static void
+static spinlock_t *
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                     void *pdata, void *vdata)
 {
@@ -653,16 +653,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     init_pdata(prv, pdata, cpu);
     spin_unlock(&prv->lock);
 
-    per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
-
-    /*
-     * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
-     * if it is free (and it can be) we want that anyone that manages
-     * taking it, finds all the initializations we've done above in place.
-     */
-    smp_mb();
-    sd->schedule_lock = &sd->_lock;
+    return &sd->_lock;
 }
 
 #ifndef NDEBUG
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 9c1c3b4e08..8e4381d8a7 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3855,7 +3855,7 @@ csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 }
 
 /* Change the scheduler of cpu to us (Credit2). */
-static void
+static spinlock_t *
 csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                      void *pdata, void *vdata)
 {
@@ -3888,18 +3888,9 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      */
     ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->rqd[rqi].lock);
 
-    per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
-
-    /*
-     * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
-     * if it is free (and it can be) we want that anyone that manages
-     * taking it, find all the initializations we've done above in place.
-     */
-    smp_mb();
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
-
     write_unlock(&prv->lock);
+
+    return &prv->rqd[rqi].lock;
 }
 
 static void
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index a59dbb2692..10427b37ab 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -381,8 +381,9 @@ static void vcpu_deassign(struct null_private *prv, struct vcpu *v,
 }
 
 /* Change the scheduler of cpu to us (null). */
-static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
-                              void *pdata, void *vdata)
+static spinlock_t *null_switch_sched(struct scheduler *new_ops,
+                                     unsigned int cpu,
+                                     void *pdata, void *vdata)
 {
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     struct null_private *prv = null_priv(new_ops);
@@ -401,16 +402,7 @@ static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 
     init_pdata(prv, cpu);
 
-    per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
-
-    /*
-     * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
-     * if it is free (and it can be) we want that anyone that manages
-     * taking it, finds all the initializations we've done above in place.
-     */
-    smp_mb();
-    sd->schedule_lock = &sd->_lock;
+    return &sd->_lock;
 }
 
 static void null_vcpu_insert(const struct scheduler *ops, struct vcpu *v)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index f1b81f0373..0acfc3d702 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -729,7 +729,7 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 }
 
 /* Change the scheduler of cpu to us (RTDS). */
-static void
+static spinlock_t *
 rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                 void *pdata, void *vdata)
 {
@@ -761,16 +761,8 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     }
 
     idle_vcpu[cpu]->sched_priv = vdata;
-    per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
 
-    /*
-     * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
-     * if it is free (and it can be) we want that anyone that manages
-     * taking it, find all the initializations we've done above in place.
-     */
-    smp_mb();
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+    return &prv->lock;
 }
 
 static void
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 4da970c543..6dc96b3cd4 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1812,7 +1812,8 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
     struct cpupool *old_pool = per_cpu(cpupool, cpu);
-    spinlock_t * old_lock;
+    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    spinlock_t *old_lock, *new_lock;
 
     /*
      * pCPUs only move from a valid cpupool to free (i.e., out of any pool),
@@ -1870,8 +1871,19 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     old_lock = pcpu_schedule_lock_irq(cpu);
 
     vpriv_old = idle->sched_priv;
-    ppriv_old = per_cpu(schedule_data, cpu).sched_priv;
-    sched_switch_sched(new_ops, cpu, ppriv, vpriv);
+    ppriv_old = sd->sched_priv;
+    new_lock = sched_switch_sched(new_ops, cpu, ppriv, vpriv);
+
+    per_cpu(scheduler, cpu) = new_ops;
+    sd->sched_priv = ppriv;
+
+    /*
+     * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
+     * if it is free (and it can be) we want that anyone that manages
+     * taking it, finds all the initializations we've done above in place.
+     */
+    smp_mb();
+    sd->schedule_lock = new_lock;
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
     spin_unlock_irq(old_lock);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index b3c3e189d9..b8e2b2e49e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -153,7 +153,7 @@ struct scheduler {
     /* Idempotent. */
     void         (*free_domdata)   (const struct scheduler *, void *);
 
-    void         (*switch_sched)   (struct scheduler *, unsigned int,
+    spinlock_t * (*switch_sched)   (struct scheduler *, unsigned int,
                                     void *, void *);
 
     /* Activate / deactivate vcpus in a cpu pool */
@@ -195,10 +195,11 @@ static inline void sched_deinit(struct scheduler *s)
     s->deinit(s);
 }
 
-static inline void sched_switch_sched(struct scheduler *s, unsigned int cpu,
-                                      void *pdata, void *vdata)
+static inline spinlock_t *sched_switch_sched(struct scheduler *s,
+                                             unsigned int cpu,
+                                             void *pdata, void *vdata)
 {
-    s->switch_sched(s, cpu, pdata, vdata);
+    return s->switch_sched(s, cpu, pdata, vdata);
 }
 
 static inline void sched_dump_settings(const struct scheduler *s)
-- 
2.16.4

