[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 38/47] xen/sched: move per-cpu variable cpupool to struct sched_resource



Having a pointer to struct cpupool in struct sched_resource is enough;
the per-cpu variable holding that pointer can be dropped.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V1: new patch
---
 xen/common/cpupool.c       | 4 +---
 xen/common/sched_credit.c  | 2 +-
 xen/common/sched_rt.c      | 2 +-
 xen/common/schedule.c      | 8 ++++----
 xen/include/xen/sched-if.h | 2 +-
 5 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index a5f4113915..e0333a8417 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -34,8 +34,6 @@ static cpumask_t cpupool_locked_cpus;
 
 static DEFINE_SPINLOCK(cpupool_lock);
 
-DEFINE_PER_CPU(struct cpupool *, cpupool);
-
 static void free_cpupool_struct(struct cpupool *c)
 {
     if ( c )
@@ -506,7 +504,7 @@ static int cpupool_cpu_add(unsigned int cpu)
      * (or unplugging would have failed) and that is the default behavior
      * anyway.
      */
-    per_cpu(cpupool, cpu) = NULL;
+    get_sched_res(cpu)->cpupool = NULL;
     ret = cpupool_assign_cpu_locked(cpupool0, cpu);
 
     spin_unlock(&cpupool_lock);
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index e47e865d76..41699f6b32 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1681,7 +1681,7 @@ static struct csched_unit *
 csched_load_balance(struct csched_private *prv, int cpu,
     struct csched_unit *snext, bool *stolen)
 {
-    struct cpupool *c = per_cpu(cpupool, cpu);
+    struct cpupool *c = get_sched_res(cpu)->cpupool;
     struct csched_unit *speer;
     cpumask_t workers;
     cpumask_t *online;
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 9f4e397334..d7eb6931ef 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -775,7 +775,7 @@ rt_deinit_pdata(const struct scheduler *ops, void *pcpu, 
int cpu)
 
     if ( prv->repl_timer.cpu == cpu )
     {
-        struct cpupool *c = per_cpu(cpupool, cpu);
+        struct cpupool *c = get_sched_res(cpu)->cpupool;
         unsigned int new_cpu = cpumask_cycle(cpu, cpupool_online_cpumask(c));
 
         /*
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 1bd84a49bc..e360c9ec9f 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1093,7 +1093,7 @@ int cpu_disable_scheduler(unsigned int cpu)
     cpumask_t online_affinity;
     int ret = 0;
 
-    c = per_cpu(cpupool, cpu);
+    c = get_sched_res(cpu)->cpupool;
     if ( c == NULL )
         return ret;
 
@@ -1162,7 +1162,7 @@ static int cpu_disable_scheduler_check(unsigned int cpu)
     struct cpupool *c;
     struct vcpu *v;
 
-    c = per_cpu(cpupool, cpu);
+    c = get_sched_res(cpu)->cpupool;
     if ( c == NULL )
         return 0;
 
@@ -2514,8 +2514,8 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool 
*c)
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = get_sched_res(cpu)->scheduler;
     struct scheduler *new_ops = (c == NULL) ? &sched_idle_ops : c->sched;
-    struct cpupool *old_pool = per_cpu(cpupool, cpu);
     struct sched_resource *sd = get_sched_res(cpu);
+    struct cpupool *old_pool = sd->cpupool;
     spinlock_t *old_lock, *new_lock;
     unsigned long flags;
 
@@ -2597,7 +2597,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool 
*c)
     sched_free_vdata(old_ops, vpriv_old);
     sched_free_pdata(old_ops, ppriv_old, cpu);
 
-    per_cpu(cpupool, cpu) = c;
+    get_sched_res(cpu)->cpupool = c;
     /* When a cpu is added to a pool, trigger it to go pick up some work */
     if ( c != NULL )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 528874ab11..5625cafb6e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -37,6 +37,7 @@ extern const cpumask_t *sched_res_mask;
  * one it wants (This may be the one right in front of it).*/
 struct sched_resource {
     struct scheduler   *scheduler;
+    struct cpupool     *cpupool;
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_unit  *curr;
@@ -52,7 +53,6 @@ struct sched_resource {
 
 #define curr_on_cpu(c)    (get_sched_res(c)->curr)
 
-DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);
 
 static inline struct sched_resource *get_sched_res(unsigned int cpu)
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.