
[Xen-devel] [PATCH RFC 09/49] xen/sched: alloc struct sched_item for each vcpu



Allocate a struct sched_item for each vcpu. This removes the need to
construct a temporary one on the stack in schedule.c each time a
scheduler hook is called.
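
The change in a nutshell (a condensed sketch of the hunks below; timer
setup, affinity handling and most error paths omitted):

    /* sched_init_vcpu(): the item now lives as long as the vcpu. */
    struct sched_item *item = xzalloc(struct sched_item);
    if ( item == NULL )
        return 1;
    v->sched_item = item;
    item->vcpu = v;
    v->sched_priv = SCHED_OP(dom_scheduler(d), alloc_vdata, item,
                             d->sched_priv);

    /* Scheduler hooks receive the stored pointer instead of a stack copy. */
    SCHED_OP(vcpu_scheduler(v), wake, v->sched_item);

    /* sched_destroy_vcpu(): freed together with the scheduler private data. */
    SCHED_OP(vcpu_scheduler(v), free_vdata, v->sched_priv);
    xfree(v->sched_item);
    v->sched_item = NULL;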

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/common/schedule.c   | 68 +++++++++++++++++++++++--------------------------
 xen/include/xen/sched.h |  2 ++
 2 files changed, 34 insertions(+), 36 deletions(-)

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index d1a958143a..2b7d62ede7 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -256,10 +256,15 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
 int sched_init_vcpu(struct vcpu *v, unsigned int processor)
 {
     struct domain *d = v->domain;
-    struct sched_item item = { .vcpu = v };
+    struct sched_item *item;
 
     v->processor = processor;
 
+    if ( (item = xzalloc(struct sched_item)) == NULL )
+        return 1;
+    v->sched_item = item;
+    item->vcpu = v;
+
     /* Initialise the per-vcpu timers. */
     init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
                v, v->processor);
@@ -268,10 +273,14 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     init_timer(&v->poll_timer, poll_timer_fn,
                v, v->processor);
 
-    v->sched_priv = SCHED_OP(dom_scheduler(d), alloc_vdata, &item,
+    v->sched_priv = SCHED_OP(dom_scheduler(d), alloc_vdata, item,
                      d->sched_priv);
     if ( v->sched_priv == NULL )
+    {
+        v->sched_item = NULL;
+        xfree(item);
         return 1;
+    }
 
     /*
      * Initialize affinity settings. The idler, and potentially
@@ -290,7 +299,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     }
     else
     {
-        SCHED_OP(dom_scheduler(d), insert_item, &item);
+        SCHED_OP(dom_scheduler(d), insert_item, item);
     }
 
     return 0;
@@ -311,7 +320,6 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     void *vcpudata;
     struct scheduler *old_ops;
     void *old_domdata;
-    struct sched_item item;
 
     for_each_vcpu ( d, v )
     {
@@ -332,8 +340,8 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
     for_each_vcpu ( d, v )
     {
-        item.vcpu = v;
-        vcpu_priv[v->vcpu_id] = SCHED_OP(c->sched, alloc_vdata, &item, domdata);
+        vcpu_priv[v->vcpu_id] = SCHED_OP(c->sched, alloc_vdata,
+                                         v->sched_item, domdata);
         if ( vcpu_priv[v->vcpu_id] == NULL )
         {
             for_each_vcpu ( d, v )
@@ -351,8 +359,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
     for_each_vcpu ( d, v )
     {
-        item.vcpu = v;
-        SCHED_OP(old_ops, remove_item, &item);
+        SCHED_OP(old_ops, remove_item, v->sched_item);
     }
 
     d->cpupool = c;
@@ -363,7 +370,6 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     {
         spinlock_t *lock;
 
-        item.vcpu = v;
         vcpudata = v->sched_priv;
 
         migrate_timer(&v->periodic_timer, new_p);
@@ -388,7 +394,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
         new_p = cpumask_cycle(new_p, c->cpu_valid);
 
-        SCHED_OP(c->sched, insert_item, &item);
+        SCHED_OP(c->sched, insert_item, v->sched_item);
 
         SCHED_OP(old_ops, free_vdata, vcpudata);
     }
@@ -406,15 +412,17 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
 void sched_destroy_vcpu(struct vcpu *v)
 {
-    struct sched_item item = { .vcpu = v };
+    struct sched_item *item = v->sched_item;
 
     kill_timer(&v->periodic_timer);
     kill_timer(&v->singleshot_timer);
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
         atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
-    SCHED_OP(vcpu_scheduler(v), remove_item, &item);
+    SCHED_OP(vcpu_scheduler(v), remove_item, item);
     SCHED_OP(vcpu_scheduler(v), free_vdata, v->sched_priv);
+    xfree(item);
+    v->sched_item = NULL;
 }
 
 int sched_init_domain(struct domain *d, int poolid)
@@ -458,8 +466,6 @@ void sched_destroy_domain(struct domain *d)
 
 void vcpu_sleep_nosync_locked(struct vcpu *v)
 {
-    struct sched_item item = { .vcpu = v };
-
     ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
 
     if ( likely(!vcpu_runnable(v)) )
@@ -467,7 +473,7 @@ void vcpu_sleep_nosync_locked(struct vcpu *v)
         if ( v->runstate.state == RUNSTATE_runnable )
             vcpu_runstate_change(v, RUNSTATE_offline, NOW());
 
-        SCHED_OP(vcpu_scheduler(v), sleep, &item);
+        SCHED_OP(vcpu_scheduler(v), sleep, v->sched_item);
     }
 }
 
@@ -499,7 +505,6 @@ void vcpu_wake(struct vcpu *v)
 {
     unsigned long flags;
     spinlock_t *lock;
-    struct sched_item item = { .vcpu = v };
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 
@@ -509,7 +514,7 @@ void vcpu_wake(struct vcpu *v)
     {
         if ( v->runstate.state >= RUNSTATE_blocked )
             vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
-        SCHED_OP(vcpu_scheduler(v), wake, &item);
+        SCHED_OP(vcpu_scheduler(v), wake, v->sched_item);
     }
     else if ( !(v->pause_flags & VPF_blocked) )
     {
@@ -548,7 +553,6 @@ void vcpu_unblock(struct vcpu *v)
 static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
 {
     unsigned int old_cpu = v->processor;
-    struct sched_item item = { .vcpu = v };
 
     /*
      * Transfer urgency status to new CPU before switching CPUs, as
@@ -566,7 +570,7 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
      * pointer cant' change while the current lock is held.
      */
     if ( vcpu_scheduler(v)->migrate )
-        SCHED_OP(vcpu_scheduler(v), migrate, &item, new_cpu);
+        SCHED_OP(vcpu_scheduler(v), migrate, v->sched_item, new_cpu);
     else
         v->processor = new_cpu;
 }
@@ -610,7 +614,6 @@ static void vcpu_migrate_finish(struct vcpu *v)
     unsigned int old_cpu, new_cpu;
     spinlock_t *old_lock, *new_lock;
     bool_t pick_called = 0;
-    struct sched_item item = { .vcpu = v };
 
     /*
      * If the vcpu is currently running, this will be handled by
@@ -647,7 +650,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
                 break;
 
             /* Select a new CPU. */
-            new_cpu = SCHED_OP(vcpu_scheduler(v), pick_cpu, &item);
+            new_cpu = SCHED_OP(vcpu_scheduler(v), pick_cpu, v->sched_item);
             if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
@@ -717,7 +720,6 @@ void restore_vcpu_affinity(struct domain *d)
     {
         spinlock_t *lock;
         unsigned int old_cpu = v->processor;
-        struct sched_item item = { .vcpu = v };
 
         ASSERT(!vcpu_runnable(v));
 
@@ -753,7 +755,7 @@ void restore_vcpu_affinity(struct domain *d)
         v->processor = cpumask_any(cpumask_scratch_cpu(cpu));
 
         lock = vcpu_schedule_lock_irq(v);
-        v->processor = SCHED_OP(vcpu_scheduler(v), pick_cpu, &item);
+        v->processor = SCHED_OP(vcpu_scheduler(v), pick_cpu, v->sched_item);
         spin_unlock_irq(lock);
 
         if ( old_cpu != v->processor )
@@ -871,9 +873,8 @@ static int cpu_disable_scheduler_check(unsigned int cpu)
 void sched_set_affinity(
     struct vcpu *v, const cpumask_t *hard, const cpumask_t *soft)
 {
-    struct sched_item item = { .vcpu = v };
-
-    SCHED_OP(dom_scheduler(v->domain), adjust_affinity, &item, hard, soft);
+    SCHED_OP(dom_scheduler(v->domain), adjust_affinity, v->sched_item,
+             hard, soft);
 
     if ( hard )
         cpumask_copy(v->cpu_hard_affinity, hard);
@@ -1049,10 +1050,9 @@ static long do_poll(struct sched_poll *sched_poll)
 long vcpu_yield(void)
 {
     struct vcpu * v=current;
-    struct sched_item item = { .vcpu = v };
     spinlock_t *lock = vcpu_schedule_lock_irq(v);
 
-    SCHED_OP(vcpu_scheduler(v), yield, &item);
+    SCHED_OP(vcpu_scheduler(v), yield, v->sched_item);
     vcpu_schedule_unlock_irq(lock, v);
 
     SCHED_STAT_CRANK(vcpu_yield);
@@ -1547,8 +1547,6 @@ static void schedule(void)
 
 void context_saved(struct vcpu *prev)
 {
-    struct sched_item item = { .vcpu = prev };
-
     /* Clear running flag /after/ writing context to memory. */
     smp_wmb();
 
@@ -1557,7 +1555,7 @@ void context_saved(struct vcpu *prev)
     /* Check for migration request /after/ clearing running flag. */
     smp_mb();
 
-    SCHED_OP(vcpu_scheduler(prev), context_saved, &item);
+    SCHED_OP(vcpu_scheduler(prev), context_saved, prev->sched_item);
 
     vcpu_migrate_finish(prev);
 }
@@ -1613,7 +1611,6 @@ static int cpu_schedule_up(unsigned int cpu)
     else
     {
         struct vcpu *idle = idle_vcpu[cpu];
-        struct sched_item item = { .vcpu = idle };
 
         /*
          * During (ACPI?) suspend the idle vCPU for this pCPU is not freed,
@@ -1627,7 +1624,7 @@ static int cpu_schedule_up(unsigned int cpu)
          */
         ASSERT(idle->sched_priv == NULL);
 
-        idle->sched_priv = SCHED_OP(&ops, alloc_vdata, &item,
+        idle->sched_priv = SCHED_OP(&ops, alloc_vdata, idle->sched_item,
                                     idle->domain->sched_priv);
         if ( idle->sched_priv == NULL )
             return -ENOMEM;
@@ -1820,7 +1817,6 @@ void __init scheduler_init(void)
 int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     struct vcpu *idle;
-    struct sched_item item;
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
@@ -1856,11 +1852,11 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
      *    sched_priv field of the per-vCPU info of the idle domain.
      */
     idle = idle_vcpu[cpu];
-    item.vcpu = idle;
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
     if ( IS_ERR(ppriv) )
         return PTR_ERR(ppriv);
-    vpriv = SCHED_OP(new_ops, alloc_vdata, &item, idle->domain->sched_priv);
+    vpriv = SCHED_OP(new_ops, alloc_vdata, idle->sched_item,
+                     idle->domain->sched_priv);
     if ( vpriv == NULL )
     {
         SCHED_OP(new_ops, free_pdata, ppriv, cpu);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index edee52dfe4..c8aa2915c4 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -140,6 +140,7 @@ void evtchn_destroy(struct domain *d); /* from domain_kill */
 void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy */
 
 struct waitqueue_vcpu;
+struct sched_item;
 
 struct vcpu
 {
@@ -160,6 +161,7 @@ struct vcpu
 
     struct timer     poll_timer;    /* timeout for SCHEDOP_poll */
 
+    struct sched_item *sched_item;
     void            *sched_priv;    /* scheduler-specific data */
 
     struct vcpu_runstate_info runstate;
-- 
2.16.4

