
[Xen-devel] [PATCH RFC V2 32/45] xen/sched: move struct task_slice into struct sched_item



In order to prepare for multiple vcpus per schedule item, move struct
task_slice in schedule() from the local stack into struct sched_item
of the currently running item. To make access easier for the individual
schedulers, add a pointer to the currently running item as a parameter
of do_schedule().

While at it, switch the tasklet_work_scheduled parameter of
do_schedule() from bool_t to bool.

As struct task_slice is only ever modified with the local schedule
lock held, it is safe to set the individual fields directly in struct
sched_item instead of returning the data via an on-stack copy.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
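For illustration only (not part of this patch): a minimal sketch of the
new do_schedule() contract. Instead of returning a struct task_slice by
value, a scheduler now records its decision in the currently running
item; toy_pick_item() is a hypothetical stand-in for the
scheduler-specific selection logic.

    static void toy_do_schedule(const struct scheduler *ops,
                                struct sched_item *prev, s_time_t now,
                                bool tasklet_work_scheduled)
    {
        /* Hypothetical helper: select the item to run next. */
        struct sched_item *next = toy_pick_item(ops, now,
                                                tasklet_work_scheduled);

        prev->next_task = next;  /* item to run next */
        prev->next_time = -1;    /* a negative value means no time limit */
        next->migrated  = false; /* set to true when moved across pCPUs */
    }
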
 xen/common/sched_arinc653.c | 20 +++++++-------------
 xen/common/sched_credit.c   | 25 +++++++++++--------------
 xen/common/sched_credit2.c  | 21 +++++++++------------
 xen/common/sched_null.c     | 26 ++++++++++++--------------
 xen/common/sched_rt.c       | 22 +++++++++++-----------
 xen/common/schedule.c       | 21 ++++++++++-----------
 xen/include/xen/sched-if.h  | 11 +++--------
 xen/include/xen/sched.h     |  6 ++++++
 8 files changed, 69 insertions(+), 83 deletions(-)

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 3919c0a3e9..e98e98116b 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -497,18 +497,14 @@ a653sched_item_wake(const struct scheduler *ops, struct sched_item *item)
  *
  * @param ops       Pointer to this instance of the scheduler structure
  * @param now       Current time
- *
- * @return          Address of the ITEM structure scheduled to be run next
- *                  Amount of time to execute the returned ITEM
- *                  Flag for whether the ITEM was migrated
  */
-static struct task_slice
+static void
 a653sched_do_schedule(
     const struct scheduler *ops,
+    struct sched_item *prev,
     s_time_t now,
-    bool_t tasklet_work_scheduled)
+    bool tasklet_work_scheduled)
 {
-    struct task_slice ret;                      /* hold the chosen domain */
     struct sched_item *new_task = NULL;
     static unsigned int sched_index = 0;
     static s_time_t next_switch_time;
@@ -586,13 +582,11 @@ a653sched_do_schedule(
      * Return the amount of time the next domain has to run and the address
      * of the selected task's ITEM structure.
      */
-    ret.time = next_switch_time - now;
-    ret.task = new_task;
-    ret.migrated = 0;
-
-    BUG_ON(ret.time <= 0);
+    prev->next_time = next_switch_time - now;
+    prev->next_task = new_task;
+    new_task->migrated = false;
 
-    return ret;
+    BUG_ON(prev->next_time <= 0);
 }
 
 /**
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 4734f52fc7..064f88ab23 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1689,7 +1689,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
 
 static struct csched_item *
 csched_load_balance(struct csched_private *prv, int cpu,
-    struct csched_item *snext, bool_t *stolen)
+    struct csched_item *snext, bool *stolen)
 {
     struct cpupool *c = per_cpu(cpupool, cpu);
     struct csched_item *speer;
@@ -1805,7 +1805,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
                 /* As soon as one item is found, balancing ends */
                 if ( speer != NULL )
                 {
-                    *stolen = 1;
+                    *stolen = true;
                     /*
                      * Next time we'll look for work to steal on this node, we
                      * will start from the next pCPU, with respect to this one,
@@ -1835,19 +1835,18 @@ csched_load_balance(struct csched_private *prv, int cpu,
  * This function is in the critical path. It is designed to be simple and
  * fast for the common case.
  */
-static struct task_slice
-csched_schedule(
-    const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled)
+static void csched_schedule(
+    const struct scheduler *ops, struct sched_item *item, s_time_t now,
+    bool tasklet_work_scheduled)
 {
     const unsigned int cpu = smp_processor_id();
     const unsigned int sched_cpu = sched_get_resource_cpu(cpu);
     struct list_head * const runq = RUNQ(sched_cpu);
-    struct sched_item *item = current->sched_item;
     struct csched_item * const scurr = CSCHED_ITEM(item);
     struct csched_private *prv = CSCHED_PRIV(ops);
     struct csched_item *snext;
-    struct task_slice ret;
     s_time_t runtime, tslice;
+    bool migrated = false;
 
     SCHED_STAT_CRANK(schedule);
     CSCHED_ITEM_CHECK(item);
@@ -1937,7 +1936,6 @@ csched_schedule(
                         (unsigned char *)&d);
         }
 
-        ret.migrated = 0;
         goto out;
     }
     tslice = prv->tslice;
@@ -1955,7 +1953,6 @@ csched_schedule(
     }
 
     snext = __runq_elem(runq->next);
-    ret.migrated = 0;
 
     /* Tasklet work (which runs in idle ITEM context) overrides all else. */
     if ( tasklet_work_scheduled )
@@ -1981,7 +1978,7 @@ csched_schedule(
     if ( snext->pri > CSCHED_PRI_TS_OVER )
         __runq_remove(snext);
     else
-        snext = csched_load_balance(prv, sched_cpu, snext, &ret.migrated);
+        snext = csched_load_balance(prv, sched_cpu, snext, &migrated);
 
     /*
      * Update idlers mask if necessary. When we're idling, other CPUs
@@ -2004,12 +2001,12 @@ out:
     /*
      * Return task to run next...
      */
-    ret.time = (is_idle_item(snext->item) ?
+    item->next_time = (is_idle_item(snext->item) ?
                 -1 : tslice);
-    ret.task = snext->item;
+    item->next_task = snext->item;
+    snext->item->migrated = migrated;
 
-    CSCHED_ITEM_CHECK(ret.task);
-    return ret;
+    CSCHED_ITEM_CHECK(item->next_task);
 }
 
 static void
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index d5cb8c0200..f1074be25d 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3443,19 +3443,18 @@ runq_candidate(struct csched2_runqueue_data *rqd,
  * This function is in the critical path. It is designed to be simple and
  * fast for the common case.
  */
-static struct task_slice
-csched2_schedule(
-    const struct scheduler *ops, s_time_t now, bool tasklet_work_scheduled)
+static void csched2_schedule(
+    const struct scheduler *ops, struct sched_item *curritem, s_time_t now,
+    bool tasklet_work_scheduled)
 {
     const unsigned int cpu = smp_processor_id();
     const unsigned int sched_cpu = sched_get_resource_cpu(cpu);
     struct csched2_runqueue_data *rqd;
-    struct sched_item *curritem = current->sched_item;
     struct csched2_item * const scurr = csched2_item(curritem);
     struct csched2_item *snext = NULL;
     unsigned int skipped_items = 0;
-    struct task_slice ret;
     bool tickled;
+    bool migrated = false;
 
     SCHED_STAT_CRANK(schedule);
     CSCHED2_ITEM_CHECK(curritem);
@@ -3540,8 +3539,6 @@ csched2_schedule(
          && item_runnable(curritem) )
         __set_bit(__CSFLAG_delayed_runq_add, &scurr->flags);
 
-    ret.migrated = 0;
-
     /* Accounting for non-idle tasks */
     if ( !is_idle_item(snext->item) )
     {
@@ -3591,7 +3588,7 @@ csched2_schedule(
             snext->credit += CSCHED2_MIGRATE_COMPENSATION;
             sched_set_res(snext->item, per_cpu(sched_res, sched_cpu));
             SCHED_STAT_CRANK(migrated);
-            ret.migrated = 1;
+            migrated = true;
         }
     }
     else
@@ -3622,11 +3619,11 @@ csched2_schedule(
     /*
      * Return task to run next...
      */
-    ret.time = csched2_runtime(ops, sched_cpu, snext, now);
-    ret.task = snext->item;
+    curritem->next_time = csched2_runtime(ops, sched_cpu, snext, now);
+    curritem->next_task = snext->item;
+    snext->item->migrated = migrated;
 
-    CSCHED2_ITEM_CHECK(ret.task);
-    return ret;
+    CSCHED2_ITEM_CHECK(curritem->next_task);
 }
 
 static void
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 34ce7a05d3..1af396dcdb 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -703,16 +703,14 @@ static inline void null_item_check(struct sched_item *item)
  *  - the item assigned to the pCPU, if there's one and it can run;
  *  - the idle item, otherwise.
  */
-static struct task_slice null_schedule(const struct scheduler *ops,
-                                       s_time_t now,
-                                       bool_t tasklet_work_scheduled)
+static void null_schedule(const struct scheduler *ops, struct sched_item *prev,
+                          s_time_t now, bool tasklet_work_scheduled)
 {
     unsigned int bs;
     const unsigned int cpu = smp_processor_id();
     const unsigned int sched_cpu = sched_get_resource_cpu(cpu);
     struct null_private *prv = null_priv(ops);
     struct null_item *wvc;
-    struct task_slice ret;
 
     SCHED_STAT_CRANK(schedule);
     NULL_ITEM_CHECK(current->sched_item);
@@ -740,19 +738,18 @@ static struct task_slice null_schedule(const struct scheduler *ops,
     if ( tasklet_work_scheduled )
     {
         trace_var(TRC_SNULL_TASKLET, 1, 0, NULL);
-        ret.task = sched_idle_item(sched_cpu);
+        prev->next_task = sched_idle_item(sched_cpu);
     }
     else
-        ret.task = per_cpu(npc, sched_cpu).item;
-    ret.migrated = 0;
-    ret.time = -1;
+        prev->next_task = per_cpu(npc, sched_cpu).item;
+    prev->next_time = -1;
 
     /*
      * We may be new in the cpupool, or just coming back online. In which
      * case, there may be items in the waitqueue that we can assign to us
      * and run.
      */
-    if ( unlikely(ret.task == NULL) )
+    if ( unlikely(prev->next_task == NULL) )
     {
         spin_lock(&prv->waitq_lock);
 
@@ -778,7 +775,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
                 {
                     item_assign(prv, wvc->item, sched_cpu);
                     list_del_init(&wvc->waitq_elem);
-                    ret.task = wvc->item;
+                    prev->next_task = wvc->item;
                     goto unlock;
                 }
             }
@@ -787,11 +784,12 @@ static struct task_slice null_schedule(const struct scheduler *ops,
         spin_unlock(&prv->waitq_lock);
     }
 
-    if ( unlikely(ret.task == NULL || !item_runnable(ret.task)) )
-        ret.task = sched_idle_item(sched_cpu);
+    if ( unlikely(prev->next_task == NULL || !item_runnable(prev->next_task)) )
+        prev->next_task = sched_idle_item(sched_cpu);
 
-    NULL_ITEM_CHECK(ret.task);
-    return ret;
+    NULL_ITEM_CHECK(prev->next_task);
+
+    prev->next_task->migrated = false;
 }
 
 static inline void dump_item(struct null_private *prv, struct null_item *nvc)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 2366e33beb..c5e8b559f3 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -1062,16 +1062,16 @@ runq_pick(const struct scheduler *ops, const cpumask_t *mask)
  * schedule function for rt scheduler.
  * The lock is already grabbed in schedule.c, no need to lock here
  */
-static struct task_slice
-rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled)
+static void
+rt_schedule(const struct scheduler *ops, struct sched_item *curritem,
+            s_time_t now, bool tasklet_work_scheduled)
 {
     const unsigned int cpu = smp_processor_id();
     const unsigned int sched_cpu = sched_get_resource_cpu(cpu);
     struct rt_private *prv = rt_priv(ops);
-    struct rt_item *const scurr = rt_item(current->sched_item);
+    struct rt_item *const scurr = rt_item(curritem);
     struct rt_item *snext = NULL;
-    struct task_slice ret = { .migrated = 0 };
-    struct sched_item *curritem = current->sched_item;
+    bool migrated = false;
 
     /* TRACE */
     {
@@ -1119,7 +1119,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
         __set_bit(__RTDS_delayed_runq_add, &scurr->flags);
 
     snext->last_start = now;
-    ret.time =  -1; /* if an idle item is picked */
+    curritem->next_time =  -1; /* if an idle item is picked */
     if ( !is_idle_item(snext->item) )
     {
         if ( snext != scurr )
@@ -1130,13 +1130,13 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
         if ( sched_item_cpu(snext->item) != sched_cpu )
         {
             sched_set_res(snext->item, per_cpu(sched_res, sched_cpu));
-            ret.migrated = 1;
+            migrated = true;
         }
-        ret.time = snext->cur_budget; /* invoke the scheduler next time */
+        /* Invoke the scheduler next time. */
+        curritem->next_time = snext->cur_budget;
     }
-    ret.task = snext->item;
-
-    return ret;
+    curritem->next_task = snext->item;
+    snext->item->migrated = migrated;
 }
 
 /*
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 9f9d6eb95b..b5fb48c553 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1575,10 +1575,9 @@ static void schedule(void)
     s_time_t              now;
     struct scheduler     *sched;
     unsigned long        *tasklet_work = &this_cpu(tasklet_work_to_do);
-    bool_t                tasklet_work_scheduled = 0;
+    bool                  tasklet_work_scheduled = false;
     struct sched_resource *sd;
     spinlock_t           *lock;
-    struct task_slice     next_slice;
     int cpu = smp_processor_id();
 
     ASSERT_NOT_IN_ATOMIC();
@@ -1594,12 +1593,12 @@ static void schedule(void)
         set_bit(_TASKLET_scheduled, tasklet_work);
         /* fallthrough */
     case TASKLET_enqueued|TASKLET_scheduled:
-        tasklet_work_scheduled = 1;
+        tasklet_work_scheduled = true;
         break;
     case TASKLET_scheduled:
         clear_bit(_TASKLET_scheduled, tasklet_work);
     case 0:
-        /*tasklet_work_scheduled = 0;*/
+        /*tasklet_work_scheduled = false;*/
         break;
     default:
         BUG();
@@ -1613,14 +1612,14 @@ static void schedule(void)
 
     /* get policy-specific decision on scheduling... */
     sched = this_cpu(scheduler);
-    next_slice = sched->do_schedule(sched, now, tasklet_work_scheduled);
+    sched->do_schedule(sched, prev, now, tasklet_work_scheduled);
 
-    next = next_slice.task;
+    next = prev->next_task;
 
     sd->curr = next;
 
-    if ( next_slice.time >= 0 ) /* -ve means no limit */
-        set_timer(&sd->s_timer, now + next_slice.time);
+    if ( prev->next_time >= 0 ) /* -ve means no limit */
+        set_timer(&sd->s_timer, now + prev->next_time);
 
     if ( unlikely(prev == next) )
     {
@@ -1628,7 +1627,7 @@ static void schedule(void)
         TRACE_4D(TRC_SCHED_SWITCH_INFCONT,
                  next->domain->domain_id, next->item_id,
                  now - prev->state_entry_time,
-                 next_slice.time);
+                 prev->next_time);
         trace_continue_running(next->vcpu);
         return continue_running(prev->vcpu);
     }
@@ -1640,7 +1639,7 @@ static void schedule(void)
              next->domain->domain_id, next->item_id,
              (next->vcpu->runstate.state == RUNSTATE_runnable) ?
              (now - next->state_entry_time) : 0,
-             next_slice.time);
+             prev->next_time);
 
     ASSERT(prev->vcpu->runstate.state == RUNSTATE_running);
 
@@ -1670,7 +1669,7 @@ static void schedule(void)
 
     stop_timer(&prev->vcpu->periodic_timer);
 
-    if ( next_slice.migrated )
+    if ( next->migrated )
         vcpu_move_irqs(next->vcpu);
 
     vcpu_periodic_timer_work(next->vcpu);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 2506538649..09544e05c0 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -180,12 +180,6 @@ static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
     return NULL;
 }
 
-struct task_slice {
-    struct sched_item *task;
-    s_time_t           time;
-    bool_t             migrated;
-};
-
 struct scheduler {
     char *name;             /* full name for this scheduler      */
     char *opt_name;         /* option name for this scheduler    */
@@ -228,8 +222,9 @@ struct scheduler {
     void         (*context_saved)  (const struct scheduler *,
                                     struct sched_item *);
 
-    struct task_slice (*do_schedule) (const struct scheduler *, s_time_t,
-                                      bool_t tasklet_work_scheduled);
+    void         (*do_schedule)    (const struct scheduler *,
+                                    struct sched_item *, s_time_t,
+                                    bool tasklet_work_scheduled);
 
     struct sched_resource * (*pick_resource) (const struct scheduler *,
                                               struct sched_item *);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index cbd97f34c7..8bde790d27 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -281,6 +281,8 @@ struct sched_item {
     bool                   affinity_broken;
     /* Does soft affinity actually play a role (given hard affinity)? */
     bool                   soft_aff_effective;
+    /* Item has been migrated to other cpu(s). */
+    bool                   migrated;
     /* Bitmask of CPUs on which this VCPU may run. */
     cpumask_var_t          cpu_hard_affinity;
     /* Used to change affinity temporarily. */
@@ -289,6 +291,10 @@ struct sched_item {
     cpumask_var_t          cpu_hard_affinity_saved;
     /* Bitmask of CPUs on which this VCPU prefers to run. */
     cpumask_var_t          cpu_soft_affinity;
+
+    /* Next item to run. */
+    struct sched_item      *next_task;
+    s_time_t                next_time;
 };
 
 #define for_each_sched_item(d, e)                                         \
-- 
2.16.4

