
[Xen-changelog] [xen master] sched: rtds code clean-up



commit 36c659348837dd411ad6687a76825dd30dd8a419
Author:     Tianyang Chen <tiche@xxxxxxxxxxxxx>
AuthorDate: Thu Jun 30 14:00:34 2016 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jun 30 14:00:34 2016 +0200

    sched: rtds code clean-up
    
    No functional change:
     - aligned comments in rt_vcpu struct
     - removed double underscores from the names of some functions
     - fixed coding style for control structures involving lists
     - fixed typos in the comments
     - added comments for UPDATE_LIMIT_SHIFT
    
    Signed-off-by: Tianyang Chen <tiche@xxxxxxxxxxxxx>
    Reviewed-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
 xen/common/sched_rt.c | 90 +++++++++++++++++++++++++++------------------------
 1 file changed, 48 insertions(+), 42 deletions(-)

diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 5b077d7..d934193 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -80,7 +80,7 @@
  * in schedule.c
  *
  * The functions involes RunQ and needs to grab locks are:
- *    vcpu_insert, vcpu_remove, context_saved, __runq_insert
+ *    vcpu_insert, vcpu_remove, context_saved, runq_insert
  */
 
 
@@ -107,6 +107,12 @@
  */
 #define RTDS_MIN_BUDGET     (MICROSECS(10))
 
+/*
+ * UPDATE_LIMIT_SHIFT: a constant used in rt_update_deadline(). When finding
+ * the next deadline, performing addition could be faster if the difference
+ * between cur_deadline and now is small. If the difference is bigger than
+ * 1024 * period, use multiplication.
+ */
 #define UPDATE_LIMIT_SHIFT      10
 
 /*
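
Context for the new comment: rt_update_deadline() itself is not part of
this hunk. The following sketch reconstructs the logic the comment
describes, for illustration only; it is not a verbatim copy of the
source ('svc' is a struct rt_vcpu *, 'now' the current time):

    if ( svc->cur_deadline + (svc->period << UPDATE_LIMIT_SHIFT) > now )
    {
        /* Small gap: a few 64-bit additions beat a division. */
        do
            svc->cur_deadline += svc->period;
        while ( svc->cur_deadline <= now );
    }
    else
    {
        /* Idle for more than 1024 periods: jump in one division. */
        long missed = ((now - svc->cur_deadline) / svc->period) + 1;

        svc->cur_deadline += missed * svc->period;
    }
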
@@ -158,12 +164,12 @@
 static void repl_timer_handler(void *data);
 
 /*
- * Systme-wide private data, include global RunQueue/DepletedQ
+ * System-wide private data, include global RunQueue/DepletedQ
  * Global lock is referenced by schedule_data.schedule_lock from all
  * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
  */
 struct rt_private {
-    spinlock_t lock;            /* the global coarse grand lock */
+    spinlock_t lock;            /* the global coarse-grained lock */
     struct list_head sdom;      /* list of availalbe domains, used for dump */
     struct list_head runq;      /* ordered list of runnable vcpus */
     struct list_head depletedq; /* unordered list of depleted vcpus */
@@ -176,7 +182,7 @@ struct rt_private {
  * Virtual CPU
  */
 struct rt_vcpu {
-    struct list_head q_elem;    /* on the runq/depletedq list */
+    struct list_head q_elem;     /* on the runq/depletedq list */
     struct list_head replq_elem; /* on the replenishment events list */
 
     /* Up-pointers */
@@ -188,11 +194,11 @@ struct rt_vcpu {
     s_time_t budget;
 
     /* VCPU current infomation in nanosecond */
-    s_time_t cur_budget;        /* current budget */
-    s_time_t last_start;        /* last start time */
-    s_time_t cur_deadline;      /* current deadline for EDF */
+    s_time_t cur_budget;         /* current budget */
+    s_time_t last_start;         /* last start time */
+    s_time_t cur_deadline;       /* current deadline for EDF */
 
-    unsigned flags;             /* mark __RTDS_scheduled, etc.. */
+    unsigned flags;              /* mark __RTDS_scheduled, etc.. */
 };
 
 /*
@@ -241,13 +247,13 @@ static inline struct list_head *rt_replq(const struct scheduler *ops)
  * and the replenishment events queue.
  */
 static int
-__vcpu_on_q(const struct rt_vcpu *svc)
+vcpu_on_q(const struct rt_vcpu *svc)
 {
    return !list_empty(&svc->q_elem);
 }
 
 static struct rt_vcpu *
-__q_elem(struct list_head *elem)
+q_elem(struct list_head *elem)
 {
     return list_entry(elem, struct rt_vcpu, q_elem);
 }
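
Aside: q_elem() is the usual embedded-list accessor; list_entry() is the
Linux-style container_of(), mapping a list node back to its enclosing
struct rt_vcpu. An illustrative use, mirroring the loops in rt_dump()
below:

    struct list_head *iter;

    list_for_each ( iter, rt_runq(ops) )
    {
        struct rt_vcpu *svc = q_elem(iter); /* node -> enclosing rt_vcpu */
        /* inspect svc->cur_deadline, svc->cur_budget, ... */
    }
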
@@ -303,7 +309,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
             svc->cur_budget,
             svc->cur_deadline,
             svc->last_start,
-            __vcpu_on_q(svc),
+            vcpu_on_q(svc),
             vcpu_runnable(svc->vcpu),
             svc->flags,
             keyhandler_scratch);
@@ -339,28 +345,28 @@ rt_dump(const struct scheduler *ops)
     replq = rt_replq(ops);
 
     printk("Global RunQueue info:\n");
-    list_for_each( iter, runq )
+    list_for_each ( iter, runq )
     {
-        svc = __q_elem(iter);
+        svc = q_elem(iter);
         rt_dump_vcpu(ops, svc);
     }
 
     printk("Global DepletedQueue info:\n");
-    list_for_each( iter, depletedq )
+    list_for_each ( iter, depletedq )
     {
-        svc = __q_elem(iter);
+        svc = q_elem(iter);
         rt_dump_vcpu(ops, svc);
     }
 
     printk("Global Replenishment Events info:\n");
-    list_for_each( iter, replq )
+    list_for_each ( iter, replq )
     {
         svc = replq_elem(iter);
         rt_dump_vcpu(ops, svc);
     }
 
     printk("Domain info:\n");
-    list_for_each( iter, &prv->sdom )
+    list_for_each ( iter, &prv->sdom )
     {
         struct vcpu *v;
 
@@ -380,7 +386,7 @@ rt_dump(const struct scheduler *ops)
 
 /*
  * update deadline and budget when now >= cur_deadline
- * it need to be updated to the deadline of the current period
+ * it needs to be updated to the deadline of the current period
  */
 static void
 rt_update_deadline(s_time_t now, struct rt_vcpu *svc)
@@ -463,14 +469,14 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
     return !pos;
 }
 #define deadline_runq_insert(...) \
-  deadline_queue_insert(&__q_elem, ##__VA_ARGS__)
+  deadline_queue_insert(&q_elem, ##__VA_ARGS__)
 #define deadline_replq_insert(...) \
   deadline_queue_insert(&replq_elem, ##__VA_ARGS__)
 
 static inline void
-__q_remove(struct rt_vcpu *svc)
+q_remove(struct rt_vcpu *svc)
 {
-    ASSERT( __vcpu_on_q(svc) );
+    ASSERT( vcpu_on_q(svc) );
     list_del_init(&svc->q_elem);
 }
 
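The two macros above specialize a single sorted-insert helper by passing
the queue's element accessor as a function pointer. Only the helper's
tail ("return !pos;") is visible in this hunk; a reconstruction of the
whole function, for illustration:

    static inline bool_t
    deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
                          struct rt_vcpu *svc, struct list_head *elem,
                          struct list_head *queue)
    {
        struct list_head *iter;
        int pos = 0;

        list_for_each ( iter, queue )
        {
            struct rt_vcpu *iter_svc = (*qelem)(iter);

            if ( svc->cur_deadline <= iter_svc->cur_deadline )
                break;
            pos++;
        }
        list_add_tail(elem, iter); /* insert before iter, keeping EDF order */

        return !pos;               /* true iff svc became the new queue head */
    }
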
@@ -506,13 +512,13 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
  * Insert svc without budget in DepletedQ unsorted;
  */
 static void
-__runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
 {
     struct rt_private *prv = rt_priv(ops);
     struct list_head *runq = rt_runq(ops);
 
     ASSERT( spin_is_locked(&prv->lock) );
-    ASSERT( !__vcpu_on_q(svc) );
+    ASSERT( !vcpu_on_q(svc) );
     ASSERT( vcpu_on_replq(svc) );
 
     /* add svc to runq if svc still has budget */
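
The rest of runq_insert() is not shown in this hunk; it dispatches on
the remaining budget, roughly as follows (illustrative reconstruction):

    if ( svc->cur_budget > 0 )
        deadline_runq_insert(svc, &svc->q_elem, runq); /* EDF-ordered runq */
    else
        list_add(&svc->q_elem, &prv->depletedq);       /* unsorted depletedq */
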
@@ -851,12 +857,12 @@ rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
     if ( now >= svc->cur_deadline )
         rt_update_deadline(now, svc);
 
-    if ( !__vcpu_on_q(svc) && vcpu_runnable(vc) )
+    if ( !vcpu_on_q(svc) && vcpu_runnable(vc) )
     {
         replq_insert(ops, svc);
 
         if ( !vc->is_running )
-            __runq_insert(ops, svc);
+            runq_insert(ops, svc);
     }
     vcpu_schedule_unlock_irq(lock, vc);
 
@@ -878,8 +884,8 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
     BUG_ON( sdom == NULL );
 
     lock = vcpu_schedule_lock_irq(vc);
-    if ( __vcpu_on_q(svc) )
-        __q_remove(svc);
+    if ( vcpu_on_q(svc) )
+        q_remove(svc);
 
     if ( vcpu_on_replq(svc) )
         replq_remove(ops,svc);
@@ -966,7 +972,7 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
  * lock is grabbed before calling this function
  */
 static struct rt_vcpu *
-__runq_pick(const struct scheduler *ops, const cpumask_t *mask)
+runq_pick(const struct scheduler *ops, const cpumask_t *mask)
 {
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter;
@@ -975,9 +981,9 @@ __runq_pick(const struct scheduler *ops, const cpumask_t *mask)
     cpumask_t cpu_common;
     cpumask_t *online;
 
-    list_for_each(iter, runq)
+    list_for_each ( iter, runq )
     {
-        iter_svc = __q_elem(iter);
+        iter_svc = q_elem(iter);
 
         /* mask cpu_hard_affinity & cpupool & mask */
         online = cpupool_domain_cpumask(iter_svc->vcpu->domain);
@@ -1039,7 +1045,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     }
     else
     {
-        snext = __runq_pick(ops, cpumask_of(cpu));
+        snext = runq_pick(ops, cpumask_of(cpu));
         if ( snext == NULL )
             snext = rt_vcpu(idle_vcpu[cpu]);
 
@@ -1063,7 +1069,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     {
         if ( snext != scurr )
         {
-            __q_remove(snext);
+            q_remove(snext);
             set_bit(__RTDS_scheduled, &snext->flags);
         }
         if ( snext->vcpu->processor != cpu )
@@ -1092,9 +1098,9 @@ rt_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 
     if ( curr_on_cpu(vc->processor) == vc )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
-    else if ( __vcpu_on_q(svc) )
+    else if ( vcpu_on_q(svc) )
     {
-        __q_remove(svc);
+        q_remove(svc);
         replq_remove(ops, svc);
     }
     else if ( svc->flags & RTDS_delayed_runq_add )
@@ -1212,7 +1218,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
     }
 
     /* on RunQ/DepletedQ, just update info is ok */
-    if ( unlikely(__vcpu_on_q(svc)) )
+    if ( unlikely(vcpu_on_q(svc)) )
     {
         SCHED_STAT_CRANK(vcpu_wake_onrunq);
         return;
@@ -1257,7 +1263,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
     /* Replenishment event got cancelled when we blocked. Add it back. */
     replq_insert(ops, svc);
     /* insert svc to runq/depletedq because svc is not in queue now */
-    __runq_insert(ops, svc);
+    runq_insert(ops, svc);
 
     runq_tickle(ops, svc);
 }
@@ -1280,7 +1286,7 @@ rt_context_saved(const struct scheduler *ops, struct vcpu *vc)
     if ( test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
          likely(vcpu_runnable(vc)) )
     {
-        __runq_insert(ops, svc);
+        runq_insert(ops, svc);
         runq_tickle(ops, svc);
     }
     else
@@ -1428,10 +1434,10 @@ static void repl_timer_handler(void *data){
         rt_update_deadline(now, svc);
         list_add(&svc->replq_elem, &tmp_replq);
 
-        if ( __vcpu_on_q(svc) )
+        if ( vcpu_on_q(svc) )
         {
-            __q_remove(svc);
-            __runq_insert(ops, svc);
+            q_remove(svc);
+            runq_insert(ops, svc);
         }
     }
 
@@ -1449,12 +1455,12 @@ static void repl_timer_handler(void *data){
         if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu &&
              !list_empty(runq) )
         {
-            struct rt_vcpu *next_on_runq = __q_elem(runq->next);
+            struct rt_vcpu *next_on_runq = q_elem(runq->next);
 
             if ( svc->cur_deadline > next_on_runq->cur_deadline )
                 runq_tickle(ops, next_on_runq);
         }
-        else if ( __vcpu_on_q(svc) &&
+        else if ( vcpu_on_q(svc) &&
                   test_and_clear_bit(__RTDS_depleted, &svc->flags) )
             runq_tickle(ops, svc);
 
--
generated by git-patchbot for /home/xen/git/xen.git#master
