[Xen-devel] [PATCH 1/2] xen: sched: rtds refactor code



No functional change:
 - Various coding style fixes
 - Dropped the "__" prefix from local queue helpers (e.g. __runq_insert
   becomes runq_insert)
 - Added a comment explaining UPDATE_LIMIT_SHIFT

Signed-off-by: Tianyang Chen <tiche@xxxxxxxxxxxxxx>
---
 xen/common/sched_rt.c |  106 ++++++++++++++++++++++++++-----------------------
 1 file changed, 56 insertions(+), 50 deletions(-)

diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 7f8f411..1584d53 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -80,7 +80,7 @@
  * in schedule.c
  *
  * The functions involes RunQ and needs to grab locks are:
- *    vcpu_insert, vcpu_remove, context_saved, __runq_insert
+ *    vcpu_insert, vcpu_remove, context_saved, runq_insert
  */
 
 
@@ -107,6 +107,12 @@
  */
 #define RTDS_MIN_BUDGET     (MICROSECS(10))
 
+/*
+ * UPDATE_LIMIT_SHIFT: a constant used in rt_update_deadline(). When finding
+ * the next deadline, repeated addition is faster if the difference between
+ * cur_deadline and now is small. If the difference is larger than
+ * 1024 * period, use multiplication instead.
+ */
 #define UPDATE_LIMIT_SHIFT      10
 
 /*
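
For context, the fast/slow split described by the UPDATE_LIMIT_SHIFT comment
above works roughly as in the sketch below. This is a reconstruction of
rt_update_deadline()'s logic from the surrounding definitions, not the exact
function body in the tree; it assumes the rt_vcpu period/budget fields used
elsewhere in this file.

    static void
    rt_update_deadline(s_time_t now, struct rt_vcpu *svc)
    {
        ASSERT(now >= svc->cur_deadline);
        ASSERT(svc->period != 0);

        if ( svc->cur_deadline + (svc->period << UPDATE_LIMIT_SHIFT) > now )
        {
            /* Small gap: stepping forward one period at a time is cheap. */
            do
                svc->cur_deadline += svc->period;
            while ( svc->cur_deadline <= now );
        }
        else
        {
            /* Gap above 1024 periods: one division/multiplication wins. */
            long missed = (now - svc->cur_deadline) / svc->period;
            svc->cur_deadline += (missed + 1) * svc->period;
        }

        /* A new period begins, so the budget is replenished in full. */
        svc->cur_budget = svc->budget;
    }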
@@ -158,25 +164,25 @@
 static void repl_timer_handler(void *data);
 
 /*
- * Systme-wide private data, include global RunQueue/DepletedQ
+ * System-wide private data, including the global RunQueue/DepletedQ
  * Global lock is referenced by schedule_data.schedule_lock from all
  * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
  */
 struct rt_private {
-    spinlock_t lock;            /* the global coarse grand lock */
-    struct list_head sdom;      /* list of availalbe domains, used for dump */
-    struct list_head runq;      /* ordered list of runnable vcpus */
-    struct list_head depletedq; /* unordered list of depleted vcpus */
-    struct list_head replq;     /* ordered list of vcpus that need replenishment */
-    cpumask_t tickled;          /* cpus been tickled */
-    struct timer *repl_timer;   /* replenishment timer */
+    spinlock_t lock;             /* the global coarse-grained lock */
+    struct list_head sdom;       /* list of available domains, used for dump */
+    struct list_head runq;       /* ordered list of runnable vcpus */
+    struct list_head depletedq;  /* unordered list of depleted vcpus */
+    struct list_head replq;      /* ordered list of vcpus that need replenishment */
+    cpumask_t tickled;           /* cpus that have been tickled */
+    struct timer *repl_timer;    /* replenishment timer */
 };
 
 /*
  * Virtual CPU
  */
 struct rt_vcpu {
-    struct list_head q_elem;    /* on the runq/depletedq list */
+    struct list_head q_elem;     /* on the runq/depletedq list */
     struct list_head replq_elem; /* on the replenishment events list */
 
     /* Up-pointers */
@@ -188,19 +194,19 @@ struct rt_vcpu {
     s_time_t budget;
 
     /* VCPU current infomation in nanosecond */
-    s_time_t cur_budget;        /* current budget */
-    s_time_t last_start;        /* last start time */
-    s_time_t cur_deadline;      /* current deadline for EDF */
+    s_time_t cur_budget;         /* current budget */
+    s_time_t last_start;         /* last start time */
+    s_time_t cur_deadline;       /* current deadline for EDF */
 
-    unsigned flags;             /* mark __RTDS_scheduled, etc.. */
+    unsigned flags;              /* mark __RTDS_scheduled, etc. */
 };
 
 /*
  * Domain
  */
 struct rt_dom {
-    struct list_head sdom_elem; /* link list on rt_priv */
-    struct domain *dom;         /* pointer to upper domain */
+    struct list_head sdom_elem;  /* linked list on rt_priv */
+    struct domain *dom;          /* pointer to upper domain */
 };
 
 /*
@@ -241,13 +247,13 @@ static inline struct list_head *rt_replq(const struct scheduler *ops)
  * and the replenishment events queue.
  */
 static int
-__vcpu_on_q(const struct rt_vcpu *svc)
+vcpu_on_q(const struct rt_vcpu *svc)
 {
    return !list_empty(&svc->q_elem);
 }
 
 static struct rt_vcpu *
-__q_elem(struct list_head *elem)
+q_elem(struct list_head *elem)
 {
     return list_entry(elem, struct rt_vcpu, q_elem);
 }
@@ -303,7 +309,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
             svc->cur_budget,
             svc->cur_deadline,
             svc->last_start,
-            __vcpu_on_q(svc),
+            vcpu_on_q(svc),
             vcpu_runnable(svc->vcpu),
             svc->flags,
             keyhandler_scratch);
@@ -339,28 +345,28 @@ rt_dump(const struct scheduler *ops)
     replq = rt_replq(ops);
 
     printk("Global RunQueue info:\n");
-    list_for_each( iter, runq )
+    list_for_each ( iter, runq )
     {
-        svc = __q_elem(iter);
+        svc = q_elem(iter);
         rt_dump_vcpu(ops, svc);
     }
 
     printk("Global DepletedQueue info:\n");
-    list_for_each( iter, depletedq )
+    list_for_each ( iter, depletedq )
     {
-        svc = __q_elem(iter);
+        svc = q_elem(iter);
         rt_dump_vcpu(ops, svc);
     }
 
     printk("Global Replenishment Events info:\n");
-    list_for_each( iter, replq )
+    list_for_each ( iter, replq )
     {
         svc = replq_elem(iter);
         rt_dump_vcpu(ops, svc);
     }
 
     printk("Domain info:\n");
-    list_for_each( iter, &prv->sdom )
+    list_for_each ( iter, &prv->sdom )
     {
         struct vcpu *v;
 
@@ -380,7 +386,7 @@ rt_dump(const struct scheduler *ops)
 
 /*
  * update deadline and budget when now >= cur_deadline
- * it need to be updated to the deadline of the current period
+ * it needs to be updated to the deadline of the current period
  */
 static void
 rt_update_deadline(s_time_t now, struct rt_vcpu *svc)
@@ -463,14 +469,14 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
     return !pos;
 }
 #define deadline_runq_insert(...) \
-  deadline_queue_insert(&__q_elem, ##__VA_ARGS__)
+  deadline_queue_insert(&q_elem, ##__VA_ARGS__)
 #define deadline_replq_insert(...) \
   deadline_queue_insert(&replq_elem, ##__VA_ARGS__)
 
 static inline void
-__q_remove(struct rt_vcpu *svc)
+q_remove(struct rt_vcpu *svc)
 {
-    ASSERT( __vcpu_on_q(svc) );
+    ASSERT( vcpu_on_q(svc) );
     list_del_init(&svc->q_elem);
 }
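
For reference, the generic helper behind the deadline_runq_insert and
deadline_replq_insert macros looks roughly like the sketch below,
reconstructed from the tail of the hunk above; the exact body in the tree
may differ.

    /*
     * Insert svc into a queue kept sorted by increasing cur_deadline.
     * qelem maps a list node back to its rt_vcpu (q_elem for the runq,
     * replq_elem for the replenishment queue). Returns nonzero iff svc
     * becomes the new queue head.
     */
    static inline int
    deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
                          struct rt_vcpu *svc, struct list_head *elem,
                          struct list_head *queue)
    {
        struct list_head *iter;
        int pos = 0;

        list_for_each ( iter, queue )
        {
            struct rt_vcpu *iter_svc = (*qelem)(iter);

            if ( svc->cur_deadline <= iter_svc->cur_deadline )
                break;
            pos++;
        }

        /* Insert before the first entry with a later deadline, or at the
         * tail when no such entry exists. */
        list_add_tail(elem, iter);
        return !pos;
    }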
 
@@ -506,13 +512,13 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
  * Insert svc without budget in DepletedQ unsorted;
  */
 static void
-__runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
 {
     struct rt_private *prv = rt_priv(ops);
     struct list_head *runq = rt_runq(ops);
 
     ASSERT( spin_is_locked(&prv->lock) );
-    ASSERT( !__vcpu_on_q(svc) );
+    ASSERT( !vcpu_on_q(svc) );
     ASSERT( vcpu_on_replq(svc) );
 
     /* add svc to runq if svc still has budget */
@@ -840,12 +846,12 @@ rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
     if ( now >= svc->cur_deadline )
         rt_update_deadline(now, svc);
 
-    if ( !__vcpu_on_q(svc) && vcpu_runnable(vc) )
+    if ( !vcpu_on_q(svc) && vcpu_runnable(vc) )
     {
         replq_insert(ops, svc);
 
         if ( !vc->is_running )
-            __runq_insert(ops, svc);
+            runq_insert(ops, svc);
     }
     vcpu_schedule_unlock_irq(lock, vc);
 
@@ -867,8 +873,8 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
     BUG_ON( sdom == NULL );
 
     lock = vcpu_schedule_lock_irq(vc);
-    if ( __vcpu_on_q(svc) )
-        __q_remove(svc);
+    if ( vcpu_on_q(svc) )
+        q_remove(svc);
 
     if ( vcpu_on_replq(svc) )
         replq_remove(ops,svc);
@@ -955,7 +961,7 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
  * lock is grabbed before calling this function
  */
 static struct rt_vcpu *
-__runq_pick(const struct scheduler *ops, const cpumask_t *mask)
+runq_pick(const struct scheduler *ops, const cpumask_t *mask)
 {
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter;
@@ -964,9 +970,9 @@ __runq_pick(const struct scheduler *ops, const cpumask_t *mask)
     cpumask_t cpu_common;
     cpumask_t *online;
 
-    list_for_each(iter, runq)
+    list_for_each ( iter, runq )
     {
-        iter_svc = __q_elem(iter);
+        iter_svc = q_elem(iter);
 
         /* mask cpu_hard_affinity & cpupool & mask */
         online = cpupool_domain_cpumask(iter_svc->vcpu->domain);
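
The hunk ends mid-loop; for context, the pick logic continues roughly as
sketched below (not part of this patch). The first vcpu on the
deadline-ordered runq whose hard affinity intersects both the pool's online
cpus and the caller's mask is picked.

        /* Keep only cpus present in all three masks. */
        cpumask_and(&cpu_common, online, iter_svc->vcpu->cpu_hard_affinity);
        cpumask_and(&cpu_common, mask, &cpu_common);
        if ( cpumask_empty(&cpu_common) )
            continue;               /* nowhere this vcpu may run; skip it */

        /* Runq is EDF-ordered: the first eligible vcpu wins. */
        svc = iter_svc;
        break;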
@@ -1028,7 +1034,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     }
     else
     {
-        snext = __runq_pick(ops, cpumask_of(cpu));
+        snext = runq_pick(ops, cpumask_of(cpu));
         if ( snext == NULL )
             snext = rt_vcpu(idle_vcpu[cpu]);
 
@@ -1052,7 +1058,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     {
         if ( snext != scurr )
         {
-            __q_remove(snext);
+            q_remove(snext);
             set_bit(__RTDS_scheduled, &snext->flags);
         }
         if ( snext->vcpu->processor != cpu )
@@ -1081,9 +1087,9 @@ rt_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 
     if ( curr_on_cpu(vc->processor) == vc )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
-    else if ( __vcpu_on_q(svc) )
+    else if ( vcpu_on_q(svc) )
     {
-        __q_remove(svc);
+        q_remove(svc);
         replq_remove(ops, svc);
     }
     else if ( svc->flags & RTDS_delayed_runq_add )
@@ -1201,7 +1207,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
     }
 
     /* on RunQ/DepletedQ, just update info is ok */
-    if ( unlikely(__vcpu_on_q(svc)) )
+    if ( unlikely(vcpu_on_q(svc)) )
     {
         SCHED_STAT_CRANK(vcpu_wake_onrunq);
         return;
@@ -1245,7 +1251,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
     /* Replenishment event got cancelled when we blocked. Add it back. */
     replq_insert(ops, svc);
     /* insert svc to runq/depletedq because svc is not in queue now */
-    __runq_insert(ops, svc);
+    runq_insert(ops, svc);
 
     runq_tickle(ops, svc);
 }
@@ -1268,7 +1274,7 @@ rt_context_saved(const struct scheduler *ops, struct vcpu *vc)
     if ( test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
          likely(vcpu_runnable(vc)) )
     {
-        __runq_insert(ops, svc);
+        runq_insert(ops, svc);
         runq_tickle(ops, svc);
     }
     else
@@ -1414,10 +1420,10 @@ static void repl_timer_handler(void *data){
         rt_update_deadline(now, svc);
         list_add(&svc->replq_elem, &tmp_replq);
 
-        if ( __vcpu_on_q(svc) )
+        if ( vcpu_on_q(svc) )
         {
-            __q_remove(svc);
-            __runq_insert(ops, svc);
+            q_remove(svc);
+            runq_insert(ops, svc);
         }
     }
 
@@ -1435,12 +1441,12 @@ static void repl_timer_handler(void *data){
         if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu &&
              !list_empty(runq) )
         {
-            struct rt_vcpu *next_on_runq = __q_elem(runq->next);
+            struct rt_vcpu *next_on_runq = q_elem(runq->next);
 
             if ( svc->cur_deadline > next_on_runq->cur_deadline )
                 runq_tickle(ops, next_on_runq);
         }
-        else if ( __vcpu_on_q(svc) &&
+        else if ( vcpu_on_q(svc) &&
                   test_and_clear_bit(__RTDS_depleted, &svc->flags) )
             runq_tickle(ops, svc);
 
-- 
1.7.9.5

