
[Xen-devel] [PATCH 7/9] xen/sched: switch scheduling to bool where appropriate



Scheduling code has several places using int or bool_t instead of bool.
Switch those to bool.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
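For reviewers less familiar with the idiom, every conversion follows the same
pattern: an int/bool_t flag driven by 0/1 literals becomes a C99 bool driven
by false/true, with no change in behaviour. A minimal sketch of the pattern
(hypothetical helper, not code from this patch):

    #include <stdbool.h>

    /* Before: integer-style flag, callers pass 0 or 1. */
    static int is_exact_match_old(int id, int wanted, int exact)
    {
        return exact ? (id == wanted) : 1;
    }

    /* After: same logic expressed with bool, callers pass false or true. */
    static bool is_exact_match_new(int id, int wanted, bool exact)
    {
        return exact ? (id == wanted) : true;
    }
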
 xen/common/sched/cpupool.c        | 10 +++++-----
 xen/common/sched/sched-if.h       |  2 +-
 xen/common/sched/sched_arinc653.c |  8 ++++----
 xen/common/sched/sched_credit.c   | 12 ++++++------
 xen/common/sched/sched_rt.c       | 14 +++++++-------
 xen/common/sched/schedule.c       | 14 +++++++-------
 xen/include/xen/sched.h           |  6 +++---
 7 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index d5b64d0a6a..14212bb4ae 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -154,7 +154,7 @@ static struct cpupool *alloc_cpupool_struct(void)
  * the searched id is returned
  * returns NULL if not found.
  */
-static struct cpupool *__cpupool_find_by_id(int id, int exact)
+static struct cpupool *__cpupool_find_by_id(int id, bool exact)
 {
     struct cpupool **q;
 
@@ -169,10 +169,10 @@ static struct cpupool *__cpupool_find_by_id(int id, int exact)
 
 static struct cpupool *cpupool_find_by_id(int poolid)
 {
-    return __cpupool_find_by_id(poolid, 1);
+    return __cpupool_find_by_id(poolid, true);
 }
 
-static struct cpupool *__cpupool_get_by_id(int poolid, int exact)
+static struct cpupool *__cpupool_get_by_id(int poolid, bool exact)
 {
     struct cpupool *c;
     spin_lock(&cpupool_lock);
@@ -185,12 +185,12 @@ static struct cpupool *__cpupool_get_by_id(int poolid, int exact)
 
 struct cpupool *cpupool_get_by_id(int poolid)
 {
-    return __cpupool_get_by_id(poolid, 1);
+    return __cpupool_get_by_id(poolid, true);
 }
 
 static struct cpupool *cpupool_get_next_by_id(int poolid)
 {
-    return __cpupool_get_by_id(poolid, 0);
+    return __cpupool_get_by_id(poolid, false);
 }
 
 void cpupool_put(struct cpupool *pool)
diff --git a/xen/common/sched/sched-if.h b/xen/common/sched/sched-if.h
index edce354dc7..9d0db75cbb 100644
--- a/xen/common/sched/sched-if.h
+++ b/xen/common/sched/sched-if.h
@@ -589,7 +589,7 @@ unsigned int cpupool_get_granularity(const struct cpupool *c);
  * * The hard affinity is not a subset of soft affinity
  * * There is an overlap between the soft and hard affinity masks
  */
-static inline int has_soft_affinity(const struct sched_unit *unit)
+static inline bool has_soft_affinity(const struct sched_unit *unit)
 {
     return unit->soft_aff_effective &&
            !cpumask_subset(cpupool_domain_master_cpumask(unit->domain),
diff --git a/xen/common/sched/sched_arinc653.c b/xen/common/sched/sched_arinc653.c
index fe15754900..dc45378952 100644
--- a/xen/common/sched/sched_arinc653.c
+++ b/xen/common/sched/sched_arinc653.c
@@ -75,7 +75,7 @@ typedef struct arinc653_unit_s
      * arinc653_unit_t pointer. */
     struct sched_unit * unit;
     /* awake holds whether the UNIT has been woken with vcpu_wake() */
-    bool_t              awake;
+    bool                awake;
     /* list holds the linked list information for the list this UNIT
      * is stored in */
     struct list_head    list;
@@ -427,7 +427,7 @@ a653sched_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
      * will mark the UNIT awake.
      */
     svc->unit = unit;
-    svc->awake = 0;
+    svc->awake = false;
     if ( !is_idle_unit(unit) )
         list_add(&svc->list, &SCHED_PRIV(ops)->unit_list);
     update_schedule_units(ops);
@@ -473,7 +473,7 @@ static void
 a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     if ( AUNIT(unit) != NULL )
-        AUNIT(unit)->awake = 0;
+        AUNIT(unit)->awake = false;
 
     /*
      * If the UNIT being put to sleep is the same one that is currently
@@ -493,7 +493,7 @@ static void
 a653sched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     if ( AUNIT(unit) != NULL )
-        AUNIT(unit)->awake = 1;
+        AUNIT(unit)->awake = true;
 
     cpu_raise_softirq(sched_unit_master(unit), SCHEDULE_SOFTIRQ);
 }
diff --git a/xen/common/sched/sched_credit.c b/xen/common/sched/sched_credit.c
index 8b1de9b033..05930261d9 100644
--- a/xen/common/sched/sched_credit.c
+++ b/xen/common/sched/sched_credit.c
@@ -245,7 +245,7 @@ __runq_elem(struct list_head *elem)
 }
 
 /* Is the first element of cpu's runq (if any) cpu's idle unit? */
-static inline bool_t is_runq_idle(unsigned int cpu)
+static inline bool is_runq_idle(unsigned int cpu)
 {
     /*
      * We're peeking at cpu's runq, we must hold the proper lock.
@@ -344,7 +344,7 @@ static void burn_credits(struct csched_unit *svc, s_time_t now)
     svc->start_time += (credits * MILLISECS(1)) / CSCHED_CREDITS_PER_MSEC;
 }
 
-static bool_t __read_mostly opt_tickle_one_idle = 1;
+static bool __read_mostly opt_tickle_one_idle = true;
 boolean_param("tickle_one_idle_cpu", opt_tickle_one_idle);
 
 DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
@@ -719,7 +719,7 @@ __csched_unit_is_migrateable(const struct csched_private *prv,
 
 static int
 _csched_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit,
-                 bool_t commit)
+                 bool commit)
 {
     int cpu = sched_unit_master(unit);
     /* We must always use cpu's scratch space */
@@ -871,7 +871,7 @@ csched_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
      * get boosted, which we don't deserve as we are "only" migrating.
      */
     set_bit(CSCHED_FLAG_UNIT_MIGRATING, &svc->flags);
-    return get_sched_res(_csched_cpu_pick(ops, unit, 1));
+    return get_sched_res(_csched_cpu_pick(ops, unit, true));
 }
 
 static inline void
@@ -975,7 +975,7 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
          * migrating it to run elsewhere (see multi-core and multi-thread
          * support in csched_res_pick()).
          */
-        new_cpu = _csched_cpu_pick(ops, currunit, 0);
+        new_cpu = _csched_cpu_pick(ops, currunit, false);
 
         unit_schedule_unlock_irqrestore(lock, flags, currunit);
 
@@ -1108,7 +1108,7 @@ static void
 csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_unit * const svc = CSCHED_UNIT(unit);
-    bool_t migrating;
+    bool migrating;
 
     BUG_ON( is_idle_unit(unit) );
 
diff --git a/xen/common/sched/sched_rt.c b/xen/common/sched/sched_rt.c
index 264a753116..8646d77343 100644
--- a/xen/common/sched/sched_rt.c
+++ b/xen/common/sched/sched_rt.c
@@ -490,10 +490,10 @@ rt_update_deadline(s_time_t now, struct rt_unit *svc)
 static inline bool
 deadline_queue_remove(struct list_head *queue, struct list_head *elem)
 {
-    int pos = 0;
+    bool pos = false;
 
     if ( queue->next != elem )
-        pos = 1;
+        pos = true;
 
     list_del_init(elem);
     return !pos;
@@ -505,14 +505,14 @@ deadline_queue_insert(struct rt_unit * (*qelem)(struct list_head *),
                       struct list_head *queue)
 {
     struct list_head *iter;
-    int pos = 0;
+    bool pos = false;
 
     list_for_each ( iter, queue )
     {
         struct rt_unit * iter_svc = (*qelem)(iter);
         if ( compare_unit_priority(svc, iter_svc) > 0 )
             break;
-        pos++;
+        pos = true;
     }
     list_add_tail(elem, iter);
     return !pos;
@@ -605,7 +605,7 @@ replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct list_head *replq = rt_replq(ops);
     struct rt_unit *rearm_svc = svc;
-    bool_t rearm = 0;
+    bool rearm = false;
 
     ASSERT( unit_on_replq(svc) );
 
@@ -622,7 +622,7 @@ replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
     {
         deadline_replq_insert(svc, &svc->replq_elem, replq);
         rearm_svc = replq_elem(replq->next);
-        rearm = 1;
+        rearm = true;
     }
     else
         rearm = deadline_replq_insert(svc, &svc->replq_elem, replq);
@@ -1279,7 +1279,7 @@ rt_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct rt_unit * const svc = rt_unit(unit);
     s_time_t now;
-    bool_t missed;
+    bool missed;
 
     BUG_ON( is_idle_unit(unit) );
 
diff --git a/xen/common/sched/schedule.c b/xen/common/sched/schedule.c
index db8ce146ca..3307e88b6c 100644
--- a/xen/common/sched/schedule.c
+++ b/xen/common/sched/schedule.c
@@ -53,7 +53,7 @@ string_param("sched", opt_sched);
  * scheduler will give preferrence to partially idle package compared to
  * the full idle package, when picking pCPU to schedule vCPU.
  */
-bool_t sched_smt_power_savings = 0;
+bool sched_smt_power_savings;
 boolean_param("sched_smt_power_savings", sched_smt_power_savings);
 
 /* Default scheduling rate limit: 1ms
@@ -574,7 +574,7 @@ int sched_init_vcpu(struct vcpu *v)
     {
         get_sched_res(v->processor)->curr = unit;
         get_sched_res(v->processor)->sched_unit_idle = unit;
-        v->is_running = 1;
+        v->is_running = true;
         unit->is_running = true;
         unit->state_entry_time = NOW();
     }
@@ -983,7 +983,7 @@ static void sched_unit_migrate_finish(struct sched_unit *unit)
     unsigned long flags;
     unsigned int old_cpu, new_cpu;
     spinlock_t *old_lock, *new_lock;
-    bool_t pick_called = 0;
+    bool pick_called = false;
     struct vcpu *v;
 
     /*
@@ -1029,7 +1029,7 @@ static void sched_unit_migrate_finish(struct sched_unit *unit)
             if ( (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, unit->domain->cpupool->cpu_valid) )
                 break;
-            pick_called = 1;
+            pick_called = true;
         }
         else
         {
@@ -1037,7 +1037,7 @@ static void sched_unit_migrate_finish(struct sched_unit *unit)
              * We do not hold the scheduler lock appropriate for this vCPU.
              * Thus we cannot select a new CPU on this iteration. Try again.
              */
-            pick_called = 0;
+            pick_called = false;
         }
 
         sched_spin_unlock_double(old_lock, new_lock, flags);
@@ -2148,7 +2148,7 @@ static void sched_switch_units(struct sched_resource *sr,
             vcpu_runstate_change(vnext, vnext->new_state, now);
         }
 
-        vnext->is_running = 1;
+        vnext->is_running = true;
 
         if ( is_idle_vcpu(vnext) )
             vnext->sched_unit = next;
@@ -2219,7 +2219,7 @@ static void vcpu_context_saved(struct vcpu *vprev, struct vcpu *vnext)
     smp_wmb();
 
     if ( vprev != vnext )
-        vprev->is_running = 0;
+        vprev->is_running = false;
 }
 
 static void unit_context_saved(struct sched_resource *sr)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 55335d6ab3..b2f48a3512 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -557,18 +557,18 @@ static inline bool is_system_domain(const struct domain *d)
  * Use this when you don't have an existing reference to @d. It returns
  * FALSE if @d is being destroyed.
  */
-static always_inline int get_domain(struct domain *d)
+static always_inline bool get_domain(struct domain *d)
 {
     int old, seen = atomic_read(&d->refcnt);
     do
     {
         old = seen;
         if ( unlikely(old & DOMAIN_DESTROYED) )
-            return 0;
+            return false;
         seen = atomic_cmpxchg(&d->refcnt, old, old + 1);
     }
     while ( unlikely(seen != old) );
-    return 1;
+    return true;
 }
 
 /*
-- 
2.16.4

