
[Xen-changelog] [xen staging] xen/sched: switch scheduling to bool where appropriate



commit e4dc8ded7b9af6adf0f2af2509729342e4149162
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Fri Nov 8 12:50:58 2019 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Jan 22 17:37:11 2020 +0000

    xen/sched: switch scheduling to bool where appropriate
    
    Scheduling code has several places using int or bool_t instead of bool.
    Switch those.
    
    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
    Reviewed-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
 xen/common/sched/arinc653.c |  8 ++++----
 xen/common/sched/core.c     | 14 +++++++-------
 xen/common/sched/cpupool.c  | 10 +++++-----
 xen/common/sched/credit.c   | 12 ++++++------
 xen/common/sched/private.h  |  2 +-
 xen/common/sched/rt.c       | 18 +++++++++---------
 xen/include/xen/sched.h     |  6 +++---
 7 files changed, 35 insertions(+), 35 deletions(-)
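
The conversion itself is mechanical: each bool_t or int flag becomes bool, and
its 0/1 assignments become false/true. For illustration only, a minimal,
self-contained sketch of the idiom in plain C (invented names, not Xen code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical unit with a flag that used to be "bool_t awake = 0/1". */
    struct unit {
        bool awake;                             /* was: bool_t / int */
    };

    static void unit_wake(struct unit *u)  { u->awake = true;  }  /* was: = 1 */
    static void unit_sleep(struct unit *u) { u->awake = false; }  /* was: = 0 */

    int main(void)
    {
        struct unit u = { .awake = false };

        unit_wake(&u);
        printf("awake: %s\n", u.awake ? "true" : "false");
        unit_sleep(&u);
        printf("awake: %s\n", u.awake ? "true" : "false");
        return 0;
    }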

diff --git a/xen/common/sched/arinc653.c b/xen/common/sched/arinc653.c
index 8895d92b5e..bce8021e3f 100644
--- a/xen/common/sched/arinc653.c
+++ b/xen/common/sched/arinc653.c
@@ -75,7 +75,7 @@ typedef struct arinc653_unit_s
      * arinc653_unit_t pointer. */
     struct sched_unit * unit;
     /* awake holds whether the UNIT has been woken with vcpu_wake() */
-    bool_t              awake;
+    bool                awake;
     /* list holds the linked list information for the list this UNIT
      * is stored in */
     struct list_head    list;
@@ -427,7 +427,7 @@ a653sched_alloc_udata(const struct scheduler *ops, struct sched_unit *unit,
      * will mark the UNIT awake.
      */
     svc->unit = unit;
-    svc->awake = 0;
+    svc->awake = false;
     if ( !is_idle_unit(unit) )
         list_add(&svc->list, &SCHED_PRIV(ops)->unit_list);
     update_schedule_units(ops);
@@ -473,7 +473,7 @@ static void
 a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     if ( AUNIT(unit) != NULL )
-        AUNIT(unit)->awake = 0;
+        AUNIT(unit)->awake = false;
 
     /*
      * If the UNIT being put to sleep is the same one that is currently
@@ -493,7 +493,7 @@ static void
 a653sched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     if ( AUNIT(unit) != NULL )
-        AUNIT(unit)->awake = 1;
+        AUNIT(unit)->awake = true;
 
     cpu_raise_softirq(sched_unit_master(unit), SCHEDULE_SOFTIRQ);
 }
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 4153d110be..896f82f4d2 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -53,7 +53,7 @@ string_param("sched", opt_sched);
  * scheduler will give preferrence to partially idle package compared to
  * the full idle package, when picking pCPU to schedule vCPU.
  */
-bool_t sched_smt_power_savings = 0;
+bool sched_smt_power_savings;
 boolean_param("sched_smt_power_savings", sched_smt_power_savings);
 
 /* Default scheduling rate limit: 1ms
@@ -574,7 +574,7 @@ int sched_init_vcpu(struct vcpu *v)
     {
         get_sched_res(v->processor)->curr = unit;
         get_sched_res(v->processor)->sched_unit_idle = unit;
-        v->is_running = 1;
+        v->is_running = true;
         unit->is_running = true;
         unit->state_entry_time = NOW();
     }
@@ -983,7 +983,7 @@ static void sched_unit_migrate_finish(struct sched_unit *unit)
     unsigned long flags;
     unsigned int old_cpu, new_cpu;
     spinlock_t *old_lock, *new_lock;
-    bool_t pick_called = 0;
+    bool pick_called = false;
     struct vcpu *v;
 
     /*
@@ -1029,7 +1029,7 @@ static void sched_unit_migrate_finish(struct sched_unit *unit)
             if ( (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, unit->domain->cpupool->cpu_valid) )
                 break;
-            pick_called = 1;
+            pick_called = true;
         }
         else
         {
@@ -1037,7 +1037,7 @@ static void sched_unit_migrate_finish(struct sched_unit *unit)
              * We do not hold the scheduler lock appropriate for this vCPU.
              * Thus we cannot select a new CPU on this iteration. Try again.
              */
-            pick_called = 0;
+            pick_called = false;
         }
 
         sched_spin_unlock_double(old_lock, new_lock, flags);
@@ -2148,7 +2148,7 @@ static void sched_switch_units(struct sched_resource *sr,
             vcpu_runstate_change(vnext, vnext->new_state, now);
         }
 
-        vnext->is_running = 1;
+        vnext->is_running = true;
 
         if ( is_idle_vcpu(vnext) )
             vnext->sched_unit = next;
@@ -2219,7 +2219,7 @@ static void vcpu_context_saved(struct vcpu *vprev, struct vcpu *vnext)
     smp_wmb();
 
     if ( vprev != vnext )
-        vprev->is_running = 0;
+        vprev->is_running = false;
 }
 
 static void unit_context_saved(struct sched_resource *sr)
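
The pick_called flag above tracks the "pick, re-lock, validate, retry" dance in
sched_unit_migrate_finish(): a CPU chosen while not holding the appropriate
scheduler lock cannot be trusted, so the pick is redone until it is confirmed
under the right lock. A loose, stand-alone sketch of that pattern (invented
names, pthread mutexes instead of Xen's scheduler locks, not the Xen code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct resource { pthread_mutex_t lock; bool online; };

    static struct resource pool[2] = {
        { PTHREAD_MUTEX_INITIALIZER, false },
        { PTHREAD_MUTEX_INITIALIZER, true  },
    };

    /* Pretend placement decision: the first online entry. */
    static struct resource *pick_resource(void)
    {
        for ( unsigned int i = 0; i < 2; i++ )
            if ( pool[i].online )
                return &pool[i];
        return &pool[0];
    }

    /*
     * Pick a resource, take its lock, then re-validate: a pick is only
     * trusted if it was made while holding the lock of the resource we
     * ended up with -- roughly what pick_called tracks in the hunks above.
     */
    static struct resource *acquire_resource(void)
    {
        bool pick_called = false;
        struct resource *r = pick_resource();

        for ( ; ; )
        {
            pthread_mutex_lock(&r->lock);

            if ( pick_called && r->online )
                return r;                   /* still valid: keep the lock */

            /* Not yet picked under a lock, or the pick went stale: redo it. */
            struct resource *n = pick_resource();
            pick_called = (n == r);

            pthread_mutex_unlock(&r->lock);
            r = n;
        }
    }

    int main(void)
    {
        struct resource *r = acquire_resource();

        printf("acquired resource %ld\n", (long)(r - pool));
        pthread_mutex_unlock(&r->lock);
        return 0;
    }
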
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 3060a7144a..28d5143e37 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -154,7 +154,7 @@ static struct cpupool *alloc_cpupool_struct(void)
  * the searched id is returned
  * returns NULL if not found.
  */
-static struct cpupool *__cpupool_find_by_id(int id, int exact)
+static struct cpupool *__cpupool_find_by_id(int id, bool exact)
 {
     struct cpupool **q;
 
@@ -169,10 +169,10 @@ static struct cpupool *__cpupool_find_by_id(int id, int exact)
 
 static struct cpupool *cpupool_find_by_id(int poolid)
 {
-    return __cpupool_find_by_id(poolid, 1);
+    return __cpupool_find_by_id(poolid, true);
 }
 
-static struct cpupool *__cpupool_get_by_id(int poolid, int exact)
+static struct cpupool *__cpupool_get_by_id(int poolid, bool exact)
 {
     struct cpupool *c;
     spin_lock(&cpupool_lock);
@@ -185,12 +185,12 @@ static struct cpupool *__cpupool_get_by_id(int poolid, int exact)
 
 struct cpupool *cpupool_get_by_id(int poolid)
 {
-    return __cpupool_get_by_id(poolid, 1);
+    return __cpupool_get_by_id(poolid, true);
 }
 
 static struct cpupool *cpupool_get_next_by_id(int poolid)
 {
-    return __cpupool_get_by_id(poolid, 0);
+    return __cpupool_get_by_id(poolid, false);
 }
 
 void cpupool_put(struct cpupool *pool)
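
Hedged aside on the new bool "exact" parameter: cpupool_find_by_id() and
cpupool_get_by_id() want the pool with exactly the requested id, while
cpupool_get_next_by_id() passes exact = false and, judging from its name, is
happy with the next existing pool as well. A rough sketch of that assumed
contract over a sorted array (invented names, not the Xen lookup code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Pool ids kept sorted, as a stand-in for the cpupool list. */
    static const int pool_ids[] = { 0, 2, 5 };

    static const int *find_by_id(int id, bool exact)
    {
        for ( size_t i = 0; i < sizeof(pool_ids) / sizeof(pool_ids[0]); i++ )
        {
            if ( pool_ids[i] == id || (!exact && pool_ids[i] > id) )
                return &pool_ids[i];
        }
        return NULL;                      /* not found */
    }

    int main(void)
    {
        const int *p;

        p = find_by_id(1, true);          /* exact: no pool 1 -> NULL */
        printf("exact(1): %s\n", p ? "found" : "NULL");

        p = find_by_id(1, false);         /* next-by-id: returns pool 2 */
        printf("next(1): %d\n", p ? *p : -1);
        return 0;
    }
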
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index 6b04f8f71c..a75efbd43d 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -245,7 +245,7 @@ __runq_elem(struct list_head *elem)
 }
 
 /* Is the first element of cpu's runq (if any) cpu's idle unit? */
-static inline bool_t is_runq_idle(unsigned int cpu)
+static inline bool is_runq_idle(unsigned int cpu)
 {
     /*
      * We're peeking at cpu's runq, we must hold the proper lock.
@@ -344,7 +344,7 @@ static void burn_credits(struct csched_unit *svc, s_time_t now)
     svc->start_time += (credits * MILLISECS(1)) / CSCHED_CREDITS_PER_MSEC;
 }
 
-static bool_t __read_mostly opt_tickle_one_idle = 1;
+static bool __read_mostly opt_tickle_one_idle = true;
 boolean_param("tickle_one_idle_cpu", opt_tickle_one_idle);
 
 DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
@@ -719,7 +719,7 @@ __csched_unit_is_migrateable(const struct csched_private *prv,
 
 static int
 _csched_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit,
-                 bool_t commit)
+                 bool commit)
 {
     int cpu = sched_unit_master(unit);
     /* We must always use cpu's scratch space */
@@ -871,7 +871,7 @@ csched_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
      * get boosted, which we don't deserve as we are "only" migrating.
      */
     set_bit(CSCHED_FLAG_UNIT_MIGRATING, &svc->flags);
-    return get_sched_res(_csched_cpu_pick(ops, unit, 1));
+    return get_sched_res(_csched_cpu_pick(ops, unit, true));
 }
 
 static inline void
@@ -975,7 +975,7 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
          * migrating it to run elsewhere (see multi-core and multi-thread
          * support in csched_res_pick()).
          */
-        new_cpu = _csched_cpu_pick(ops, currunit, 0);
+        new_cpu = _csched_cpu_pick(ops, currunit, false);
 
         unit_schedule_unlock_irqrestore(lock, flags, currunit);
 
@@ -1108,7 +1108,7 @@ static void
 csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_unit * const svc = CSCHED_UNIT(unit);
-    bool_t migrating;
+    bool migrating;
 
     BUG_ON( is_idle_unit(unit) );
 
diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
index b7284dc970..2a94179baa 100644
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -589,7 +589,7 @@ unsigned int cpupool_get_granularity(const struct cpupool *c);
  * * The hard affinity is not a subset of soft affinity
  * * There is an overlap between the soft and hard affinity masks
  */
-static inline int has_soft_affinity(const struct sched_unit *unit)
+static inline bool has_soft_affinity(const struct sched_unit *unit)
 {
     return unit->soft_aff_effective &&
            !cpumask_subset(cpupool_domain_master_cpumask(unit->domain),
diff --git a/xen/common/sched/rt.c b/xen/common/sched/rt.c
index ed72b9eb2f..c8cec50b56 100644
--- a/xen/common/sched/rt.c
+++ b/xen/common/sched/rt.c
@@ -490,13 +490,13 @@ rt_update_deadline(s_time_t now, struct rt_unit *svc)
 static inline bool
 deadline_queue_remove(struct list_head *queue, struct list_head *elem)
 {
-    int pos = 0;
+    bool first = false;
 
     if ( queue->next != elem )
-        pos = 1;
+        first = true;
 
     list_del_init(elem);
-    return !pos;
+    return !first;
 }
 
 static inline bool
@@ -505,17 +505,17 @@ deadline_queue_insert(struct rt_unit * (*qelem)(struct list_head *),
                       struct list_head *queue)
 {
     struct list_head *iter;
-    int pos = 0;
+    bool first = true;
 
     list_for_each ( iter, queue )
     {
         struct rt_unit * iter_svc = (*qelem)(iter);
         if ( compare_unit_priority(svc, iter_svc) > 0 )
             break;
-        pos++;
+        first = false;
     }
     list_add_tail(elem, iter);
-    return !pos;
+    return first;
 }
 #define deadline_runq_insert(...) \
   deadline_queue_insert(&q_elem, ##__VA_ARGS__)
@@ -605,7 +605,7 @@ replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct list_head *replq = rt_replq(ops);
     struct rt_unit *rearm_svc = svc;
-    bool_t rearm = 0;
+    bool rearm = false;
 
     ASSERT( unit_on_replq(svc) );
 
@@ -622,7 +622,7 @@ replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
     {
         deadline_replq_insert(svc, &svc->replq_elem, replq);
         rearm_svc = replq_elem(replq->next);
-        rearm = 1;
+        rearm = true;
     }
     else
         rearm = deadline_replq_insert(svc, &svc->replq_elem, replq);
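
The boolean coming back from deadline_replq_insert() tells the caller whether
the inserted element became the new head of the replenishment queue, i.e.
whether the one-shot timer needs rearming; that is what the rearm flag above
captures. A stand-alone sketch of a sorted insert that reports "became head"
(a plain singly linked list with invented names, not Xen's list_head code):

    #include <stdbool.h>
    #include <stdio.h>

    struct node { int deadline; struct node *next; };

    /*
     * Insert in deadline order and report whether the new node became the
     * head of the queue -- the case in which a caller would reprogram its
     * timer, mirroring how rearm is used above.
     */
    static bool sorted_insert(struct node **head, struct node *n)
    {
        struct node **pp = head;

        while ( *pp && (*pp)->deadline <= n->deadline )
            pp = &(*pp)->next;

        n->next = *pp;
        *pp = n;
        return pp == head;        /* true: new earliest deadline */
    }

    int main(void)
    {
        struct node a = { 30, NULL }, b = { 10, NULL };
        struct node *q = NULL;

        printf("a first? %d\n", sorted_insert(&q, &a));   /* 1: empty queue */
        printf("b first? %d\n", sorted_insert(&q, &b));   /* 1: earlier deadline */
        return 0;
    }
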
@@ -1279,7 +1279,7 @@ rt_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct rt_unit * const svc = rt_unit(unit);
     s_time_t now;
-    bool_t missed;
+    bool missed;
 
     BUG_ON( is_idle_unit(unit) );
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 7269c9fc39..6978d336d6 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -560,18 +560,18 @@ static inline bool is_system_domain(const struct domain *d)
  * Use this when you don't have an existing reference to @d. It returns
  * FALSE if @d is being destroyed.
  */
-static always_inline int get_domain(struct domain *d)
+static always_inline bool get_domain(struct domain *d)
 {
     int old, seen = atomic_read(&d->refcnt);
     do
     {
         old = seen;
         if ( unlikely(old & DOMAIN_DESTROYED) )
-            return 0;
+            return false;
         seen = atomic_cmpxchg(&d->refcnt, old, old + 1);
     }
     while ( unlikely(seen != old) );
-    return 1;
+    return true;
 }
 
 /*
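
The get_domain() hunk changes a return type rather than a local flag: the
function either atomically takes a reference or returns false because the
domain is being destroyed. For readers unfamiliar with the compare-and-swap
loop, a stand-alone sketch of the same pattern with C11 atomics (invented
names, a made-up DESTROYED_BIT in place of DOMAIN_DESTROYED, not the Xen code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define DESTROYED_BIT (1 << 30)   /* stand-in for DOMAIN_DESTROYED */

    struct object { atomic_int refcnt; };

    /* Take a reference unless the object is already marked destroyed. */
    static bool get_object(struct object *obj)
    {
        int old = atomic_load(&obj->refcnt);

        do {
            if ( old & DESTROYED_BIT )
                return false;                 /* too late: do not resurrect */
        } while ( !atomic_compare_exchange_weak(&obj->refcnt, &old, old + 1) );

        return true;                          /* reference taken */
    }

    int main(void)
    {
        struct object o = { .refcnt = 1 };

        printf("got: %d (refcnt %d)\n", get_object(&o), atomic_load(&o.refcnt));

        atomic_fetch_or(&o.refcnt, DESTROYED_BIT);
        printf("got after destroy: %d\n", get_object(&o));
        return 0;
    }
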
--
generated by git-patchbot for /home/xen/git/xen.git#staging
