
[Xen-changelog] [xen master] xen: sched: improve robustness (and rename) DOM2OP()



commit 64c9b1ed69d5d62516869d428c5508fba8789636
Author:     Dario Faggioli <dario.faggioli@xxxxxxxxxx>
AuthorDate: Fri Apr 7 14:28:01 2017 +0200
Commit:     George Dunlap <george.dunlap@xxxxxxxxxx>
CommitDate: Fri Apr 7 15:53:14 2017 +0100

    xen: sched: improve robustness (and rename) DOM2OP()
    
    Clarify and enforce (with ASSERTs) when the function
    is called on the idle domain, and explain in comments
    what it means and when it is ok to do so.
    
    While there, change the name of the function to a more
    self-explanatory one, and do the same to VCPU2OP.
    
    Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
    Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
 xen/common/schedule.c | 56 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 37 insertions(+), 19 deletions(-)
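
The heart of the change is in the first hunk below: the DOM2OP() macro, which
silently fell back to the boot-time default scheduler whenever d->cpupool was
NULL, becomes the inline helper dom_scheduler(), which documents that fallback
and asserts that it only ever happens for the idle domain.  What follows is a
simplified, standalone sketch of the before/after pattern; the type layouts
and the is_idle field are illustrative stand-ins, not the real Xen
declarations.

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-ins for the real Xen types. */
struct scheduler { int sched_id; };
struct cpupool   { struct scheduler *sched; };
struct domain    { struct cpupool *cpupool; int is_idle; };

static struct scheduler boot_ops;   /* plays the role of the global 'ops' */

/* Old style: a macro that silently falls back to the boot scheduler. */
#define DOM2OP(d) (((d)->cpupool == NULL) ? &boot_ops : (d)->cpupool->sched)

/*
 * New style: same result, but the NULL-cpupool case is documented and
 * enforced: only the idle domain may legitimately lack a cpupool.
 */
static inline struct scheduler *dom_scheduler(const struct domain *d)
{
    if ( d->cpupool != NULL )
        return d->cpupool->sched;

    assert(d->is_idle);   /* stands in for ASSERT(is_idle_domain(d)) */
    return &boot_ops;
}

int main(void)
{
    struct cpupool pool = { .sched = &boot_ops };
    struct domain dom = { .cpupool = &pool, .is_idle = 0 };

    /* An ordinary domain gets its cpupool's scheduler, never the fallback. */
    return dom_scheduler(&dom) == pool.sched ? 0 : 1;
}

One likely benefit of the inline-function form over the macro is that the
argument is type-checked and the fallback path has a single, obvious place to
carry both the explanatory comment and the ASSERT.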

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index d344b7c..5b44a56 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -77,8 +77,25 @@ static struct scheduler __read_mostly ops;
          (( (opsptr)->fn != NULL ) ? (opsptr)->fn(opsptr, ##__VA_ARGS__ )  \
           : (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0 )
 
-#define DOM2OP(_d)    (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
-static inline struct scheduler *VCPU2OP(const struct vcpu *v)
+static inline struct scheduler *dom_scheduler(const struct domain *d)
+{
+    if ( likely(d->cpupool != NULL) )
+        return d->cpupool->sched;
+
+    /*
+     * If d->cpupool is NULL, this is the idle domain. This is special
+     * because the idle domain does not really belong to any cpupool, and,
+     * hence, does not really have a scheduler.
+     *
+     * This is (should be!) only called like this for allocating the idle
+     * vCPUs for the first time, during boot, in which case what we want
+     * is the default scheduler that has been chosen at boot.
+     */
+    ASSERT(is_idle_domain(d));
+    return &ops;
+}
+
+static inline struct scheduler *vcpu_scheduler(const struct vcpu *v)
 {
     struct domain *d = v->domain;
 
@@ -260,7 +277,8 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     init_timer(&v->poll_timer, poll_timer_fn,
                v, v->processor);
 
-    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
+    v->sched_priv = SCHED_OP(dom_scheduler(d), alloc_vdata, v,
+                            d->sched_priv);
     if ( v->sched_priv == NULL )
         return 1;
 
@@ -272,7 +290,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     }
     else
     {
-        SCHED_OP(DOM2OP(d), insert_vcpu, v);
+        SCHED_OP(dom_scheduler(d), insert_vcpu, v);
     }
 
     return 0;
@@ -326,7 +344,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
     domain_pause(d);
 
-    old_ops = DOM2OP(d);
+    old_ops = dom_scheduler(d);
     old_domdata = d->sched_priv;
 
     for_each_vcpu ( d, v )
@@ -389,8 +407,8 @@ void sched_destroy_vcpu(struct vcpu *v)
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
         atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
-    SCHED_OP(VCPU2OP(v), remove_vcpu, v);
-    SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
+    SCHED_OP(vcpu_scheduler(v), remove_vcpu, v);
+    SCHED_OP(vcpu_scheduler(v), free_vdata, v->sched_priv);
 }
 
 int sched_init_domain(struct domain *d, int poolid)
@@ -404,7 +422,7 @@ int sched_init_domain(struct domain *d, int poolid)
 
     SCHED_STAT_CRANK(dom_init);
     TRACE_1D(TRC_SCHED_DOM_ADD, d->domain_id);
-    return SCHED_OP(DOM2OP(d), init_domain, d);
+    return SCHED_OP(dom_scheduler(d), init_domain, d);
 }
 
 void sched_destroy_domain(struct domain *d)
@@ -413,7 +431,7 @@ void sched_destroy_domain(struct domain *d)
 
     SCHED_STAT_CRANK(dom_destroy);
     TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
-    SCHED_OP(DOM2OP(d), destroy_domain, d);
+    SCHED_OP(dom_scheduler(d), destroy_domain, d);
 
     cpupool_rm_domain(d);
 }
@@ -432,7 +450,7 @@ void vcpu_sleep_nosync(struct vcpu *v)
         if ( v->runstate.state == RUNSTATE_runnable )
             vcpu_runstate_change(v, RUNSTATE_offline, NOW());
 
-        SCHED_OP(VCPU2OP(v), sleep, v);
+        SCHED_OP(vcpu_scheduler(v), sleep, v);
     }
 
     vcpu_schedule_unlock_irqrestore(lock, flags, v);
@@ -461,7 +479,7 @@ void vcpu_wake(struct vcpu *v)
     {
         if ( v->runstate.state >= RUNSTATE_blocked )
             vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
-        SCHED_OP(VCPU2OP(v), wake, v);
+        SCHED_OP(vcpu_scheduler(v), wake, v);
     }
     else if ( !(v->pause_flags & VPF_blocked) )
     {
@@ -516,8 +534,8 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
      * Actual CPU switch to new CPU.  This is safe because the lock
      * pointer can't change while the current lock is held.
      */
-    if ( VCPU2OP(v)->migrate )
-        SCHED_OP(VCPU2OP(v), migrate, v, new_cpu);
+    if ( vcpu_scheduler(v)->migrate )
+        SCHED_OP(vcpu_scheduler(v), migrate, v, new_cpu);
     else
         v->processor = new_cpu;
 }
@@ -583,7 +601,7 @@ static void vcpu_migrate(struct vcpu *v)
                 break;
 
             /* Select a new CPU. */
-            new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
+            new_cpu = SCHED_OP(vcpu_scheduler(v), pick_cpu, v);
             if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
@@ -685,7 +703,7 @@ void restore_vcpu_affinity(struct domain *d)
         spin_unlock_irq(lock);
 
         lock = vcpu_schedule_lock_irq(v);
-        v->processor = SCHED_OP(VCPU2OP(v), pick_cpu, v);
+        v->processor = SCHED_OP(vcpu_scheduler(v), pick_cpu, v);
         spin_unlock_irq(lock);
     }
 
@@ -975,7 +993,7 @@ long vcpu_yield(void)
     struct vcpu * v=current;
     spinlock_t *lock = vcpu_schedule_lock_irq(v);
 
-    SCHED_OP(VCPU2OP(v), yield, v);
+    SCHED_OP(vcpu_scheduler(v), yield, v);
     vcpu_schedule_unlock_irq(lock, v);
 
     SCHED_STAT_CRANK(vcpu_yield);
@@ -1288,7 +1306,7 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
     if ( ret )
         return ret;
 
-    if ( op->sched_id != DOM2OP(d)->sched_id )
+    if ( op->sched_id != dom_scheduler(d)->sched_id )
         return -EINVAL;
 
     switch ( op->cmd )
@@ -1304,7 +1322,7 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
 
     /* NB: the pluggable scheduler code needs to take care
      * of locking by itself. */
-    if ( (ret = SCHED_OP(DOM2OP(d), adjust, d, op)) == 0 )
+    if ( (ret = SCHED_OP(dom_scheduler(d), adjust, d, op)) == 0 )
         TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
 
     return ret;
@@ -1482,7 +1500,7 @@ void context_saved(struct vcpu *prev)
     /* Check for migration request /after/ clearing running flag. */
     smp_mb();
 
-    SCHED_OP(VCPU2OP(prev), context_saved, prev);
+    SCHED_OP(vcpu_scheduler(prev), context_saved, prev);
 
     if ( unlikely(prev->pause_flags & VPF_migrating) )
         vcpu_migrate(prev);
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

