
[Xen-changelog] [xen stable-4.2] scheduler: adjust internal locking interface



commit 6bed2fe1f08ca86488af66d027192460b395eba4
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Fri Nov 15 11:32:51 2013 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Nov 15 11:32:51 2013 +0100

    scheduler: adjust internal locking interface
    
    Make the locking functions return the lock pointers, so they can be
    passed to the unlocking functions (which in turn can check that the
    lock is still actually providing the intended protection, i.e. the
    parameters determining which lock is the right one didn't change).
    
    Further use proper spin lock primitives rather than open coded
    local_irq_...() constructs, so that interrupts can be re-enabled as
    appropriate while spinning.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
    master commit: eedd60391610629b4e8a2e8278b857ff884f750d
    master date: 2013-10-14 08:57:56 +0200
---
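For reference, the caller-side pattern introduced here looks roughly like the
sketch below (the wrapper function is invented for illustration; the lock and
unlock helpers are the ones defined in xen/include/xen/sched-if.h by this
patch):

    /* Illustrative sketch only -- not part of the patch. */
    static void example_update(struct vcpu *v)
    {
        /* The lock routine now returns the lock it actually acquired... */
        spinlock_t *lock = vcpu_schedule_lock_irq(v);

        /* ...critical section protected by v's current scheduler lock... */

        /*
         * ...and the unlock routine takes that pointer back, so it can
         * ASSERT() that per_cpu(schedule_data, v->processor).schedule_lock
         * still refers to the same lock, i.e. the protection is still the
         * intended one.
         */
        vcpu_schedule_unlock_irq(lock, v);
    }

Likewise, the previously open-coded sequence

    local_irq_save(flags);
    pcpu_schedule_lock(cpu);
    ...
    pcpu_schedule_unlock(cpu);
    local_irq_restore(flags);

becomes a pair of calls to proper spin lock primitives,

    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
    ...
    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);

which lets the spinlock code re-enable interrupts as appropriate while
waiting for the lock.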
 xen/arch/x86/cpu/mcheck/mce.c |   31 +++++-----
 xen/common/sched_credit.c     |   12 +++-
 xen/common/sched_credit2.c    |   20 +++---
 xen/common/sched_sedf.c       |   29 +++++---
 xen/common/schedule.c         |   61 +++++++++---------
 xen/include/xen/sched-if.h    |  138 +++++++++++++++++------------------------
 6 files changed, 139 insertions(+), 152 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index d03769c..66d8dfa 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -401,7 +401,7 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
     mctelem_cookie_t mctc = NULL;
     struct mca_summary bs;
     struct mc_info *mci = NULL;
-    int irqlocked = 0;
+    spinlock_t *lock = NULL;
     uint64_t gstatus;
     int ripv;
 
@@ -411,8 +411,7 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
 
     /* Disable interrupts for the _vcpu_. It may not re-scheduled to
      * another physical CPU. */
-    vcpu_schedule_lock_irq(v);
-    irqlocked = 1;
+    lock = vcpu_schedule_lock_irq(v);
 
     /* Read global status;  if it does not indicate machine check
      * in progress then bail as long as we have a valid ip to return to. */
@@ -420,8 +419,8 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
     ripv = ((gstatus & MCG_STATUS_RIPV) != 0);
     if (!(gstatus & MCG_STATUS_MCIP) && ripv) {
         add_taint(TAINT_MACHINE_CHECK); /* questionable */
-        vcpu_schedule_unlock_irq(v);
-        irqlocked = 0;
+        vcpu_schedule_unlock_irq(lock, v);
+        lock = NULL;
         goto cmn_handler_done;
     }
 
@@ -440,8 +439,8 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
 
     /* If no valid errors and our stack is intact, we're done */
     if (ripv && bs.errcnt == 0) {
-        vcpu_schedule_unlock_irq(v);
-        irqlocked = 0;
+        vcpu_schedule_unlock_irq(lock, v);
+        lock = NULL;
         goto cmn_handler_done;
     }
 
@@ -484,8 +483,8 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
     if (xen_state_lost) {
         /* Now we are going to panic anyway. Allow interrupts, so that
          * printk on serial console can work. */
-        vcpu_schedule_unlock_irq(v);
-        irqlocked = 0;
+        vcpu_schedule_unlock_irq(lock, v);
+        lock = NULL;
 
         printk("Terminal machine check exception occurred in "
                "hypervisor context.\n");
@@ -557,8 +556,8 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
              * domain_crash() the vcpu pointer is invalid.
              * Therefore, we must unlock the irqs before killing
              * it. */
-            vcpu_schedule_unlock_irq(v);
-            irqlocked = 0;
+            vcpu_schedule_unlock_irq(lock, v);
+            lock = NULL;
 
             /* DomU is impacted. Kill it and continue. */
             domain_crash(curdom);
@@ -569,8 +568,8 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
     case DOM0_TRAP:
     case DOMU_TRAP:
         /* Enable interrupts. */
-        vcpu_schedule_unlock_irq(v);
-        irqlocked = 0;
+        vcpu_schedule_unlock_irq(lock, v);
+        lock = NULL;
 
         /* guest softirqs and event callbacks are scheduled
          * immediately after this handler exits. */
@@ -580,13 +579,13 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
         break;
 
     case DOM_NORMAL:
-        vcpu_schedule_unlock_irq(v);
-        irqlocked = 0;
+        vcpu_schedule_unlock_irq(lock, v);
+        lock = NULL;
         break;
     }
 
  cmn_handler_done:
-    BUG_ON(irqlocked);
+    BUG_ON(lock != NULL);
     BUG_ON(!ripv);
 
     if (bs.errcnt) {
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 68dc80b..ec30706 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -937,6 +937,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
     struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
     struct list_head *runq, *elem, *next, *last_under;
     struct csched_vcpu *svc_elem;
+    spinlock_t *lock;
     unsigned long flags;
     int sort_epoch;
 
@@ -946,7 +947,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
 
     spc->runq_sort_last = sort_epoch;
 
-    pcpu_schedule_lock_irqsave(cpu, flags);
+    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
     runq = &spc->runq;
     elem = runq->next;
@@ -971,7 +972,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
         elem = next;
     }
 
-    pcpu_schedule_unlock_irqrestore(cpu, flags);
+    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
 }
 
 static void
@@ -1288,6 +1289,8 @@ csched_load_balance(struct csched_private *prv, int cpu,
 
     while ( !cpumask_empty(&workers) )
     {
+        spinlock_t *lock;
+
         peer_cpu = cpumask_cycle(peer_cpu, &workers);
         cpumask_clear_cpu(peer_cpu, &workers);
 
@@ -1298,7 +1301,8 @@ csched_load_balance(struct csched_private *prv, int cpu,
          * cause a deadlock if the peer CPU is also load balancing and trying
          * to lock this CPU.
          */
-        if ( !pcpu_schedule_trylock(peer_cpu) )
+        lock = pcpu_schedule_trylock(peer_cpu);
+        if ( !lock )
         {
             CSCHED_STAT_CRANK(steal_trylock_failed);
             continue;
@@ -1309,7 +1313,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
          */
         speer = cpumask_test_cpu(peer_cpu, online) ?
             csched_runq_steal(peer_cpu, cpu, snext->pri) : NULL;
-        pcpu_schedule_unlock(peer_cpu);
+        pcpu_schedule_unlock(lock, peer_cpu);
         if ( speer != NULL )
         {
             *stolen = 1;
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 471f6d5..26d87b3 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -835,15 +835,17 @@ csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
      */
     if ( ! is_idle_vcpu(vc) )
     {
+        spinlock_t *lock;
+
         /* FIXME: Do we need the private lock here? */
         list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
 
         /* Add vcpu to runqueue of initial processor */
-        vcpu_schedule_lock_irq(vc);
+        lock = vcpu_schedule_lock_irq(vc);
 
         runq_assign(ops, vc);
 
-        vcpu_schedule_unlock_irq(vc);
+        vcpu_schedule_unlock_irq(lock, vc);
 
         sdom->nr_vcpus++;
     }
@@ -871,11 +873,11 @@ csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
     if ( ! is_idle_vcpu(vc) )
     {
         /* Remove from runqueue */
-        vcpu_schedule_lock_irq(vc);
+        spinlock_t *lock = vcpu_schedule_lock_irq(vc);
 
         runq_deassign(ops, vc);
 
-        vcpu_schedule_unlock_irq(vc);
+        vcpu_schedule_unlock_irq(lock, vc);
 
         /* Remove from sdom list.  Don't need a lock for this, as it's called
          * syncronously when nothing else can happen. */
@@ -962,8 +964,7 @@ csched_context_saved(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched_vcpu * const svc = CSCHED_VCPU(vc);
     s_time_t now = NOW();
-
-    vcpu_schedule_lock_irq(vc);
+    spinlock_t *lock = vcpu_schedule_lock_irq(vc);
 
     BUG_ON( !is_idle_vcpu(vc) && svc->rqd != RQD(ops, vc->processor));
 
@@ -989,7 +990,7 @@ csched_context_saved(const struct scheduler *ops, struct vcpu *vc)
     else if ( !is_idle_vcpu(vc) )
         update_load(ops, svc->rqd, svc, -1, now);
 
-    vcpu_schedule_unlock_irq(vc);
+    vcpu_schedule_unlock_irq(lock, vc);
 }
 
 #define MAX_LOAD (1ULL<<60);
@@ -1406,14 +1407,14 @@ csched_dom_cntl(
                  * must never lock csched_priv.lock if we're holding a runqueue lock.
                  * Also, calling vcpu_schedule_lock() is enough, since IRQs have already
                  * been disabled. */
-                vcpu_schedule_lock(svc->vcpu);
+                spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
 
                 BUG_ON(svc->rqd != RQD(ops, svc->vcpu->processor));
 
                 svc->weight = sdom->weight;
                 update_max_weight(svc->rqd, svc->weight, old_weight);
 
-                vcpu_schedule_unlock(svc->vcpu);
+                vcpu_schedule_unlock(lock, svc->vcpu);
             }
         }
     }
@@ -1923,6 +1924,7 @@ static void init_pcpu(const struct scheduler *ops, int cpu)
     cpumask_set_cpu(cpu, &rqd->idle);
     cpumask_set_cpu(cpu, &rqd->active);
 
+    /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
     spin_unlock(old_lock);
 
     cpumask_set_cpu(cpu, &prv->initialized);
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 92ba36a..d126d4f 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -1349,14 +1349,16 @@ static int sedf_adjust_weights(struct cpupool *c, int nr_cpus, int *sumw, s_time
             if ( EDOM_INFO(p)->weight )
             {
                 /* Interrupts already off */
-                vcpu_schedule_lock(p);
+                spinlock_t *lock = vcpu_schedule_lock(p);
+
                 EDOM_INFO(p)->period_orig = 
                     EDOM_INFO(p)->period  = WEIGHT_PERIOD;
                 EDOM_INFO(p)->slice_orig  =
                     EDOM_INFO(p)->slice   = 
                     (EDOM_INFO(p)->weight *
                      (WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[cpu])) / sumw[cpu];
-                vcpu_schedule_unlock(p);
+
+                vcpu_schedule_unlock(lock, p);
             }
         }
     }
@@ -1417,21 +1419,24 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen
                 {
                     /* (Here and everywhere in the following) IRQs are already off,
                      * hence vcpu_spin_lock() is the one. */
-                    vcpu_schedule_lock(v);
+                    spinlock_t *lock = vcpu_schedule_lock(v);
+
                     EDOM_INFO(v)->extraweight = op->u.sedf.weight;
                     EDOM_INFO(v)->weight = 0;
                     EDOM_INFO(v)->slice = 0;
                     EDOM_INFO(v)->period = WEIGHT_PERIOD;
-                    vcpu_schedule_unlock(v);
+                    vcpu_schedule_unlock(lock, v);
                 }
             }
             else
             {
                 /* Weight-driven domains with real-time execution */
-                for_each_vcpu ( p, v ) {
-                    vcpu_schedule_lock(v);
+                for_each_vcpu ( p, v )
+                {
+                    spinlock_t *lock = vcpu_schedule_lock(v);
+
                     EDOM_INFO(v)->weight = op->u.sedf.weight;
-                    vcpu_schedule_unlock(v);
+                    vcpu_schedule_unlock(lock, v);
                 }
             }
         }
@@ -1453,14 +1458,15 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen
             /* Time-driven domains */
             for_each_vcpu ( p, v )
             {
-                vcpu_schedule_lock(v);
+                spinlock_t *lock = vcpu_schedule_lock(v);
+
                 EDOM_INFO(v)->weight = 0;
                 EDOM_INFO(v)->extraweight = 0;
                 EDOM_INFO(v)->period_orig = 
                     EDOM_INFO(v)->period  = op->u.sedf.period;
                 EDOM_INFO(v)->slice_orig  = 
                     EDOM_INFO(v)->slice   = op->u.sedf.slice;
-                vcpu_schedule_unlock(v);
+                vcpu_schedule_unlock(lock, v);
             }
         }
 
@@ -1470,13 +1476,14 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen
 
         for_each_vcpu ( p, v )
         {
-            vcpu_schedule_lock(v);
+            spinlock_t *lock = vcpu_schedule_lock(v);
+
             EDOM_INFO(v)->status  = 
                 (EDOM_INFO(v)->status &
                  ~EXTRA_AWARE) | (op->u.sedf.extratime & EXTRA_AWARE);
             EDOM_INFO(v)->latency = op->u.sedf.latency;
             extraq_check(v);
-            vcpu_schedule_unlock(v);
+            vcpu_schedule_unlock(lock, v);
         }
     }
     else if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 3f7d463..38661ea 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -159,18 +159,16 @@ static inline void vcpu_runstate_change(
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
 {
+    spinlock_t *lock = likely(v == current) ? NULL : vcpu_schedule_lock_irq(v);
     s_time_t delta;
 
-    if ( unlikely(v != current) )
-        vcpu_schedule_lock_irq(v);
-
     memcpy(runstate, &v->runstate, sizeof(*runstate));
     delta = NOW() - runstate->state_entry_time;
     if ( delta > 0 )
         runstate->time[runstate->state] += delta;
 
-    if ( unlikely(v != current) )
-        vcpu_schedule_unlock_irq(v);
+    if ( unlikely(lock != NULL) )
+        vcpu_schedule_unlock_irq(lock, v);
 }
 
 uint64_t get_cpu_idle_time(unsigned int cpu)
@@ -330,8 +328,7 @@ void sched_destroy_domain(struct domain *d)
 void vcpu_sleep_nosync(struct vcpu *v)
 {
     unsigned long flags;
-
-    vcpu_schedule_lock_irqsave(v, flags);
+    spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
 
     if ( likely(!vcpu_runnable(v)) )
     {
@@ -341,7 +338,7 @@ void vcpu_sleep_nosync(struct vcpu *v)
         SCHED_OP(VCPU2OP(v), sleep, v);
     }
 
-    vcpu_schedule_unlock_irqrestore(v, flags);
+    vcpu_schedule_unlock_irqrestore(lock, flags, v);
 
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
 }
@@ -359,8 +356,7 @@ void vcpu_sleep_sync(struct vcpu *v)
 void vcpu_wake(struct vcpu *v)
 {
     unsigned long flags;
-
-    vcpu_schedule_lock_irqsave(v, flags);
+    spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
 
     if ( likely(vcpu_runnable(v)) )
     {
@@ -374,7 +370,7 @@ void vcpu_wake(struct vcpu *v)
             vcpu_runstate_change(v, RUNSTATE_offline, NOW());
     }
 
-    vcpu_schedule_unlock_irqrestore(v, flags);
+    vcpu_schedule_unlock_irqrestore(lock, flags, v);
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 }
@@ -525,10 +521,11 @@ static void vcpu_migrate(struct vcpu *v)
  */
 void vcpu_force_reschedule(struct vcpu *v)
 {
-    vcpu_schedule_lock_irq(v);
+    spinlock_t *lock = vcpu_schedule_lock_irq(v);
+
     if ( v->is_running )
         set_bit(_VPF_migrating, &v->pause_flags);
-    vcpu_schedule_unlock_irq(v);
+    vcpu_schedule_unlock_irq(lock, v);
 
     if ( test_bit(_VPF_migrating, &v->pause_flags) )
     {
@@ -543,7 +540,7 @@ void restore_vcpu_affinity(struct domain *d)
 
     for_each_vcpu ( d, v )
     {
-        vcpu_schedule_lock_irq(v);
+        spinlock_t *lock = vcpu_schedule_lock_irq(v);
 
         if ( v->affinity_broken )
         {
@@ -556,13 +553,13 @@ void restore_vcpu_affinity(struct domain *d)
         if ( v->processor == smp_processor_id() )
         {
             set_bit(_VPF_migrating, &v->pause_flags);
-            vcpu_schedule_unlock_irq(v);
+            vcpu_schedule_unlock_irq(lock, v);
             vcpu_sleep_nosync(v);
             vcpu_migrate(v);
         }
         else
         {
-            vcpu_schedule_unlock_irq(v);
+            vcpu_schedule_unlock_irq(lock, v);
         }
     }
 
@@ -589,7 +586,7 @@ int cpu_disable_scheduler(unsigned int cpu)
     {
         for_each_vcpu ( d, v )
         {
-            vcpu_schedule_lock_irq(v);
+            spinlock_t *lock = vcpu_schedule_lock_irq(v);
 
             cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
             if ( cpumask_empty(&online_affinity) &&
@@ -610,13 +607,13 @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( v->processor == cpu )
             {
                 set_bit(_VPF_migrating, &v->pause_flags);
-                vcpu_schedule_unlock_irq(v);
+                vcpu_schedule_unlock_irq(lock, v);
                 vcpu_sleep_nosync(v);
                 vcpu_migrate(v);
             }
             else
             {
-                vcpu_schedule_unlock_irq(v);
+                vcpu_schedule_unlock_irq(lock, v);
             }
 
             /*
@@ -638,6 +635,7 @@ int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
 {
     cpumask_t online_affinity;
     cpumask_t *online;
+    spinlock_t *lock;
 
     if ( v->domain->is_pinned )
         return -EINVAL;
@@ -646,14 +644,14 @@ int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
     if ( cpumask_empty(&online_affinity) )
         return -EINVAL;
 
-    vcpu_schedule_lock_irq(v);
+    lock = vcpu_schedule_lock_irq(v);
 
     cpumask_copy(v->cpu_affinity, affinity);
     if ( VCPU2OP(v)->sched_id == XEN_SCHEDULER_SEDF ||
          !cpumask_test_cpu(v->processor, v->cpu_affinity) )
         set_bit(_VPF_migrating, &v->pause_flags);
 
-    vcpu_schedule_unlock_irq(v);
+    vcpu_schedule_unlock_irq(lock, v);
 
     domain_update_node_affinity(v->domain);
 
@@ -764,10 +762,10 @@ static long do_poll(struct sched_poll *sched_poll)
 static long do_yield(void)
 {
     struct vcpu * v=current;
+    spinlock_t *lock = vcpu_schedule_lock_irq(v);
 
-    vcpu_schedule_lock_irq(v);
     SCHED_OP(VCPU2OP(v), yield, v);
-    vcpu_schedule_unlock_irq(v);
+    vcpu_schedule_unlock_irq(lock, v);
 
     TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
     raise_softirq(SCHEDULE_SOFTIRQ);
@@ -1126,6 +1124,7 @@ static void schedule(void)
     unsigned long        *tasklet_work = &this_cpu(tasklet_work_to_do);
     bool_t                tasklet_work_scheduled = 0;
     struct schedule_data *sd;
+    spinlock_t           *lock;
     struct task_slice     next_slice;
     int cpu = smp_processor_id();
 
@@ -1152,7 +1151,7 @@ static void schedule(void)
         BUG();
     }
 
-    pcpu_schedule_lock_irq(cpu);
+    lock = pcpu_schedule_lock_irq(cpu);
 
     stop_timer(&sd->s_timer);
     
@@ -1169,7 +1168,7 @@ static void schedule(void)
 
     if ( unlikely(prev == next) )
     {
-        pcpu_schedule_unlock_irq(cpu);
+        pcpu_schedule_unlock_irq(lock, cpu);
         trace_continue_running(next);
         return continue_running(prev);
     }
@@ -1207,7 +1206,7 @@ static void schedule(void)
     ASSERT(!next->is_running);
     next->is_running = 1;
 
-    pcpu_schedule_unlock_irq(cpu);
+    pcpu_schedule_unlock_irq(lock, cpu);
 
     perfc_incr(sched_ctx);
 
@@ -1396,6 +1395,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     unsigned long flags;
     struct vcpu *idle;
+    spinlock_t *lock;
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
@@ -1414,7 +1414,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
         return -ENOMEM;
     }
 
-    pcpu_schedule_lock_irqsave(cpu, flags);
+    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
     SCHED_OP(old_ops, tick_suspend, cpu);
     vpriv_old = idle->sched_priv;
@@ -1425,7 +1425,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     SCHED_OP(new_ops, tick_resume, cpu);
     SCHED_OP(new_ops, insert_vcpu, idle);
 
-    pcpu_schedule_unlock_irqrestore(cpu, flags);
+    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
 
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
@@ -1483,10 +1483,11 @@ void schedule_dump(struct cpupool *c)
 
     for_each_cpu (i, cpus)
     {
-        pcpu_schedule_lock(i);
+        spinlock_t *lock = pcpu_schedule_lock(i);
+
         printk("CPU[%02d] ", i);
         SCHED_OP(sched, dump_cpu_state, i);
-        pcpu_schedule_unlock(i);
+        pcpu_schedule_unlock(lock, i);
     }
 }
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index c3be9dc..febd3c1 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -45,96 +45,70 @@ DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 
-static inline spinlock_t * pcpu_schedule_lock(int cpu)
-{
-    spinlock_t * lock=NULL;
-
-    for ( ; ; )
-    {
-        /* The per_cpu(v->processor) may also change, if changing
-         * cpu pool also changes the scheduler lock.  Retry
-         * until they match.
-         */
-        lock=per_cpu(schedule_data, cpu).schedule_lock;
-
-        spin_lock(lock);
-        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
-            break;
-        spin_unlock(lock);
-    }
-    return lock;
+#define sched_lock(kind, param, cpu, irq, arg...) \
+static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
+{ \
+    for ( ; ; ) \
+    { \
+        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+        /* \
+         * v->processor may change when grabbing the lock; but \
+         * per_cpu(v->processor) may also change, if changing cpu pool \
+         * also changes the scheduler lock.  Retry until they match. \
+         * \
+         * It may also be the case that v->processor may change but the \
+         * lock may be the same; this will succeed in that case. \
+         */ \
+        spin_lock##irq(lock, ## arg); \
+        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+            return lock; \
+        spin_unlock##irq(lock, ## arg); \
+    } \
 }
 
-static inline int pcpu_schedule_trylock(int cpu)
-{
-    spinlock_t * lock=NULL;
-
-    lock=per_cpu(schedule_data, cpu).schedule_lock;
-    if ( ! spin_trylock(lock) )
-        return 0;
-    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
-        return 1;
-    else
-    {
-        spin_unlock(lock);
-        return 0;
-    }
-}
-
-#define pcpu_schedule_lock_irq(p) \
-    do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
-#define pcpu_schedule_lock_irqsave(p, flags) \
-    do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
-
-static inline void pcpu_schedule_unlock(int cpu)
-{
-    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+#define sched_unlock(kind, param, cpu, irq, arg...) \
+static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
+                                               EXTRA_TYPE(arg), param) \
+{ \
+    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+    spin_unlock##irq(lock, ## arg); \
 }
 
-#define pcpu_schedule_unlock_irq(p) \
-    do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
-#define pcpu_schedule_unlock_irqrestore(p, flags) \
-    do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
-
-static inline void vcpu_schedule_lock(struct vcpu *v)
+#define EXTRA_TYPE(arg)
+sched_lock(pcpu, unsigned int cpu,     cpu, )
+sched_lock(vcpu, const struct vcpu *v, v->processor, )
+sched_lock(pcpu, unsigned int cpu,     cpu,          _irq)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_unlock(pcpu, unsigned int cpu,     cpu, )
+sched_unlock(vcpu, const struct vcpu *v, v->processor, )
+sched_unlock(pcpu, unsigned int cpu,     cpu,          _irq)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
+#undef EXTRA_TYPE
+
+#define EXTRA_TYPE(arg) , unsigned long arg
+#define spin_unlock_irqsave spin_unlock_irqrestore
+sched_lock(pcpu, unsigned int cpu,     cpu,          _irqsave, *flags)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
+#undef spin_unlock_irqsave
+sched_unlock(pcpu, unsigned int cpu,     cpu,          _irqrestore, flags)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
+#undef EXTRA_TYPE
+
+#undef sched_unlock
+#undef sched_lock
+
+static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
 {
-    spinlock_t * lock;
-
-    for ( ; ; )
-    {
-        /* v->processor may change when grabbing the lock; but
-         * per_cpu(v->processor) may also change, if changing
-         * cpu pool also changes the scheduler lock.  Retry
-         * until they match.
-         *
-         * It may also be the case that v->processor may change
-         * but the lock may be the same; this will succeed
-         * in that case.
-         */
-        lock=per_cpu(schedule_data, v->processor).schedule_lock;
-
-        spin_lock(lock);
-        if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
-            break;
-        spin_unlock(lock);
-    }
-}
-
-#define vcpu_schedule_lock_irq(v) \
-    do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
-#define vcpu_schedule_lock_irqsave(v, flags) \
-    do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
+    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
 
-static inline void vcpu_schedule_unlock(struct vcpu *v)
-{
-    spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
+    if ( !spin_trylock(lock) )
+        return NULL;
+    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+        return lock;
+    spin_unlock(lock);
+    return NULL;
 }
 
-#define vcpu_schedule_unlock_irq(v) \
-    do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
-#define vcpu_schedule_unlock_irqrestore(v, flags) \
-    do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
-
 struct task_slice {
     struct vcpu *task;
     s_time_t     time;
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.2
