
[Xen-changelog] [xen staging] xen/sched: switch vcpu_schedule_lock to unit_schedule_lock



commit cc3186d57ed84c53bdd46b6cffdc7deb7011ce73
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Fri Sep 27 09:00:12 2019 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Sep 27 15:19:48 2019 +0200

    xen/sched: switch vcpu_schedule_lock to unit_schedule_lock
    
    Rename vcpu_schedule_[un]lock[_irq]() to unit_schedule_[un]lock[_irq]()
    and let them take a sched_unit pointer instead of a vcpu pointer as
    parameter.
    
    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
 xen/common/sched_credit.c  | 17 +++++++++--------
 xen/common/sched_credit2.c | 40 ++++++++++++++++++++--------------------
 xen/common/sched_null.c    | 16 ++++++++--------
 xen/common/sched_rt.c      | 15 +++++++--------
 xen/common/schedule.c      | 45 +++++++++++++++++++++++----------------------
 xen/include/xen/sched-if.h | 12 ++++++------
 6 files changed, 73 insertions(+), 72 deletions(-)
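
The caller-side conversion is mechanical and can be seen throughout the hunks
below: every lock/unlock call that used to pass a struct vcpu now passes that
vcpu's sched_unit (or a unit pointer already at hand). As a minimal sketch of
the pattern, summarising the diff rather than quoting any one hunk:

    /* Old interface: locking keyed off the vcpu (v->processor). */
    spinlock_t *lock = vcpu_schedule_lock_irq(v);
    /* ... manipulate runqueue/accounting state ... */
    vcpu_schedule_unlock_irq(lock, v);

    /* New interface: locking keyed off the scheduling unit. */
    spinlock_t *lock = unit_schedule_lock_irq(v->sched_unit);
    /* ... manipulate runqueue/accounting state ... */
    unit_schedule_unlock_irq(lock, v->sched_unit);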

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 59a77e874b..d0e4ddc76b 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -926,7 +926,8 @@ __csched_vcpu_acct_stop_locked(struct csched_private *prv,
 static void
 csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
 {
-    struct csched_unit * const svc = CSCHED_UNIT(current->sched_unit);
+    struct sched_unit *currunit = current->sched_unit;
+    struct csched_unit * const svc = CSCHED_UNIT(currunit);
     const struct scheduler *ops = per_cpu(scheduler, cpu);
 
     ASSERT( current->processor == cpu );
@@ -962,7 +963,7 @@ csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
     {
         unsigned int new_cpu;
         unsigned long flags;
-        spinlock_t *lock = vcpu_schedule_lock_irqsave(current, &flags);
+        spinlock_t *lock = unit_schedule_lock_irqsave(currunit, &flags);
 
         /*
          * If it's been active a while, check if we'd be better off
@@ -971,7 +972,7 @@ csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
          */
         new_cpu = _csched_cpu_pick(ops, current, 0);
 
-        vcpu_schedule_unlock_irqrestore(lock, flags, current);
+        unit_schedule_unlock_irqrestore(lock, flags, currunit);
 
         if ( new_cpu != cpu )
         {
@@ -1023,19 +1024,19 @@ csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
     BUG_ON( is_idle_vcpu(vc) );
 
     /* csched_res_pick() looks in vc->processor's runq, so we need the lock. */
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = unit_schedule_lock_irq(unit);
 
     unit->res = csched_res_pick(ops, unit);
     vc->processor = unit->res->master_cpu;
 
     spin_unlock_irq(lock);
 
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = unit_schedule_lock_irq(unit);
 
     if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
         runq_insert(svc);
 
-    vcpu_schedule_unlock_irq(lock, vc);
+    unit_schedule_unlock_irq(lock, unit);
 
     SCHED_STAT_CRANK(vcpu_insert);
 }
@@ -2133,12 +2134,12 @@ csched_dump(const struct scheduler *ops)
             spinlock_t *lock;
 
             svc = list_entry(iter_svc, struct csched_unit, active_vcpu_elem);
-            lock = vcpu_schedule_lock(svc->vcpu);
+            lock = unit_schedule_lock(svc->vcpu->sched_unit);
 
             printk("\t%3d: ", ++loop);
             csched_dump_vcpu(svc);
 
-            vcpu_schedule_unlock(lock, svc->vcpu);
+            unit_schedule_unlock(lock, svc->vcpu->sched_unit);
         }
     }
 
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index ef0dd1d228..82d03a0683 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -171,7 +171,7 @@
  * - runqueue lock
  *  + it is per-runqueue, so:
  *   * cpus in a runqueue take the runqueue lock, when using
- *     pcpu_schedule_lock() / vcpu_schedule_lock() (and friends),
+ *     pcpu_schedule_lock() / unit_schedule_lock() (and friends),
  *   * a cpu may (try to) take a "remote" runqueue lock, e.g., for
  *     load balancing;
  *  + serializes runqueue operations (removing and inserting vcpus);
@@ -1891,7 +1891,7 @@ unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus)
         unsigned long flags;
         s_time_t now;
 
-        lock = vcpu_schedule_lock_irqsave(svc->vcpu, &flags);
+        lock = unit_schedule_lock_irqsave(svc->vcpu->sched_unit, &flags);
 
         __clear_bit(_VPF_parked, &svc->vcpu->pause_flags);
         if ( unlikely(svc->flags & CSFLAG_scheduled) )
@@ -1924,7 +1924,7 @@ unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus)
         }
         list_del_init(&svc->parked_elem);
 
-        vcpu_schedule_unlock_irqrestore(lock, flags, svc->vcpu);
+        unit_schedule_unlock_irqrestore(lock, flags, svc->vcpu->sched_unit);
     }
 }
 
@@ -2163,7 +2163,7 @@ csched2_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
     struct csched2_unit * const svc = csched2_unit(unit);
-    spinlock_t *lock = vcpu_schedule_lock_irq(vc);
+    spinlock_t *lock = unit_schedule_lock_irq(unit);
     s_time_t now = NOW();
     LIST_HEAD(were_parked);
 
@@ -2195,7 +2195,7 @@ csched2_context_saved(const struct scheduler *ops, struct sched_unit *unit)
     else if ( !is_idle_vcpu(vc) )
         update_load(ops, svc->rqd, svc, -1, now);
 
-    vcpu_schedule_unlock_irq(lock, vc);
+    unit_schedule_unlock_irq(lock, unit);
 
     unpark_parked_vcpus(ops, &were_parked);
 }
@@ -2848,14 +2848,14 @@ csched2_dom_cntl(
             for_each_vcpu ( d, v )
             {
                 struct csched2_unit *svc = csched2_unit(v->sched_unit);
-                spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
+                spinlock_t *lock = unit_schedule_lock(svc->vcpu->sched_unit);
 
                 ASSERT(svc->rqd == c2rqd(ops, svc->vcpu->processor));
 
                 svc->weight = sdom->weight;
                 update_max_weight(svc->rqd, svc->weight, old_weight);
 
-                vcpu_schedule_unlock(lock, svc->vcpu);
+                unit_schedule_unlock(lock, svc->vcpu->sched_unit);
             }
         }
         /* Cap */
@@ -2886,7 +2886,7 @@ csched2_dom_cntl(
             for_each_vcpu ( d, v )
             {
                 svc = csched2_unit(v->sched_unit);
-                lock = vcpu_schedule_lock(svc->vcpu);
+                lock = unit_schedule_lock(svc->vcpu->sched_unit);
                 /*
                  * Too small quotas would in theory cause a lot of overhead,
                  * which then won't happen because, in csched2_runtime(),
@@ -2894,7 +2894,7 @@ csched2_dom_cntl(
                  */
                 svc->budget_quota = max(sdom->tot_budget / sdom->nr_vcpus,
                                         CSCHED2_MIN_TIMER);
-                vcpu_schedule_unlock(lock, svc->vcpu);
+                unit_schedule_unlock(lock, svc->vcpu->sched_unit);
             }
 
             if ( sdom->cap == 0 )
@@ -2929,7 +2929,7 @@ csched2_dom_cntl(
                 for_each_vcpu ( d, v )
                 {
                     svc = csched2_unit(v->sched_unit);
-                    lock = vcpu_schedule_lock(svc->vcpu);
+                    lock = unit_schedule_lock(svc->vcpu->sched_unit);
                     if ( v->is_running )
                     {
                         unsigned int cpu = v->processor;
@@ -2960,7 +2960,7 @@ csched2_dom_cntl(
                         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
                     }
                     svc->budget = 0;
-                    vcpu_schedule_unlock(lock, svc->vcpu);
+                    unit_schedule_unlock(lock, svc->vcpu->sched_unit);
                 }
             }
 
@@ -2976,12 +2976,12 @@ csched2_dom_cntl(
             for_each_vcpu ( d, v )
             {
                 struct csched2_unit *svc = csched2_unit(v->sched_unit);
-                spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
+                spinlock_t *lock = unit_schedule_lock(svc->vcpu->sched_unit);
 
                 svc->budget = STIME_MAX;
                 svc->budget_quota = 0;
 
-                vcpu_schedule_unlock(lock, svc->vcpu);
+                unit_schedule_unlock(lock, svc->vcpu->sched_unit);
             }
             sdom->cap = 0;
             /*
@@ -3120,19 +3120,19 @@ csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
     ASSERT(list_empty(&svc->runq_elem));
 
     /* csched2_res_pick() expects the pcpu lock to be held */
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = unit_schedule_lock_irq(unit);
 
     unit->res = csched2_res_pick(ops, unit);
     vc->processor = unit->res->master_cpu;
 
     spin_unlock_irq(lock);
 
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = unit_schedule_lock_irq(unit);
 
     /* Add vcpu to runqueue of initial processor */
     runq_assign(ops, vc);
 
-    vcpu_schedule_unlock_irq(lock, vc);
+    unit_schedule_unlock_irq(lock, unit);
 
     sdom->nr_vcpus++;
 
@@ -3162,11 +3162,11 @@ csched2_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
     SCHED_STAT_CRANK(vcpu_remove);
 
     /* Remove from runqueue */
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = unit_schedule_lock_irq(unit);
 
     runq_deassign(ops, vc);
 
-    vcpu_schedule_unlock_irq(lock, vc);
+    unit_schedule_unlock_irq(lock, unit);
 
     svc->sdom->nr_vcpus--;
 }
@@ -3750,12 +3750,12 @@ csched2_dump(const struct scheduler *ops)
             struct csched2_unit * const svc = csched2_unit(v->sched_unit);
             spinlock_t *lock;
 
-            lock = vcpu_schedule_lock(svc->vcpu);
+            lock = unit_schedule_lock(svc->vcpu->sched_unit);
 
             printk("\t%3d: ", ++loop);
             csched2_dump_vcpu(prv, svc);
 
-            vcpu_schedule_unlock(lock, svc->vcpu);
+            unit_schedule_unlock(lock, svc->vcpu->sched_unit);
         }
     }
 
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index b95214601f..47d1b2ab56 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -309,7 +309,7 @@ pick_res(struct null_private *prv, const struct sched_unit *unit)
      * all the pCPUs are busy.
      *
      * In fact, there must always be something sane in v->processor, or
-     * vcpu_schedule_lock() and friends won't work. This is not a problem,
+     * unit_schedule_lock() and friends won't work. This is not a problem,
      * as we will actually assign the vCPU to the pCPU we return from here,
      * only if the pCPU is free.
      */
@@ -450,11 +450,11 @@ static void null_unit_insert(const struct scheduler *ops,
 
     ASSERT(!is_idle_vcpu(v));
 
-    lock = vcpu_schedule_lock_irq(v);
+    lock = unit_schedule_lock_irq(unit);
 
     if ( unlikely(!is_vcpu_online(v)) )
     {
-        vcpu_schedule_unlock_irq(lock, v);
+        unit_schedule_unlock_irq(lock, unit);
         return;
     }
 
@@ -464,7 +464,7 @@ static void null_unit_insert(const struct scheduler *ops,
 
     spin_unlock(lock);
 
-    lock = vcpu_schedule_lock(v);
+    lock = unit_schedule_lock(unit);
 
     cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
                 cpupool_domain_cpumask(v->domain));
@@ -513,7 +513,7 @@ static void null_unit_remove(const struct scheduler *ops,
 
     ASSERT(!is_idle_vcpu(v));
 
-    lock = vcpu_schedule_lock_irq(v);
+    lock = unit_schedule_lock_irq(unit);
 
     /* If offline, the vcpu shouldn't be assigned, nor in the waitqueue */
     if ( unlikely(!is_vcpu_online(v)) )
@@ -536,7 +536,7 @@ static void null_unit_remove(const struct scheduler *ops,
     vcpu_deassign(prv, v);
 
  out:
-    vcpu_schedule_unlock_irq(lock, v);
+    unit_schedule_unlock_irq(lock, unit);
 
     SCHED_STAT_CRANK(vcpu_remove);
 }
@@ -935,13 +935,13 @@ static void null_dump(const struct scheduler *ops)
             struct null_unit * const nvc = null_unit(v->sched_unit);
             spinlock_t *lock;
 
-            lock = vcpu_schedule_lock(nvc->vcpu);
+            lock = unit_schedule_lock(nvc->vcpu->sched_unit);
 
             printk("\t%3d: ", ++loop);
             dump_vcpu(prv, nvc);
             printk("\n");
 
-            vcpu_schedule_unlock(lock, nvc->vcpu);
+            unit_schedule_unlock(lock, nvc->vcpu->sched_unit);
         }
     }
 
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index a168668a70..da0a9c402f 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -177,7 +177,7 @@ static void repl_timer_handler(void *data);
 /*
  * System-wide private data, include global RunQueue/DepletedQ
  * Global lock is referenced by sched_res->schedule_lock from all
- * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
+ * physical cpus. It can be grabbed via unit_schedule_lock_irq()
  */
 struct rt_private {
     spinlock_t lock;            /* the global coarse-grained lock */
@@ -895,7 +895,7 @@ rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
     unit->res = rt_res_pick(ops, unit);
     vc->processor = unit->res->master_cpu;
 
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = unit_schedule_lock_irq(unit);
 
     now = NOW();
     if ( now >= svc->cur_deadline )
@@ -908,7 +908,7 @@ rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
         if ( !vc->is_running )
             runq_insert(ops, svc);
     }
-    vcpu_schedule_unlock_irq(lock, vc);
+    unit_schedule_unlock_irq(lock, unit);
 
     SCHED_STAT_CRANK(vcpu_insert);
 }
@@ -919,7 +919,6 @@ rt_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 static void
 rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct vcpu *vc = unit->vcpu_list;
     struct rt_unit * const svc = rt_unit(unit);
     struct rt_dom * const sdom = svc->sdom;
     spinlock_t *lock;
@@ -928,14 +927,14 @@ rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
 
     BUG_ON( sdom == NULL );
 
-    lock = vcpu_schedule_lock_irq(vc);
+    lock = unit_schedule_lock_irq(unit);
     if ( vcpu_on_q(svc) )
         q_remove(svc);
 
     if ( vcpu_on_replq(svc) )
         replq_remove(ops,svc);
 
-    vcpu_schedule_unlock_irq(lock, vc);
+    unit_schedule_unlock_irq(lock, unit);
 }
 
 /*
@@ -1330,7 +1329,7 @@ rt_context_saved(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct vcpu *vc = unit->vcpu_list;
     struct rt_unit *svc = rt_unit(unit);
-    spinlock_t *lock = vcpu_schedule_lock_irq(vc);
+    spinlock_t *lock = unit_schedule_lock_irq(unit);
 
     __clear_bit(__RTDS_scheduled, &svc->flags);
     /* not insert idle vcpu to runq */
@@ -1347,7 +1346,7 @@ rt_context_saved(const struct scheduler *ops, struct sched_unit *unit)
         replq_remove(ops, svc);
 
 out:
-    vcpu_schedule_unlock_irq(lock, vc);
+    unit_schedule_unlock_irq(lock, unit);
 }
 
 /*
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 67ccb78739..6c8fa38052 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -253,7 +253,8 @@ static inline void vcpu_runstate_change(
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
 {
-    spinlock_t *lock = likely(v == current) ? NULL : vcpu_schedule_lock_irq(v);
+    spinlock_t *lock = likely(v == current)
+                       ? NULL : unit_schedule_lock_irq(v->sched_unit);
     s_time_t delta;
 
     memcpy(runstate, &v->runstate, sizeof(*runstate));
@@ -262,7 +263,7 @@ void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
         runstate->time[runstate->state] += delta;
 
     if ( unlikely(lock != NULL) )
-        vcpu_schedule_unlock_irq(lock, v);
+        unit_schedule_unlock_irq(lock, v->sched_unit);
 }
 
 uint64_t get_cpu_idle_time(unsigned int cpu)
@@ -478,7 +479,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
         migrate_timer(&v->singleshot_timer, new_p);
         migrate_timer(&v->poll_timer, new_p);
 
-        lock = vcpu_schedule_lock_irq(v);
+        lock = unit_schedule_lock_irq(v->sched_unit);
 
         sched_set_affinity(v, &cpumask_all, &cpumask_all);
 
@@ -487,7 +488,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
         /*
          * With v->processor modified we must not
          * - make any further changes assuming we hold the scheduler lock,
-         * - use vcpu_schedule_unlock_irq().
+         * - use unit_schedule_unlock_irq().
          */
         spin_unlock_irq(lock);
 
@@ -586,11 +587,11 @@ void vcpu_sleep_nosync(struct vcpu *v)
 
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
 
-    lock = vcpu_schedule_lock_irqsave(v, &flags);
+    lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);
 
     vcpu_sleep_nosync_locked(v);
 
-    vcpu_schedule_unlock_irqrestore(lock, flags, v);
+    unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
 }
 
 void vcpu_sleep_sync(struct vcpu *v)
@@ -610,7 +611,7 @@ void vcpu_wake(struct vcpu *v)
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 
-    lock = vcpu_schedule_lock_irqsave(v, &flags);
+    lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);
 
     if ( likely(vcpu_runnable(v)) )
     {
@@ -624,7 +625,7 @@ void vcpu_wake(struct vcpu *v)
             vcpu_runstate_change(v, RUNSTATE_offline, NOW());
     }
 
-    vcpu_schedule_unlock_irqrestore(lock, flags, v);
+    unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
 }
 
 void vcpu_unblock(struct vcpu *v)
@@ -692,9 +693,9 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
  * These steps are encapsulated in the following two functions; they
  * should be called like this:
  *
- *     lock = vcpu_schedule_lock_irq(v);
+ *     lock = unit_schedule_lock_irq(unit);
  *     vcpu_migrate_start(v);
- *     vcpu_schedule_unlock_irq(lock, v)
+ *     unit_schedule_unlock_irq(lock, unit)
  *     vcpu_migrate_finish(v);
  *
  * vcpu_migrate_finish() will do the work now if it can, or simply
@@ -813,7 +814,7 @@ void restore_vcpu_affinity(struct domain *d)
          * set v->processor of each of their vCPUs to something that will
          * make sense for the scheduler of the cpupool in which they are in.
          */
-        lock = vcpu_schedule_lock_irq(v);
+        lock = unit_schedule_lock_irq(v->sched_unit);
 
         cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
                     cpupool_domain_cpumask(d));
@@ -842,7 +843,7 @@ void restore_vcpu_affinity(struct domain *d)
         spin_unlock_irq(lock);
 
         /* v->processor might have changed, so reacquire the lock. */
-        lock = vcpu_schedule_lock_irq(v);
+        lock = unit_schedule_lock_irq(v->sched_unit);
         v->sched_unit->res = sched_pick_resource(vcpu_scheduler(v),
                                                  v->sched_unit);
         v->processor = v->sched_unit->res->master_cpu;
@@ -877,7 +878,7 @@ int cpu_disable_scheduler(unsigned int cpu)
         for_each_vcpu ( d, v )
         {
             unsigned long flags;
-            spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
+            spinlock_t *lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);
 
             cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
             if ( cpumask_empty(&online_affinity) &&
@@ -886,7 +887,7 @@ int cpu_disable_scheduler(unsigned int cpu)
                 if ( v->affinity_broken )
                 {
                     /* The vcpu is temporarily pinned, can't move it. */
-                    vcpu_schedule_unlock_irqrestore(lock, flags, v);
+                    unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
                     ret = -EADDRINUSE;
                     break;
                 }
@@ -899,7 +900,7 @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( v->processor != cpu )
             {
                 /* The vcpu is not on this cpu, so we can move on. */
-                vcpu_schedule_unlock_irqrestore(lock, flags, v);
+                unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
                 continue;
             }
 
@@ -912,7 +913,7 @@ int cpu_disable_scheduler(unsigned int cpu)
              *    things would have failed before getting in here.
              */
             vcpu_migrate_start(v);
-            vcpu_schedule_unlock_irqrestore(lock, flags, v);
+            unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
 
             vcpu_migrate_finish(v);
 
@@ -976,7 +977,7 @@ static int vcpu_set_affinity(
     spinlock_t *lock;
     int ret = 0;
 
-    lock = vcpu_schedule_lock_irq(v);
+    lock = unit_schedule_lock_irq(v->sched_unit);
 
     if ( v->affinity_broken )
         ret = -EBUSY;
@@ -998,7 +999,7 @@ static int vcpu_set_affinity(
         vcpu_migrate_start(v);
     }
 
-    vcpu_schedule_unlock_irq(lock, v);
+    unit_schedule_unlock_irq(lock, v->sched_unit);
 
     domain_update_node_affinity(v->domain);
 
@@ -1130,10 +1131,10 @@ static long do_poll(struct sched_poll *sched_poll)
 long vcpu_yield(void)
 {
     struct vcpu * v=current;
-    spinlock_t *lock = vcpu_schedule_lock_irq(v);
+    spinlock_t *lock = unit_schedule_lock_irq(v->sched_unit);
 
     sched_yield(vcpu_scheduler(v), v->sched_unit);
-    vcpu_schedule_unlock_irq(lock, v);
+    unit_schedule_unlock_irq(lock, v->sched_unit);
 
     SCHED_STAT_CRANK(vcpu_yield);
 
@@ -1230,7 +1231,7 @@ int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason)
     int ret = -EINVAL;
     bool migrate;
 
-    lock = vcpu_schedule_lock_irq(v);
+    lock = unit_schedule_lock_irq(v->sched_unit);
 
     if ( cpu == NR_CPUS )
     {
@@ -1263,7 +1264,7 @@ int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason)
     if ( migrate )
         vcpu_migrate_start(v);
 
-    vcpu_schedule_unlock_irq(lock, v);
+    unit_schedule_unlock_irq(lock, v->sched_unit);
 
     if ( migrate )
         vcpu_migrate_finish(v);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 4dbf8f974c..f2c071358f 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -105,22 +105,22 @@ static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
 
 #define EXTRA_TYPE(arg)
 sched_lock(pcpu, unsigned int cpu,     cpu, )
-sched_lock(vcpu, const struct vcpu *v, v->processor, )
+sched_lock(unit, const struct sched_unit *i, i->res->master_cpu, )
 sched_lock(pcpu, unsigned int cpu,     cpu,          _irq)
-sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_lock(unit, const struct sched_unit *i, i->res->master_cpu, _irq)
 sched_unlock(pcpu, unsigned int cpu,     cpu, )
-sched_unlock(vcpu, const struct vcpu *v, v->processor, )
+sched_unlock(unit, const struct sched_unit *i, i->res->master_cpu, )
 sched_unlock(pcpu, unsigned int cpu,     cpu,          _irq)
-sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_unlock(unit, const struct sched_unit *i, i->res->master_cpu, _irq)
 #undef EXTRA_TYPE
 
 #define EXTRA_TYPE(arg) , unsigned long arg
 #define spin_unlock_irqsave spin_unlock_irqrestore
 sched_lock(pcpu, unsigned int cpu,     cpu,          _irqsave, *flags)
-sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
+sched_lock(unit, const struct sched_unit *i, i->res->master_cpu, _irqsave, *flags)
 #undef spin_unlock_irqsave
 sched_unlock(pcpu, unsigned int cpu,     cpu,          _irqrestore, flags)
-sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
+sched_unlock(unit, const struct sched_unit *i, i->res->master_cpu, _irqrestore, flags)
 #undef EXTRA_TYPE
 
 #undef sched_unlock
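
For readers not familiar with sched-if.h: the sched_lock()/sched_unlock()
generator macros touched above produce the inline wrapper functions used
everywhere else in this patch. Roughly, and leaving out the _irq/_irqsave
flavours, the new unit variant expands to something like the sketch below; it
is an approximation rather than the literal generated code, and the accessor
for the per-resource lock (get_sched_res() here) is assumed from elsewhere in
this series:

    static inline spinlock_t *unit_schedule_lock(const struct sched_unit *i)
    {
        for ( ; ; )
        {
            /*
             * Lock currently attached to the unit's scheduling resource
             * (get_sched_res() assumed to map a cpu to its sched_resource).
             */
            spinlock_t *lock = get_sched_res(i->res->master_cpu)->schedule_lock;

            spin_lock(lock);

            /*
             * The unit may have moved to another resource while we were
             * waiting; only return if the lock we now hold is still the one
             * attached to the resource.
             */
            if ( likely(lock == get_sched_res(i->res->master_cpu)->schedule_lock) )
                return lock;

            spin_unlock(lock);
        }
    }

The essence of the rename is visible in the macro arguments: the lock is now
looked up via i->res->master_cpu, i.e. via the scheduling resource of the
whole unit, rather than via v->processor of an individual vcpu.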
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

