[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] xen: sched_credit: define and use curr_on_cpu(cpu)


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Tue, 18 Dec 2012 21:22:08 +0000
  • Delivery-date: Tue, 18 Dec 2012 21:22:17 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Dario Faggioli <dario.faggioli@xxxxxxxxxx>
# Date 1355836647 0
# Node ID d5c0389bf26c89969ebce71927f34f6b923af949
# Parent  e2a3d22f2c7daa9008609d6aeda2ca378501cf31
xen: sched_credit: define and use curr_on_cpu(cpu)

To fetch `per_cpu(schedule_data,cpu).curr' in a more readable
way. It's in sched-if.h as that is where `struct schedule_data'
is declared.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---


diff -r e2a3d22f2c7d -r d5c0389bf26c xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Tue Dec 18 14:08:55 2012 +0100
+++ b/xen/common/sched_credit.c Tue Dec 18 13:17:27 2012 +0000
@@ -228,7 +228,7 @@ static void burn_credits(struct csched_v
     unsigned int credits;
 
     /* Assert svc is current */
-    ASSERT(svc==CSCHED_VCPU(per_cpu(schedule_data, svc->vcpu->processor).curr));
+    ASSERT( svc == CSCHED_VCPU(curr_on_cpu(svc->vcpu->processor)) );
 
     if ( (delta = now - svc->start_time) <= 0 )
         return;
@@ -246,8 +246,7 @@ DEFINE_PER_CPU(unsigned int, last_tickle
 static inline void
 __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
 {
-    struct csched_vcpu * const cur =
-        CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
+    struct csched_vcpu * const cur = CSCHED_VCPU(curr_on_cpu(cpu));
     struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
     cpumask_t mask;
 
@@ -371,7 +370,7 @@ csched_alloc_pdata(const struct schedule
         per_cpu(schedule_data, cpu).sched_priv = spc;
 
     /* Start off idling... */
-    BUG_ON(!is_idle_vcpu(per_cpu(schedule_data, cpu).curr));
+    BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)));
     cpumask_set_cpu(cpu, prv->idlers);
 
     spin_unlock_irqrestore(&prv->lock, flags);
@@ -709,7 +708,7 @@ csched_vcpu_sleep(const struct scheduler
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( per_cpu(schedule_data, vc->processor).curr == vc )
+    if ( curr_on_cpu(vc->processor) == vc )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
     else if ( __vcpu_on_runq(svc) )
         __runq_remove(svc);
@@ -723,7 +722,7 @@ csched_vcpu_wake(const struct scheduler 
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( unlikely(per_cpu(schedule_data, cpu).curr == vc) )
+    if ( unlikely(curr_on_cpu(cpu) == vc) )
     {
         SCHED_STAT_CRANK(vcpu_wake_running);
         return;
@@ -1192,7 +1191,7 @@ static struct csched_vcpu *
 csched_runq_steal(int peer_cpu, int cpu, int pri)
 {
     const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
-    const struct vcpu * const peer_vcpu = per_cpu(schedule_data, peer_cpu).curr;
+    const struct vcpu * const peer_vcpu = curr_on_cpu(peer_cpu);
     struct csched_vcpu *speer;
     struct list_head *iter;
     struct vcpu *vc;
@@ -1480,7 +1479,7 @@ csched_dump_pcpu(const struct scheduler 
     printk("core=%s\n", cpustr);
 
     /* current VCPU */
-    svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
+    svc = CSCHED_VCPU(curr_on_cpu(cpu));
     if ( svc )
     {
         printk("\trun: ");
diff -r e2a3d22f2c7d -r d5c0389bf26c xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h        Tue Dec 18 14:08:55 2012 +0100
+++ b/xen/include/xen/sched-if.h        Tue Dec 18 13:17:27 2012 +0000
@@ -41,6 +41,8 @@ struct schedule_data {
     atomic_t            urgent_count;   /* how many urgent vcpus           */
 };
 
+#define curr_on_cpu(c)    (per_cpu(schedule_data, c).curr)
+
 DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.