
[Xen-devel] [PATCH] xen: sched: improve debug dump output.



The debug dump of scheduling information for Credit2 is
hard to read, as it repeats the same information
multiple times in different ways.

In fact, in Credit2, CPUs are grouped into runqueues.
Here's the current debug output:

 CPU[00]  sibling=00000000,00000003, core=00000000,000000ff
    run: [32767.0] flags=0 cpu=0 credit=-1073741824 [w=0] load=0 (~0%)
      1: [0.3] flags=0 cpu=2 credit=3273410 [w=256] load=262144 (~100%)
      2: [0.4] flags=0 cpu=2 credit=2974954 [w=256] load=262144 (~100%)
 CPU[01]  sibling=00000000,00000003, core=00000000,000000ff
    run: [32767.1] flags=0 cpu=1 credit=-1073741824 [w=0] load=0 (~0%)
      1: [0.3] flags=0 cpu=2 credit=3273410 [w=256] load=262144 (~100%)
      2: [0.4] flags=0 cpu=2 credit=2974954 [w=256] load=262144 (~100%)
 CPU[02]  sibling=00000000,0000000c, core=00000000,000000ff
    run: [0.2] flags=2 cpu=2 credit=3556909 [w=256] load=262144 (~100%)
      1: [0.3] flags=0 cpu=2 credit=3273410 [w=256] load=262144 (~100%)
      2: [0.4] flags=0 cpu=2 credit=2974954 [w=256] load=262144 (~100%)

Here, CPUs 0, 1 and 2 are all part of runqueue 0,
whose content (which, BTW, is d0v3 and d0v4) is
printed 3 times! It is also not very useful to see
the details of the idle vCPUs, as they're always
the same (except for the vCPU IDs).

With this change, we print:
 - the pCPUs' details and, for non-idle ones, what
   vCPU they're running;
 - the runqueue content, once and for all.

 Runqueue 0:
 CPU[00] runq=0, sibling=00000000,00000003, core=00000000,000000ff
    run: [0.15] flags=2 cpu=0 credit=5804742 [w=256] load=3655 (~1%)
 CPU[01] runq=0, sibling=00000000,00000003, core=00000000,000000ff
 CPU[02] runq=0, sibling=00000000,0000000c, core=00000000,000000ff
    run: [0.3] flags=2 cpu=2 credit=6674856 [w=256] load=262144 (~100%)
 CPU[03] runq=0, sibling=00000000,0000000c, core=00000000,000000ff
 RUNQ:
      0: [0.1] flags=0 cpu=2 credit=6561215 [w=256] load=262144 (~100%)
      1: [0.2] flags=0 cpu=2 credit=5812356 [w=256] load=262144 (~100%)
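
Not part of the patch, just for illustration: a toy, standalone C
sketch of the dump structure above. The vCPU names, credits and the
CPU-to-runqueue mapping are made up, and it only mimics the grouping
(per-CPU lines first, runqueue content printed once); it is not Xen
code.

 /* Toy sketch (hypothetical data): per-runqueue grouping of the dump. */
 #include <stdio.h>

 struct vcpu { const char *name; int credit; };

 int main(void)
 {
     /* Hypothetical runqueue 0, shared by CPUs 0-3, with two queued vCPUs. */
     struct vcpu runq0[] = { { "d0v1", 6561215 }, { "d0v2", 5812356 } };
     int cpus[] = { 0, 1, 2, 3 };
     unsigned int i;

     printf("Runqueue 0:\n");

     /* One line per pCPU; what it is running would follow (idle: nothing). */
     for ( i = 0; i < sizeof(cpus)/sizeof(cpus[0]); i++ )
         printf(" CPU[%02d] runq=0\n", cpus[i]);

     /* The runqueue content, printed once, not repeated for every CPU. */
     printf(" RUNQ:\n");
     for ( i = 0; i < sizeof(runq0)/sizeof(runq0[0]); i++ )
         printf("\t%3u: [%s] credit=%d\n", i, runq0[i].name, runq0[i].credit);

     return 0;
 }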

Also stop printing the details of idle vCPUs in
Credit1 and RTDS (they're just as useless there).

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Anshul Makkar <anshul.makkar@xxxxxxxxxx>
Cc: Meng Xu <mengxu@xxxxxxxxxxxxx>
---
 xen/common/sched_credit.c  |    6 ++--
 xen/common/sched_credit2.c |   72 +++++++++++++++++++++-----------------------
 xen/common/sched_rt.c      |    9 +++++-
 xen/common/schedule.c      |    7 ++--
 4 files changed, 49 insertions(+), 45 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index ad20819..7c0ff47 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1988,13 +1988,13 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     runq = &spc->runq;
 
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk(" sort=%d, sibling=%s, ", spc->runq_sort_last, cpustr);
+    printk("CPU[%02d] sort=%d, sibling=%s, ", cpu, spc->runq_sort_last, 
cpustr);
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
     printk("core=%s\n", cpustr);
 
-    /* current VCPU */
+    /* current VCPU (nothing to say if that's the idle vcpu). */
     svc = CSCHED_VCPU(curr_on_cpu(cpu));
-    if ( svc )
+    if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         printk("\trun: ");
         csched_dump_vcpu(svc);
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index b2f2b17..c4e2b9a 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2627,56 +2627,25 @@ csched2_dump_vcpu(struct csched2_private *prv, struct csched2_vcpu *svc)
     printk("\n");
 }
 
-static void
-csched2_dump_pcpu(const struct scheduler *ops, int cpu)
+static inline void
+dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct csched2_private *prv = CSCHED2_PRIV(ops);
-    struct list_head *runq, *iter;
     struct csched2_vcpu *svc;
-    unsigned long flags;
-    spinlock_t *lock;
-    int loop;
 #define cpustr keyhandler_scratch
 
-    /*
-     * We need both locks:
-     * - csched2_dump_vcpu() wants to access domains' weights,
-     *   which are protected by the private scheduler lock;
-     * - we scan through the runqueue, so we need the proper runqueue
-     *   lock (the one of the runqueue this cpu is associated to).
-     */
-    read_lock_irqsave(&prv->lock, flags);
-    lock = per_cpu(schedule_data, cpu).schedule_lock;
-    spin_lock(lock);
-
-    runq = &RQD(ops, cpu)->runq;
-
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk(" sibling=%s, ", cpustr);
+    printk("CPU[%02d] runq=%d, sibling=%s, ", cpu, c2r(ops, cpu), cpustr);
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
     printk("core=%s\n", cpustr);
 
-    /* current VCPU */
+    /* current VCPU (nothing to say if that's the idle vcpu) */
     svc = CSCHED2_VCPU(curr_on_cpu(cpu));
-    if ( svc )
+    if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         printk("\trun: ");
         csched2_dump_vcpu(prv, svc);
     }
-
-    loop = 0;
-    list_for_each( iter, runq )
-    {
-        svc = __runq_elem(iter);
-        if ( svc )
-        {
-            printk("\t%3d: ", ++loop);
-            csched2_dump_vcpu(prv, svc);
-        }
-    }
-
-    spin_unlock(lock);
-    read_unlock_irqrestore(&prv->lock, flags);
 #undef cpustr
 }
 
@@ -2686,7 +2655,7 @@ csched2_dump(const struct scheduler *ops)
     struct list_head *iter_sdom;
     struct csched2_private *prv = CSCHED2_PRIV(ops);
     unsigned long flags;
-    int i, loop;
+    unsigned int i, j, loop;
 #define cpustr keyhandler_scratch
 
     /*
@@ -2756,6 +2725,34 @@ csched2_dump(const struct scheduler *ops)
         }
     }
 
+    for_each_cpu(i, &prv->active_queues)
+    {
+        struct csched2_runqueue_data *rqd = prv->rqd + i;
+        struct list_head *iter, *runq = &rqd->runq;
+        int loop = 0;
+
+        /* We need the lock to scan the runqueue. */
+        spin_lock(&rqd->lock);
+
+        printk("Runqueue %d:\n", i);
+
+        for_each_cpu(j, &rqd->active)
+            dump_pcpu(ops, j);
+
+        printk("RUNQ:\n");
+        list_for_each( iter, runq )
+        {
+            struct csched2_vcpu *svc = __runq_elem(iter);
+
+            if ( svc )
+            {
+                printk("\t%3d: ", loop++);
+                csched2_dump_vcpu(prv, svc);
+            }
+        }
+        spin_unlock(&rqd->lock);
+    }
+
     read_unlock_irqrestore(&prv->lock, flags);
 #undef cpustr
 }
@@ -3100,7 +3097,6 @@ static const struct scheduler sched_credit2_def = {
     .do_schedule    = csched2_schedule,
     .context_saved  = csched2_context_saved,
 
-    .dump_cpu_state = csched2_dump_pcpu,
     .dump_settings  = csched2_dump,
     .init           = csched2_init,
     .deinit         = csched2_deinit,
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 24b4b22..f2d979c 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -320,10 +320,17 @@ static void
 rt_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
+    struct rt_vcpu *svc;
     unsigned long flags;
 
     spin_lock_irqsave(&prv->lock, flags);
-    rt_dump_vcpu(ops, rt_vcpu(curr_on_cpu(cpu)));
+    printk("CPU[%02d]\n", cpu);
+    /* current VCPU (nothing to say if that's the idle vcpu). */
+    svc = rt_vcpu(curr_on_cpu(cpu));
+    if ( svc && !is_idle_vcpu(svc->vcpu) )
+    {
+        rt_dump_vcpu(ops, svc);
+    }
     spin_unlock_irqrestore(&prv->lock, flags);
 }
 
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 43b5b99..e4320f3 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1844,10 +1844,11 @@ void schedule_dump(struct cpupool *c)
         cpus = &cpupool_free_cpus;
     }
 
-    for_each_cpu (i, cpus)
+    if ( sched->dump_cpu_state != NULL )
     {
-        printk("CPU[%02d] ", i);
-        SCHED_OP(sched, dump_cpu_state, i);
+        printk("CPUs info:\n");
+        for_each_cpu (i, cpus)
+            SCHED_OP(sched, dump_cpu_state, i);
     }
 }
 

