
[Xen-changelog] [xen staging] xen/sched: Use %*pb[l] instead of cpumask_scn{,list}printf()



commit e80bc240e79ff545db75081e8b1f79f7364ef73c
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Sep 6 11:14:56 2018 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Fri Nov 16 16:25:54 2018 +0000

    xen/sched: Use %*pb[l] instead of cpumask_scn{,list}printf()
    
    This removes all use of keyhandler_scratch as a bounce-buffer for the
    rendered string.  In some cases, combine adjacent printk()'s which are
    writing parts of the same line.
    
    No functional change.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
    Acked-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
 xen/common/sched_credit.c  | 17 +++++------------
 xen/common/sched_credit2.c | 29 ++++++++++++-----------------
 xen/common/sched_null.c    | 15 +++++----------
 xen/common/sched_rt.c      |  5 ++---
 4 files changed, 24 insertions(+), 42 deletions(-)
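
[Editorial note, not part of the commit: a brief illustration of the %*pb/%*pbl
pointer-format extension used in this patch.  The field width supplies the
number of bits to print and the pointer argument is the bitmap itself, so the
scheduler dump code now passes nr_cpu_ids together with cpumask_bits() straight
to printk().  The sketch below contrasts the old and new calling conventions;
it is illustrative only, and example_dump() is a hypothetical helper, not part
of this patch.]

    /*
     * Illustrative sketch of the old and new styles; not a standalone
     * program.  "%*pb" renders the bitmap in hex, "%*pbl" as a CPU
     * range list (e.g. 0-3,8).
     */
    static void example_dump(const cpumask_t *mask)
    {
        /* Old: render into the shared keyhandler_scratch buffer first. */
        cpumask_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
        printk("cpus = %s\n", keyhandler_scratch);

        /* New: let printk() format the bitmap directly. */
        printk("cpus = %*pb\n",  nr_cpu_ids, cpumask_bits(mask));
        printk("cpus = %*pbl\n", nr_cpu_ids, cpumask_bits(mask));
    }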

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 84e744bfe4..3abe20def8 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -2044,7 +2044,6 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     spinlock_t *lock;
     unsigned long flags;
     int loop;
-#define cpustr keyhandler_scratch
 
     /*
      * We need both locks:
@@ -2059,11 +2058,10 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%s, ",
-           cpu, spc->nr_runnable, spc->runq_sort_last, cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
-    printk("core=%s\n", cpustr);
+    printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%*pb, core=%*pb\n",
+           cpu, spc->nr_runnable, spc->runq_sort_last,
+           nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
+           nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu). */
     svc = CSCHED_VCPU(curr_on_cpu(cpu));
@@ -2086,7 +2084,6 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
 
     pcpu_schedule_unlock(lock, cpu);
     spin_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
 }
 
 static void
@@ -2099,8 +2096,6 @@ csched_dump(const struct scheduler *ops)
 
     spin_lock_irqsave(&prv->lock, flags);
 
-#define idlers_buf keyhandler_scratch
-
     printk("info:\n"
            "\tncpus              = %u\n"
            "\tmaster             = %u\n"
@@ -2127,8 +2122,7 @@ csched_dump(const struct scheduler *ops)
            prv->ticks_per_tslice,
            prv->vcpu_migr_delay/ MICROSECS(1));
 
-    cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), prv->idlers);
-    printk("idlers: %s\n", idlers_buf);
+    printk("idlers: %*pb\n", nr_cpu_ids, cpumask_bits(prv->idlers));
 
     printk("active vcpus:\n");
     loop = 0;
@@ -2151,7 +2145,6 @@ csched_dump(const struct scheduler *ops)
             vcpu_schedule_unlock(lock, svc->vcpu);
         }
     }
-#undef idlers_buf
 
     spin_unlock_irqrestore(&prv->lock, flags);
 }
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 2b16bcea21..2ce577cf22 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3654,12 +3654,11 @@ dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct csched2_private *prv = csched2_priv(ops);
     struct csched2_vcpu *svc;
-#define cpustr keyhandler_scratch
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk("CPU[%02d] runq=%d, sibling=%s, ", cpu, c2r(cpu), cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
-    printk("core=%s\n", cpustr);
+    printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
+           cpu, c2r(cpu),
+           nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
+           nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
     svc = csched2_vcpu(curr_on_cpu(cpu));
@@ -3668,7 +3667,6 @@ dump_pcpu(const struct scheduler *ops, int cpu)
         printk("\trun: ");
         csched2_dump_vcpu(prv, svc);
     }
-#undef cpustr
 }
 
 static void
@@ -3678,7 +3676,6 @@ csched2_dump(const struct scheduler *ops)
     struct csched2_private *prv = csched2_priv(ops);
     unsigned long flags;
     unsigned int i, j, loop;
-#define cpustr keyhandler_scratch
 
     /*
      * We need the private scheduler lock as we access global
@@ -3696,29 +3693,28 @@ csched2_dump(const struct scheduler *ops)
 
         fraction = (prv->rqd[i].avgload * 100) >> prv->load_precision_shift;
 
-        cpulist_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].active);
         printk("Runqueue %d:\n"
                "\tncpus              = %u\n"
-               "\tcpus               = %s\n"
+               "\tcpus               = %*pbl\n"
                "\tmax_weight         = %u\n"
                "\tpick_bias          = %u\n"
                "\tinstload           = %d\n"
                "\taveload            = %"PRI_stime" (~%"PRI_stime"%%)\n",
                i,
                cpumask_weight(&prv->rqd[i].active),
-               cpustr,
+               nr_cpu_ids, cpumask_bits(&prv->rqd[i].active),
                prv->rqd[i].max_weight,
                prv->rqd[i].pick_bias,
                prv->rqd[i].load,
                prv->rqd[i].avgload,
                fraction);
 
-        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].idle);
-        printk("\tidlers: %s\n", cpustr);
-        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].tickled);
-        printk("\ttickled: %s\n", cpustr);
-        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].smt_idle);
-        printk("\tfully idle cores: %s\n", cpustr);
+        printk("\tidlers: %*pb\n"
+               "\ttickled: %*pb\n"
+               "\tfully idle cores: %*pb\n",
+               nr_cpu_ids, cpumask_bits(&prv->rqd[i].idle),
+               nr_cpu_ids, cpumask_bits(&prv->rqd[i].tickled),
+               nr_cpu_ids, cpumask_bits(&prv->rqd[i].smt_idle));
     }
 
     printk("Domain info:\n");
@@ -3779,7 +3775,6 @@ csched2_dump(const struct scheduler *ops)
     }
 
     read_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
 }
 
 static void *
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 7b039b777b..a59dbb2692 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -793,14 +793,13 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     struct null_vcpu *nvc;
     spinlock_t *lock;
     unsigned long flags;
-#define cpustr keyhandler_scratch
 
     lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk("CPU[%02d] sibling=%s, ", cpu, cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
-    printk("core=%s", cpustr);
+    printk("CPU[%02d] sibling=%*pb, core=%*pb",
+           cpu,
+           nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
+           nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
     if ( per_cpu(npc, cpu).vcpu != NULL )
         printk(", vcpu=%pv", per_cpu(npc, cpu).vcpu);
     printk("\n");
@@ -815,7 +814,6 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     }
 
     pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
-#undef cpustr
 }
 
 static void null_dump(const struct scheduler *ops)
@@ -824,12 +822,10 @@ static void null_dump(const struct scheduler *ops)
     struct list_head *iter;
     unsigned long flags;
     unsigned int loop;
-#define cpustr keyhandler_scratch
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    cpulist_scnprintf(cpustr, sizeof(cpustr), &prv->cpus_free);
-    printk("\tcpus_free = %s\n", cpustr);
+    printk("\tcpus_free = %*pbl\n", nr_cpu_ids, cpumask_bits(&prv->cpus_free));
 
     printk("Domain info:\n");
     loop = 0;
@@ -873,7 +869,6 @@ static void null_dump(const struct scheduler *ops)
     spin_unlock(&prv->waitq_lock);
 
     spin_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
 }
 
 const struct scheduler sched_null_def = {
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 59fbfa625f..f1b81f0373 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -328,11 +328,10 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
 
     cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
     cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
-    cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
     printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
            " cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
            " \t\t priority_level=%d has_extratime=%d\n"
-           " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%s\n",
+           " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%*pbl\n",
             svc->vcpu->domain->domain_id,
             svc->vcpu->vcpu_id,
             svc->vcpu->processor,
@@ -346,7 +345,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
             vcpu_on_q(svc),
             vcpu_runnable(svc->vcpu),
             svc->flags,
-            keyhandler_scratch);
+            nr_cpu_ids, cpumask_bits(mask));
 }
 
 static void
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog