
[Xen-devel] [PATCH v3 05/10] xen/cpumask: Introduce a CPUMASK_PR() wrapper for printing



Having to specify 'nr_cpu_ids, cpumask_bits(foo)' for all printing operations
is quite repetitive.  Introduce a wrapper to help.
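
For illustration (taken from the cpupool.c hunk below), a call which currently
reads

    printk("Online Cpus: %*pbl\n", nr_cpu_ids, cpumask_bits(&cpu_online_map));

becomes

    printk("Online Cpus: %*pbl\n", CPUMASK_PR(&cpu_online_map));

where CPUMASK_PR() expands to the same 'field width, bitmap pointer' argument
pair that the %*pb[l] format specifier consumes.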

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Dario Faggioli <dfaggioli@xxxxxxxx>
CC: Juergen Gross <jgross@xxxxxxxx>

v3:
 * New

Juergen: If this is too disruptive to your core scheduling series, I can
defer it.  It's just API cleanup touching the subsequent patch.
---
 xen/arch/x86/cpu/mcheck/mce.c |  2 +-
 xen/arch/x86/crash.c          |  2 +-
 xen/arch/x86/io_apic.c        |  6 ++----
 xen/arch/x86/irq.c            |  5 ++---
 xen/arch/x86/sysctl.c         |  3 +--
 xen/common/cpupool.c          |  7 +++----
 xen/common/keyhandler.c       |  8 ++++----
 xen/common/sched_credit.c     |  6 +++---
 xen/common/sched_credit2.c    | 12 ++++++------
 xen/common/sched_null.c       |  7 +++----
 xen/common/sched_rt.c         |  3 +--
 xen/include/xen/cpumask.h     |  6 ++++++
 12 files changed, 33 insertions(+), 34 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 2a9747ed19..28ad7dd659 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -547,7 +547,7 @@ void mcheck_cmn_handler(const struct cpu_user_regs *regs)
 
             snprintf(ebuf, sizeof(ebuf),
                      "MCE: Fatal error happened on CPUs %*pb",
-                     nr_cpu_ids, cpumask_bits(&mce_fatal_cpus));
+                     CPUMASK_PR(&mce_fatal_cpus));
 
             mc_panic(ebuf);
         }
diff --git a/xen/arch/x86/crash.c b/xen/arch/x86/crash.c
index a9f3e1890c..32132e4cb9 100644
--- a/xen/arch/x86/crash.c
+++ b/xen/arch/x86/crash.c
@@ -160,7 +160,7 @@ static void nmi_shootdown_cpus(void)
         printk("Shot down all CPUs\n");
     else
         printk("Failed to shoot down CPUs {%*pbl}\n",
-               nr_cpu_ids, cpumask_bits(&waiting_to_crash));
+               CPUMASK_PR(&waiting_to_crash));
 
     /*
      * Try to crash shutdown IOMMU functionality as some old crashdump
diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
index f93f711051..5d25862bd8 100644
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -2238,8 +2238,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
         SET_DEST(entry, logical, cpu_mask_to_apicid(mask));
     } else {
         printk(XENLOG_ERR "IRQ%d: no target CPU (%*pb vs %*pb)\n",
-               irq, nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
-               nr_cpu_ids, cpumask_bits(TARGET_CPUS));
+               irq, CPUMASK_PR(desc->arch.cpu_mask), CPUMASK_PR(TARGET_CPUS));
         desc->status |= IRQ_DISABLED;
     }
 
@@ -2437,8 +2436,7 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
     else
     {
         gprintk(XENLOG_ERR, "IRQ%d: no target CPU (%*pb vs %*pb)\n",
-               irq, nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
-               nr_cpu_ids, cpumask_bits(TARGET_CPUS));
+               irq, CPUMASK_PR(desc->arch.cpu_mask), CPUMASK_PR(TARGET_CPUS));
         desc->status |= IRQ_DISABLED;
         rte.mask = 1;
     }
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 668a1f5b36..0ee33464d2 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -2398,8 +2398,7 @@ static void dump_irqs(unsigned char key)
         spin_lock_irqsave(&desc->lock, flags);
 
         printk("   IRQ:%4d aff:{%*pbl}/{%*pbl} vec:%02x %-15s status=%03x ",
-               irq, nr_cpu_ids, cpumask_bits(desc->affinity),
-               nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
+               irq, CPUMASK_PR(desc->affinity), CPUMASK_PR(desc->arch.cpu_mask),
                desc->arch.vector, desc->handler->typename, desc->status);
 
         if ( ssid )
@@ -2563,7 +2562,7 @@ void fixup_irqs(const cpumask_t *mask, bool verbose)
             printk("Cannot set affinity for IRQ%u\n", irq);
         else if ( break_affinity )
             printk("Broke affinity for IRQ%u, new: %*pb\n",
-                   irq, nr_cpu_ids, cpumask_bits(affinity));
+                   irq, CPUMASK_PR(affinity));
     }
 
     /* That doesn't seem sufficient.  Give it 1ms. */
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 3f06fecbd8..c50d910a1c 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -150,8 +150,7 @@ static long smt_up_down_helper(void *data)
 
     if ( !ret )
         printk(XENLOG_INFO "SMT %s - online CPUs 0x%*pb\n",
-               up ? "enabled" : "disabled",
-               nr_cpu_ids, cpumask_bits(&cpu_online_map));
+               up ? "enabled" : "disabled", CPUMASK_PR(&cpu_online_map));
 
     return ret;
 }
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 31ac323e40..f90e496eda 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -712,18 +712,17 @@ void dump_runq(unsigned char key)
             sched_smt_power_savings? "enabled":"disabled");
     printk("NOW=%"PRI_stime"\n", now);
 
-    printk("Online Cpus: %*pbl\n", nr_cpu_ids, cpumask_bits(&cpu_online_map));
+    printk("Online Cpus: %*pbl\n", CPUMASK_PR(&cpu_online_map));
     if ( !cpumask_empty(&cpupool_free_cpus) )
     {
-        printk("Free Cpus: %*pbl\n",
-               nr_cpu_ids, cpumask_bits(&cpupool_free_cpus));
+        printk("Free Cpus: %*pbl\n", CPUMASK_PR(&cpupool_free_cpus));
         schedule_dump(NULL);
     }
 
     for_each_cpupool(c)
     {
         printk("Cpupool %d:\n", (*c)->cpupool_id);
-        printk("Cpus: %*pbl\n", nr_cpu_ids, cpumask_bits((*c)->cpu_valid));
+        printk("Cpus: %*pbl\n", CPUMASK_PR((*c)->cpu_valid));
         schedule_dump(*c);
     }
 
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 4f4a660b0c..a5e95e2fe9 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -272,8 +272,8 @@ static void dump_domains(unsigned char key)
         printk("    nr_pages=%d xenheap_pages=%d shared_pages=%u 
paged_pages=%u "
                "dirty_cpus={%*pbl} max_pages=%u\n",
                d->tot_pages, d->xenheap_pages, atomic_read(&d->shr_pages),
-               atomic_read(&d->paged_pages), nr_cpu_ids,
-               cpumask_bits(d->dirty_cpumask), d->max_pages);
+               atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
+               d->max_pages);
         printk("    handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
                "%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
                d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
@@ -312,8 +312,8 @@ static void dump_domains(unsigned char key)
                 printk("dirty_cpu=%u", v->dirty_cpu);
             printk("\n");
             printk("    cpu_hard_affinity={%*pbl} cpu_soft_affinity={%*pbl}\n",
-                   nr_cpu_ids, cpumask_bits(v->cpu_hard_affinity),
-                   nr_cpu_ids, cpumask_bits(v->cpu_soft_affinity));
+                   CPUMASK_PR(v->cpu_hard_affinity),
+                   CPUMASK_PR(v->cpu_soft_affinity));
             printk("    pause_count=%d pause_flags=%lx\n",
                    atomic_read(&v->pause_count), v->pause_flags);
             arch_dump_vcpu_info(v);
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 3c0d7c7267..81dee5e472 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -2057,8 +2057,8 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
 
     printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%*pb, core=%*pb\n",
            cpu, spc->nr_runnable, spc->runq_sort_last,
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
+           CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
+           CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu). */
     svc = CSCHED_VCPU(curr_on_cpu(cpu));
@@ -2119,7 +2119,7 @@ csched_dump(const struct scheduler *ops)
            prv->ticks_per_tslice,
            prv->vcpu_migr_delay/ MICROSECS(1));
 
-    printk("idlers: %*pb\n", nr_cpu_ids, cpumask_bits(prv->idlers));
+    printk("idlers: %*pb\n", CPUMASK_PR(prv->idlers));
 
     printk("active vcpus:\n");
     loop = 0;
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 8e4381d8a7..33fc86ffb2 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3653,8 +3653,8 @@ dump_pcpu(const struct scheduler *ops, int cpu)
 
     printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
            cpu, c2r(cpu),
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
+           CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
+           CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
     svc = csched2_vcpu(curr_on_cpu(cpu));
@@ -3698,7 +3698,7 @@ csched2_dump(const struct scheduler *ops)
                "\taveload            = %"PRI_stime" (~%"PRI_stime"%%)\n",
                i,
                cpumask_weight(&prv->rqd[i].active),
-               nr_cpu_ids, cpumask_bits(&prv->rqd[i].active),
+               CPUMASK_PR(&prv->rqd[i].active),
                prv->rqd[i].max_weight,
                prv->rqd[i].pick_bias,
                prv->rqd[i].load,
@@ -3708,9 +3708,9 @@ csched2_dump(const struct scheduler *ops)
         printk("\tidlers: %*pb\n"
                "\ttickled: %*pb\n"
                "\tfully idle cores: %*pb\n",
-               nr_cpu_ids, cpumask_bits(&prv->rqd[i].idle),
-               nr_cpu_ids, cpumask_bits(&prv->rqd[i].tickled),
-               nr_cpu_ids, cpumask_bits(&prv->rqd[i].smt_idle));
+               CPUMASK_PR(&prv->rqd[i].idle),
+               CPUMASK_PR(&prv->rqd[i].tickled),
+               CPUMASK_PR(&prv->rqd[i].smt_idle));
     }
 
     printk("Domain info:\n");
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index c47c1b5aae..5aec9f17bd 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -782,9 +782,8 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
     printk("CPU[%02d] sibling=%*pb, core=%*pb",
-           cpu,
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
+           cpu, CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
+           CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
     if ( per_cpu(npc, cpu).vcpu != NULL )
         printk(", vcpu=%pv", per_cpu(npc, cpu).vcpu);
     printk("\n");
@@ -810,7 +809,7 @@ static void null_dump(const struct scheduler *ops)
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    printk("\tcpus_free = %*pbl\n", nr_cpu_ids, cpumask_bits(&prv->cpus_free));
+    printk("\tcpus_free = %*pbl\n", CPUMASK_PR(&prv->cpus_free));
 
     printk("Domain info:\n");
     loop = 0;
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 0acfc3d702..e0e350bdf3 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -344,8 +344,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
             has_extratime(svc),
             vcpu_on_q(svc),
             vcpu_runnable(svc->vcpu),
-            svc->flags,
-            nr_cpu_ids, cpumask_bits(mask));
+            svc->flags, CPUMASK_PR(mask));
 }
 
 static void
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index 478ec66e5b..ca712efe57 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -61,6 +61,12 @@
 
 typedef struct cpumask{ DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 
+/*
+ * printf arguments for a cpumask.  Shorthand for using '%*pb[l]' when
+ * printing a cpumask.
+ */
+#define CPUMASK_PR(src) nr_cpu_ids, cpumask_bits(src)
+
 extern unsigned int nr_cpu_ids;
 
 #if NR_CPUS > 4 * BITS_PER_LONG
-- 
2.11.0



 

