Re: [Xen-devel] [PATCH 6/7] xen: sched_credit2: more info when dumping
On 03/16/2015 05:05 PM, Dario Faggioli wrote:
> more specifically, for each runqueue, print what pCPUs
> belong to it, which ones are idle and which ones have
> been tickled.
>
> While there, also convert the whole file to use
> keyhandler_scratch for printing cpumask-s.
>
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Jan Beulich <JBeulich@xxxxxxxx>
> Cc: Keir Fraser <keir@xxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
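
A quick note for readers not familiar with keyhandler_scratch: it is the shared scratch buffer declared in xen/keyhandler.h for use from debug-key (dump) context, which is where these dump routines run, so borrowing it avoids a 100-byte array on each dump function's stack. A minimal sketch of the pattern the patch adopts (illustrative only, not part of the patch):

    #include <xen/keyhandler.h>

    /*
     * Alias the shared scratch area; because keyhandler_scratch is a
     * fixed-size array (not a pointer), the existing sizeof(cpustr)
     * uses keep returning the full buffer length.
     */
    #define cpustr keyhandler_scratch

    cpulist_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].active);
    printk("\tcpus = %s\n", cpustr);   /* range list, e.g. "0-3,8" */

    #undef cpustr
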
> ---
>  xen/common/sched_credit2.c | 13 ++++++++++++-
>  1 file changed, 12 insertions(+), 1 deletion(-)
> 
> diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
> index 564f890..df29438 100644
> --- a/xen/common/sched_credit2.c
> +++ b/xen/common/sched_credit2.c
> @@ -25,6 +25,7 @@
>  #include <xen/errno.h>
>  #include <xen/trace.h>
>  #include <xen/cpu.h>
> +#include <xen/keyhandler.h>
> 
>  #define d2printk(x...)
>  //#define d2printk printk
> @@ -1804,7 +1805,7 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
>      unsigned long flags;
>      spinlock_t *lock;
>      int loop;
> -    char cpustr[100];
> +#define cpustr keyhandler_scratch
> 
>      /*
>       * We need both locks:
> @@ -1845,6 +1846,7 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
> 
>      spin_unlock(lock);
>      spin_unlock_irqrestore(&prv->lock, flags);
> +#undef cpustr
>  }
> 
>  static void
> @@ -1854,6 +1856,7 @@ csched2_dump(const struct scheduler *ops)
>      struct csched2_private *prv = CSCHED2_PRIV(ops);
>      unsigned long flags;
>      int i, loop;
> +#define cpustr keyhandler_scratch
> 
>      /* We need the private lock as we access global scheduler data
>       * and (below) the list of active domains. */
> @@ -1869,17 +1872,24 @@ csched2_dump(const struct scheduler *ops)
> 
>          fraction = prv->rqd[i].avgload * 100 /
>              (1ULL<<prv->load_window_shift);
> 
> +        cpulist_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].active);
>          printk("Runqueue %d:\n"
>                 "\tncpus = %u\n"
> +               "\tcpus = %s\n"
>                 "\tmax_weight = %d\n"
>                 "\tinstload = %d\n"
>                 "\taveload = %3"PRI_stime"\n",
>                 i,
>                 cpumask_weight(&prv->rqd[i].active),
> +               cpustr,
>                 prv->rqd[i].max_weight,
>                 prv->rqd[i].load,
>                 fraction);
> 
> +        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].idle);
> +        printk("\tidlers: %s\n", cpustr);
> +        cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].tickled);
> +        printk("\ttickled: %s\n", cpustr);
>      }
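
Not an objection (the patch is fine as is), but if more masks end up being printed later, the repeated scnprintf+printk pairs could be folded into a small helper along these lines; dump_cpumask() is a made-up name and the snippet is untested:

    /* Hypothetical helper: format a mask into the shared scratch buffer
     * and print it with a label. */
    static void dump_cpumask(const char *label, const cpumask_t *mask)
    {
        cpumask_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
        printk("\t%s: %s\n", label, keyhandler_scratch);
    }

    /* ...which would reduce the two additions above to: */
    dump_cpumask("idlers", &prv->rqd[i].idle);
    dump_cpumask("tickled", &prv->rqd[i].tickled);
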
> 
>      printk("Domain info:\n");
> @@ -1910,6 +1920,7 @@ csched2_dump(const struct scheduler *ops)
>      }
> 
>      spin_unlock_irqrestore(&prv->lock, flags);
> +#undef cpustr
>  }
> 
>  static void activate_runqueue(struct csched2_private *prv, int rqi)
> 
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel