[Xen-devel] [PATCH 14/19] xen: credit2: add yet some more tracing
(and fix the style of two labels as well.)
Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
Cc: Anshul Makkar <anshul.makkar@xxxxxxxxxx>
Cc: David Vrabel <david.vrabel@xxxxxxxxxx>
---
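A quick note on what is being added, for anyone wanting to decode the
new records: the patch introduces three new trace events
(TRC_CSCHED2_LOAD_CHECK, TRC_CSCHED2_LOAD_BALANCE and
TRC_CSCHED2_PICKED_CPU), plus a payload-less trace point in
update_load() that reuses the already existing TRC_CSCHED2_UPDATE_LOAD
event. A consumer (xenalyze, or a custom tool) needs to know the
payload layouts to pretty print them. Below is a minimal host-side
decoding sketch mirroring the 'd' locals in the patch; the struct
names are made up here, and it assumes a 64-bit little-endian build
where the 16-bit bitfields land in memory like consecutive uint16_t
fields, which is not guaranteed by C in general:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Payload of TRC_CSCHED2_LOAD_CHECK (8 bytes). */
    struct csched2_load_check {
        uint16_t lrq_id, orq_id;
        uint32_t load_delta;
    };

    /* Payload of TRC_CSCHED2_LOAD_BALANCE (24 bytes, incl. tail padding). */
    struct csched2_load_balance {
        uint64_t lb_avgload, ob_avgload;
        uint16_t lrq_id, orq_id;
    };

    /* Payload of TRC_CSCHED2_PICKED_CPU (16 bytes). */
    struct csched2_picked_cpu {
        uint64_t b_avgload;
        uint16_t vcpu, dom;
        uint16_t rq_id, new_cpu;
    };

    /* Pretty print one TRC_CSCHED2_PICKED_CPU payload. */
    void dump_picked_cpu(const struct csched2_picked_cpu *p)
    {
        printf("csched2:picked_cpu d%"PRIu16"v%"PRIu16" -> rq %"PRIu16
               ", cpu %"PRIu16" (b_avgload = %"PRIu64")\n",
               p->dom, p->vcpu, p->rq_id, p->new_cpu, p->b_avgload);
    }

    int main(void)
    {
        /* A fabricated sample record, just to exercise the printer. */
        struct csched2_picked_cpu sample = {
            .b_avgload = 1ULL << 20, .vcpu = 0, .dom = 1, .rq_id = 0, .new_cpu = 3,
        };

        dump_picked_cpu(&sample);
        return 0;
    }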
xen/common/sched_credit2.c | 58 +++++++++++++++++++++++++++++++++++++++++---
1 file changed, 54 insertions(+), 4 deletions(-)
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index ba3a78a..e9f3f13 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -46,6 +46,9 @@
 #define TRC_CSCHED2_TICKLE_NEW       TRC_SCHED_CLASS_EVT(CSCHED2, 13)
 #define TRC_CSCHED2_RUNQ_MAX_WEIGHT  TRC_SCHED_CLASS_EVT(CSCHED2, 14)
 #define TRC_CSCHED2_MIGRATE          TRC_SCHED_CLASS_EVT(CSCHED2, 15)
+#define TRC_CSCHED2_LOAD_CHECK       TRC_SCHED_CLASS_EVT(CSCHED2, 16)
+#define TRC_CSCHED2_LOAD_BALANCE     TRC_SCHED_CLASS_EVT(CSCHED2, 17)
+#define TRC_CSCHED2_PICKED_CPU       TRC_SCHED_CLASS_EVT(CSCHED2, 19)
 /*
  * WARNING: This is still in an experimental phase. Status and work can be
  * found at the
@@ -709,6 +712,8 @@ update_load(const struct scheduler *ops,
             struct csched2_runqueue_data *rqd,
             struct csched2_vcpu *svc, int change, s_time_t now)
 {
+    trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0, NULL);
+
     __update_runq_load(ops, rqd, change, now);
     if ( svc )
         __update_svc_load(ops, svc, change, now);
@@ -1484,6 +1489,23 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
 out_up:
     spin_unlock(&prv->lock);
+    /* TRACE */
+    {
+        struct {
+            uint64_t b_avgload;
+            unsigned vcpu:16, dom:16;
+            unsigned rq_id:16, new_cpu:16;
+        } d;
+        d.b_avgload = prv->rqd[min_rqi].b_avgload;
+        d.dom = vc->domain->domain_id;
+        d.vcpu = vc->vcpu_id;
+        d.rq_id = c2r(ops, new_cpu);
+        d.new_cpu = new_cpu;
+        trace_var(TRC_CSCHED2_PICKED_CPU, 1,
+                  sizeof(d),
+                  (unsigned char *)&d);
+    }
+
     return new_cpu;
 }
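For the record being traced above: with the usual ABI of a 64-bit
little-endian build, the two pairs of 16-bit bitfields pack into two
32-bit words after the 64-bit b_avgload, so trace_var() copies a
16-byte payload. A quick self-contained check of that assumption
(mine, not part of the patch):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        /* Same anonymous-struct shape as the 'd' local in the hunk above. */
        struct {
            uint64_t b_avgload;
            unsigned vcpu:16, dom:16;
            unsigned rq_id:16, new_cpu:16;
        } d = { .b_avgload = 0, .vcpu = 3, .dom = 1, .rq_id = 0, .new_cpu = 5 };

        /* 8 bytes of b_avgload plus two 32-bit words of packed 16-bit fields. */
        assert(sizeof(d) == 16);

        /* The copy trace_var() effectively performs into the trace buffer. */
        unsigned char payload[16];
        memcpy(payload, &d, sizeof(d));

        return 0;
    }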
@@ -1611,7 +1633,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     bool_t inner_load_updated = 0;
     balance_state_t st = { .best_push_svc = NULL, .best_pull_svc = NULL };
-
+
     /*
      * Basic algorithm: Push, pull, or swap.
      * - Find the runqueue with the furthest load distance
@@ -1677,6 +1699,20 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
         if ( i > cpus_max )
             cpus_max = i;
+        /* TRACE */
+        {
+            struct {
+                unsigned lrq_id:16, orq_id:16;
+                unsigned load_delta;
+            } d;
+            d.lrq_id = st.lrqd->id;
+            d.orq_id = st.orqd->id;
+            d.load_delta = st.load_delta;
+            trace_var(TRC_CSCHED2_LOAD_CHECK, 1,
+                      sizeof(d),
+                      (unsigned char *)&d);
+        }
+
         /*
          * If we're under 100% capacity, only shift if load difference
          * is > 1; otherwise, shift if under 12.5%
@@ -1705,6 +1741,21 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     if ( unlikely(st.orqd->id < 0) )
         goto out_up;
+    /* TRACE */
+    {
+        struct {
+            uint64_t lb_avgload, ob_avgload;
+            unsigned lrq_id:16, orq_id:16;
+        } d;
+        d.lrq_id = st.lrqd->id;
+        d.lb_avgload = st.lrqd->b_avgload;
+        d.orq_id = st.orqd->id;
+        d.ob_avgload = st.orqd->b_avgload;
+        trace_var(TRC_CSCHED2_LOAD_BALANCE, 1,
+                  sizeof(d),
+                  (unsigned char *)&d);
+    }
+
     now = NOW();
 
     /* Look for "swap" which gives the best load average
@@ -1756,10 +1807,9 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     if ( st.best_pull_svc )
         migrate(ops, st.best_pull_svc, st.lrqd, now);
 
-out_up:
+ out_up:
     spin_unlock(&st.orqd->lock);
-
-out:
+ out:
     return;
 }
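A remark on the pattern shared by all these trace points: each payload
is assembled in a small on-stack struct and handed to trace_var()
which, if memory serves, is the variant that checks whether tracing
has actually been initialised before calling __trace_var(), so a trace
point costs little more than a test and a branch while tracing is off.
Packing the various IDs into 16-bit fields keeps the records compact,
in the same style as the existing TICKLE_NEW and MIGRATE records.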