
[Xen-changelog] [xen master] xen/tools: improve tracing of Credit2 load tracking events



commit f10d19329a3caba200e4d20f17cadc9cf509eba3
Author:     Dario Faggioli <dario.faggioli@xxxxxxxxxx>
AuthorDate: Fri Jul 15 16:49:33 2016 +0200
Commit:     George Dunlap <george.dunlap@xxxxxxxxxx>
CommitDate: Mon Jul 18 17:52:47 2016 +0100

    xen/tools: improve tracing of Credit2 load tracking events
    
    Add the shift used for the precision of the integer
    arithmetic to the trace records, and update both xenalyze
    and xentrace_format to make use of/print it.
    
    In particular, in xenalyze, we can now show the
    load as an (easier to interpret) percentage.
    
    Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
    Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
    Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 tools/xentrace/formats     |  4 ++--
 tools/xentrace/xenalyze.c  | 25 ++++++++++++++++++-------
 xen/common/sched_credit2.c | 11 +++++++----
 3 files changed, 27 insertions(+), 13 deletions(-)
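
A quick reading aid, not part of the patch: the load values in these trace
records are fixed-point numbers, and the newly recorded shift tells consumers
how many fractional bits they carry, so that (1 << shift) maps to 100% in the
percentage view. Below is a minimal sketch of the conversion xenalyze now
performs, under that assumption; the load_to_percent() helper and the shift
value of 18 are illustrative only, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Convert a fixed-point load with 'shift' fractional bits into a percentage:
 * (1 << shift) is treated as 100%. */
static double load_to_percent(uint64_t load, unsigned int shift)
{
    return (load * 100.0) / (double)(1ULL << shift);
}

int main(void)
{
    /* Illustrative values: half of (1 << 18) should print as 50.000%. */
    printf("%4.3f%%\n", load_to_percent(1ULL << 17, 18));
    return 0;
}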

diff --git a/tools/xentrace/formats b/tools/xentrace/formats
index d204351..2e58d03 100644
--- a/tools/xentrace/formats
+++ b/tools/xentrace/formats
@@ -53,8 +53,8 @@
 0x00022208  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  csched2:sched_tasklet
 0x00022209  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  csched2:update_load
 0x0002220a  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  csched2:runq_assign    [ dom:vcpu = 0x%(1)08x, rq_id = %(2)d ]
-0x0002220b  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  csched2:updt_vcpu_load [ dom:vcpu = 0x%(1)08x, avgload = %(2)d ]
-0x0002220c  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  csched2:updt_runq_load [ rq_load[4]:rq_avgload[28] = 0x%(1)08x, rq_id[4]:b_avgload[28] = 0x%(2)08x ]
+0x0002220b  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  csched2:updt_vcpu_load [ dom:vcpu = 0x%(3)08x, vcpuload = 0x%(2)08x%(1)08x, wshift = %(4)d ]
+0x0002220c  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  csched2:updt_runq_load [ rq_load[16]:rq_id[8]:wshift[8] = 0x%(5)08x, rq_avgload = 0x%(2)08x%(1)08x, b_avgload = 0x%(4)08x%(3)08x ]
 
 0x00022801  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  rtds:tickle        [ cpu = %(1)d ]
 0x00022802  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  rtds:runq_pick     [ dom:vcpu = 0x%(1)08x, cur_deadline = 0x%(3)08x%(2)08x, cur_budget = 0x%(5)08x%(4)08x ]
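
Again as an illustration only, the sketch below shows how a hypothetical
consumer could unpack the five 32-bit data words of the new
csched2:updt_runq_load record above, assuming the usual little-endian
bitfield layout (first-declared field in the least significant bits) that the
xenalyze struct overlay further down in this patch relies on;
decode_runq_load() and the sample words are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* w[0],w[1] = rq_avgload (low, high words), w[2],w[3] = b_avgload,
 * w[4]      = rq_load[16] : rq_id[8] : wshift[8], rq_load in the low bits. */
static void decode_runq_load(const uint32_t w[5])
{
    uint64_t rq_avgload = ((uint64_t)w[1] << 32) | w[0];
    uint64_t b_avgload  = ((uint64_t)w[3] << 32) | w[2];
    unsigned int rq_load = w[4] & 0xffff;
    unsigned int rq_id   = (w[4] >> 16) & 0xff;
    unsigned int shift   = (w[4] >> 24) & 0xff;

    printf("rq# %u, load = %u, avgload = %4.3f%%, b_avgload = %4.3f%%\n",
           rq_id, rq_load,
           (rq_avgload * 100.0) / (1ULL << shift),
           (b_avgload * 100.0) / (1ULL << shift));
}

int main(void)
{
    /* Made-up record: runqueue 0, 3 runnable vcpus, shift 18,
     * rq_avgload = 0.25 and b_avgload = 0.125 in fixed point. */
    const uint32_t w[5] = { 1u << 16, 0, 1u << 15, 0, (18u << 24) | 3u };

    decode_runq_load(w);
    return 0;
}
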
diff --git a/tools/xentrace/xenalyze.c b/tools/xentrace/xenalyze.c
index 01ead8b..f2f97bd 100644
--- a/tools/xentrace/xenalyze.c
+++ b/tools/xentrace/xenalyze.c
@@ -7802,25 +7802,36 @@ void sched_process(struct pcpu_info *p)
         case TRC_SCHED_CLASS_EVT(CSCHED2, 11): /* UPDATE_VCPU_LOAD */
             if(opt.dump_all) {
                 struct {
+                    uint64_t vcpuload;
                     unsigned int vcpuid:16, domid:16;
-                    unsigned int avgload;
+                    unsigned int shift;
                 } *r = (typeof(r))ri->d;
+                double vcpuload;
 
-                printf(" %s csched2:update_vcpu_load d%uv%u, avg_load = %u\n",
-                       ri->dump_header, r->domid, r->vcpuid, r->avgload);
+                vcpuload = (r->vcpuload * 100.0) / (1ULL << r->shift);
+
+                printf(" %s csched2:update_vcpu_load d%uv%u, "
+                       "vcpu_load = %4.3f%% (%"PRIu64")\n",
+                       ri->dump_header, r->domid, r->vcpuid, vcpuload,
+                       r->vcpuload);
             }
             break;
         case TRC_SCHED_CLASS_EVT(CSCHED2, 12): /* UPDATE_RUNQ_LOAD */
             if(opt.dump_all) {
                 struct {
-                    unsigned int rq_load:4, rq_avgload:28;
-                    unsigned int rq_id:4, b_avgload:28;
+                    uint64_t rq_avgload, b_avgload;
+                    unsigned int rq_load:16, rq_id:8, shift:8;
                 } *r = (typeof(r))ri->d;
+                double avgload, b_avgload;
+
+                avgload = (r->rq_avgload * 100.0) / (1ULL << r->shift);
+                b_avgload = (r->b_avgload * 100.0) / (1ULL << r->shift);
 
                 printf(" %s csched2:update_rq_load rq# %u, load = %u, "
-                       "avgload = %u, b_avgload = %u\n",
+                       "avgload = %4.3f%% (%"PRIu64"), "
+                       "b_avgload = %4.3f%% (%"PRIu64")\n",
                        ri->dump_header, r->rq_id, r->rq_load,
-                       r->rq_avgload, r->b_avgload);
+                       avgload, r->rq_avgload, b_avgload, r->b_avgload);
             }
             break;
         /* RTDS (TRC_RTDS_xxx) */
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index e695f1b..2978eac 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -635,13 +635,14 @@ __update_runq_load(const struct scheduler *ops,
 
     {
         struct {
-            unsigned rq_load:4, rq_avgload:28;
-            unsigned rq_id:4, b_avgload:28;
+            uint64_t rq_avgload, b_avgload;
+            unsigned rq_load:16, rq_id:8, shift:8;
         } d;
-        d.rq_id=rqd->id;
+        d.rq_id = rqd->id;
         d.rq_load = rqd->load;
         d.rq_avgload = rqd->avgload;
         d.b_avgload = rqd->b_avgload;
+        d.shift = P;
         trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1,
                   sizeof(d),
                   (unsigned char *)&d);
@@ -689,12 +690,14 @@ __update_svc_load(const struct scheduler *ops,
 
     {
         struct {
+            uint64_t v_avgload;
             unsigned vcpu:16, dom:16;
-            unsigned v_avgload:32;
+            unsigned shift;
         } d;
         d.dom = svc->vcpu->domain->domain_id;
         d.vcpu = svc->vcpu->vcpu_id;
         d.v_avgload = svc->avgload;
+        d.shift = P;
         trace_var(TRC_CSCHED2_UPDATE_VCPU_LOAD, 1,
                   sizeof(d),
                   (unsigned char *)&d);
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog