
[Xen-devel] [PATCH 11/14] xenalyze: handle scheduling events

Handle scheduling events, so the trace will show properly decoded
info rather than just a bunch of hex codes.
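
For illustration, here is how a couple of scheduling records would look
in a dump after this patch. The domain/vcpu numbers are made up, and the
per-record header that xenalyze prints is abbreviated to <header>:

  <header> sched_switch prev d0v1 next d1v0
  <header> vcpu_wake d1v0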

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Olaf Hering <olaf@xxxxxxxxx>
---
 tools/xentrace/xenalyze.c |  157 ++++++++++++++++++++++++++++++++++++---------
 1 file changed, 127 insertions(+), 30 deletions(-)

diff --git a/tools/xentrace/xenalyze.c b/tools/xentrace/xenalyze.c
index 6520790..be698aa 100644
--- a/tools/xentrace/xenalyze.c
+++ b/tools/xentrace/xenalyze.c
@@ -1519,27 +1519,6 @@ struct pv_data {
 };
 
 /* Sched data */
-
-enum {
-    SCHED_DOM_ADD=1,
-    SCHED_DOM_REM,
-    SCHED_SLEEP,
-    SCHED_WAKE,
-    SCHED_YIELD,
-    SCHED_BLOCK,
-    SCHED_SHUTDOWN,
-    SCHED_CTL,
-    SCHED_ADJDOM,
-    SCHED_SWITCH,
-    SCHED_S_TIMER_FN,
-    SCHED_T_TIMER_FN,
-    SCHED_DOM_TIMER_FN,
-    SCHED_SWITCH_INFPREV,
-    SCHED_SWITCH_INFNEXT,
-    SCHED_SHUTDOWN_CODE,
-    SCHED_MAX
-};
-
 enum {
     RUNSTATE_RUNNING=0,
     RUNSTATE_RUNNABLE,
@@ -7431,6 +7410,17 @@ no_update:
     return;
 }
 
+void dump_sched_switch(struct record_info *ri)
+{
+    struct {
+        unsigned int prev_dom, prev_vcpu, next_dom, next_vcpu;
+    } * r = (typeof(r))ri->d;
+
+    printf(" %s sched_switch prev d%uv%u next d%uv%u\n",
+           ri->dump_header, r->prev_dom, r->prev_vcpu,
+           r->next_dom, r->next_vcpu);
+}
+
 void sched_switch_process(struct pcpu_info *p)
 {
     struct vcpu_data *prev, *next;
@@ -7440,10 +7430,7 @@ void sched_switch_process(struct pcpu_info *p)
     } * r = (typeof(r))ri->d;
 
     if(opt.dump_all)
-        printf("%s sched_switch prev d%uv%u next d%uv%u\n",
-               ri->dump_header,
-               r->prev_dom, r->prev_vcpu,
-               r->next_dom, r->next_vcpu);
+        dump_sched_switch(ri);
 
     if(r->prev_vcpu > MAX_CPUS)
     {
@@ -7559,6 +7546,14 @@ void sched_summary_domain(struct domain_data *d)
     }
 }
 
+void dump_sched_vcpu_action(struct record_info *ri, const char *action)
+{
+    struct {
+        unsigned int domid, vcpuid;
+    } * r = (typeof(r))ri->d;
+
+    printf(" %s %s d%uv%u\n", ri->dump_header, action, r->domid, r->vcpuid);
+}
 
 void sched_process(struct pcpu_info *p)
 {
@@ -7573,13 +7568,115 @@ void sched_process(struct pcpu_info *p)
         default:
             process_generic(&p->ri);
         }
-    } else {
-        if(ri->evt.sub == 1)
-            sched_runstate_process(p);
-        else {
-            UPDATE_VOLUME(p, sched_verbose, ri->size);
+        return;
+    }
+
+    if(ri->evt.sub == 1) {
+        /* TRC_SCHED_MIN */
+        sched_runstate_process(p);
+    } else if (ri->evt.sub == 8) {
+        /* TRC_SCHED_VERBOSE */
+        switch(ri->event)
+        {
+        case TRC_SCHED_DOM_ADD:
+            if(opt.dump_all) {
+                struct {
+                    unsigned int domid;
+                } * r = (typeof(r))ri->d;
+
+                printf(" %s domain create d%u\n", ri->dump_header, r->domid);
+            }
+            break;
+        case TRC_SCHED_DOM_REM:
+            if(opt.dump_all) {
+                struct {
+                    unsigned int domid, vcpuid;
+                } * r = (typeof(r))ri->d;
+
+                printf(" %s domain destroy d%u\n", ri->dump_header, r->domid);
+            }
+            break;
+        case TRC_SCHED_SLEEP:
+            if(opt.dump_all)
+                dump_sched_vcpu_action(ri, "vcpu_sleep");
+            break;
+        case TRC_SCHED_WAKE:
+            if(opt.dump_all)
+                dump_sched_vcpu_action(ri, "vcpu_wake");
+            break;
+        case TRC_SCHED_YIELD:
+            if(opt.dump_all)
+                dump_sched_vcpu_action(ri, "vcpu_yield");
+            break;
+        case TRC_SCHED_BLOCK:
+            if(opt.dump_all)
+                dump_sched_vcpu_action(ri, "vcpu_block");
+            break;
+        case TRC_SCHED_SHUTDOWN:
+        case TRC_SCHED_SHUTDOWN_CODE:
+            if(opt.dump_all) {
+                struct {
+                    unsigned int domid, vcpuid, reason;
+                } * r = (typeof(r))ri->d;
+
+                printf(" %s %s d%uv%u, reason = %u\n", ri->dump_header,
+                       ri->event == TRC_SCHED_SHUTDOWN ? "sched_shutdown" :
+                       "sched_shutdown_code", r->domid, r->vcpuid, r->reason);
+            }
+            break;
+        case TRC_SCHED_ADJDOM:
+            if(opt.dump_all) {
+                struct {
+                    unsigned int domid;
+                } * r = (typeof(r))ri->d;
+
+                printf(" %s sched_adjust d%u\n", ri->dump_header, r->domid);
+            }
+            break;
+        case TRC_SCHED_SWITCH:
+            if(opt.dump_all)
+                dump_sched_switch(ri);
+            break;
+        case TRC_SCHED_SWITCH_INFPREV:
+            if(opt.dump_all) {
+                struct {
+                    unsigned int domid, runtime;
+                } * r = (typeof(r))ri->d;
+
+                printf(" %s sched_switch prev d%u, run for %u.%uus\n",
+                       ri->dump_header, r->domid, r->runtime / 1000,
+                       r->runtime % 1000);
+            }
+            break;
+        case TRC_SCHED_SWITCH_INFNEXT:
+            if(opt.dump_all)
+            {
+                struct {
+                    unsigned int domid, rsince;
+                    int slice;
+                } * r = (typeof(r))ri->d;
+
+                printf(" %s sched_switch next d%u", ri->dump_header, r->domid);
+                if ( r->rsince != 0 )
+                    printf(", was runnable for %u.%uus, ", r->rsince / 1000,
+                           r->rsince % 1000);
+                if ( r->slice > 0 )
+                    printf("next slice %u.%uus\n", r->slice / 1000,
+                           r->slice % 1000);
+                printf("\n");
+            }
+            break;
+        case TRC_SCHED_CTL:
+        case TRC_SCHED_S_TIMER_FN:
+        case TRC_SCHED_T_TIMER_FN:
+        case TRC_SCHED_DOM_TIMER_FN:
+            break;
+        default:
             process_generic(&p->ri);
         }
+    } else {
+        UPDATE_VOLUME(p, sched_verbose, ri->size);
+        process_generic(&p->ri);
     }
 }
 

