[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH] xenalyze: Add --report-pcpu option to report physical cpu utilization
Designed to help XenRT (Citrix's testing infrastructure) to determine if a given test has utilized all of the available cores. Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx> diff -r 1d7a842aecc5 -r 405fc96c0ec2 xenalyze.c --- a/xenalyze.c Tue Jul 12 14:14:21 2011 +0200 +++ b/xenalyze.c Thu Jul 14 12:55:17 2011 +0100 @@ -161,6 +161,7 @@ struct { progress:1, svm_mode:1, summary:1, + report_pcpu:1, tsc_loop_fatal:1, summary_info; long long cpu_hz; @@ -233,6 +234,7 @@ struct { .progress = 0, .svm_mode = 0, .summary = 0, + .report_pcpu = 0, .tsc_loop_fatal = 0, .cpu_hz = DEFAULT_CPU_HZ, .default_guest_paging_levels = 2, @@ -1840,6 +1842,12 @@ struct pcpu_info { unsigned buffer_size; struct trace_volume total, last_buffer; } volume; + + /* Time report */ + struct { + tsc_t tsc; + struct cycle_summary idle, running, lost; + } time; }; void __fill_in_record_info(struct pcpu_info *p); @@ -6661,6 +6669,18 @@ struct vcpu_data * vcpu_find(int did, in return v; } +void pcpu_runstate_update(struct pcpu_info *p, tsc_t tsc) +{ + if ( p->time.tsc ) + { + if ( p->current->d->did == IDLE_DOMAIN ) + update_cycles(&p->time.idle, tsc - p->time.tsc); + else + update_cycles(&p->time.running, tsc - p->time.tsc); + p->time.tsc = 0; + } +} + void vcpu_prev_update(struct pcpu_info *p, struct vcpu_data *prev, tsc_t tsc, int new_runstate) { @@ -6700,6 +6720,7 @@ void vcpu_prev_update(struct pcpu_info * } set: + pcpu_runstate_update(p, tsc); p->current = NULL; runstate_update(prev, new_runstate, tsc); } @@ -6782,6 +6803,7 @@ void vcpu_next_update(struct pcpu_info * next->p = p; p->current = next; + p->time.tsc = tsc; p->lost_record.seen_valid_schedule = 1; } @@ -6817,6 +6839,7 @@ void vcpu_start(struct pcpu_info *p, str /* Simulate the time since the first tsc */ runstate_update(v, RUNSTATE_RUNNING, p->first_tsc); + p->time.tsc = p->first_tsc; p->current = v; v->p = p; } @@ -7839,6 +7862,9 @@ void process_lost_records_end(struct pcp if(opt.dump_cooked || opt.dump_all) printf(" %s 
lost_records end ---\n", pcpu_string(p->pid)); + + update_cycles(&p->time.lost, ri->tsc - p->lost_record.tsc); + if(p->lost_record.domain_valid) { int did = p->lost_record.did, vid = p->lost_record.vid; @@ -8001,6 +8027,8 @@ void deactivate_pcpu(struct pcpu_info *p { if ( p->current ) { + pcpu_runstate_update(p, p->last_tsc); + fprintf(warn, "%s: setting d%dv%d to state LOST\n", __func__, p->current->d->did, p->current->vid); @@ -8750,6 +8778,27 @@ void summary(void) { domain_summary(); } +void report_pcpu(void) { + int i, active=0; + + for(i=0; i<MAX_CPUS; i++) + { + struct pcpu_info *p = P.pcpu+i; + if(!p->summary) + continue; + printf("pcpu %d\n", i); + + print_cycle_summary(&p->time.running, " running"); + print_cycle_summary(&p->time.idle, " idle"); + print_cycle_summary(&p->time.lost, " lost"); + + if ( p->time.running.count ) + active++; + } + printf("Total active cpus: %d\n", active); + +} + void init_pcpus(void) { int i=0; loff_t offset = 0; @@ -8818,6 +8867,7 @@ enum { /* Summary info */ OPT_SHOW_DEFAULT_DOMAIN_SUMMARY, OPT_SAMPLE_SIZE, + OPT_REPORT_PCPU, /* Guest info */ OPT_DEFAULT_GUEST_PAGING_LEVELS, OPT_SYMBOL_FILE, @@ -9238,6 +9288,11 @@ error_t cmd_parser(int key, char *arg, s opt.summary_info = 1; G.output_defined = 1; break; + case OPT_REPORT_PCPU: + opt.report_pcpu = 1; + //opt.summary_info = 1; + G.output_defined = 1; + break; /* Guest info group */ case OPT_DEFAULT_GUEST_PAGING_LEVELS: { @@ -9532,6 +9587,11 @@ const struct argp_option cmd_opts[] = { .group = OPT_GROUP_SUMMARY, .doc = "Output a summary", }, + { .name = "report-pcpu", + .key = OPT_REPORT_PCPU, + .group = OPT_GROUP_SUMMARY, + .doc = "Report utilization for pcpus", }, + /* Guest info */ { .name = "default-guest-paging-levels", .key = OPT_DEFAULT_GUEST_PAGING_LEVELS, @@ -9621,6 +9681,9 @@ int main(int argc, char *argv[]) { if(opt.summary) summary(); + if(opt.report_pcpu) + report_pcpu(); + if(opt.progress) progress_finish(); _______________________________________________ Xen-devel 
mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.