[Xen-devel] [PATCH] tools/xenalyze: Fix build with clang
1) EXIT_REASON_EXCEPTION_NMI is 0, and Clang complains:

xenalyze.c:513:33: error: initializer overrides prior initialization of this
subobject [-Werror,-Winitializer-overrides]
    [EXIT_REASON_EXCEPTION_NMI]="EXCEPTION_NMI",
                                ^~~~~~~~~~~~~~~
xenalyze.c:512:11: note: previous initialization is here
    [0] = "NONE",
          ^~~~~~

2) cr3_time_compare(), eip_compare(), ipi_send() and cr3_compare_start() are
declared as nested functions, which is a GCCism not supported by Clang.  As
they don't actually make use of the interesting feature offered by nested
functions (i.e. access to the enclosing function's local variables), move
them out to be normal functions at file scope.

3) clear_interval_summary(), update_cpi() and clear_interval_cpi() are all
unused.  The former isn't referenced anywhere, so is deleted, while the other
two are only called from currently #if 0'd code, so are compiled out with
#if 0 as well.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Ian Campbell <Ian.Campbell@xxxxxxxxxx>
CC: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
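
Not part of the change itself, but for anyone who wants to reproduce the two
clang complaints in isolation, a minimal stand-alone sketch follows.  The file
and identifier names are made up; it only mirrors the constructs xenalyze uses:

/* repro.c - hypothetical stand-alone reproducer, not part of xenalyze.
 *
 *   gcc   -Wall -Werror -c repro.c    # accepted
 *   clang -Wall -Werror -c repro.c    # fails with both diagnostics
 */
#include <stdlib.h>
#include <string.h>

#define EXIT_REASON_FOO 0   /* same index as the catch-all entry below */

static const char *reason_name[] = {
    [0] = "NONE",
    /* clang: initializer overrides prior initialization of this subobject */
    [EXIT_REASON_FOO] = "FOO",
};

const char *sort_and_pick(const char **names, size_t n)
{
    /* Nested functions are a GCC extension; clang refuses the definition.
     * Hoisting the comparator to file scope, as the patch does for
     * cr3_time_compare() and friends, keeps both compilers happy. */
    int name_compare(const void *_a, const void *_b)
    {
        return strcmp(*(const char * const *)_a, *(const char * const *)_b);
    }

    qsort(names, n, sizeof(names[0]), name_compare);
    return n ? names[0] : reason_name[0];
}

gcc builds this quietly (the override is only diagnosed with -Wextra), while
clang rejects both constructs, mirroring the failures the patch below fixes.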
---
 tools/xentrace/xenalyze.c | 188 +++++++++++++++++++++++-----------------------
 1 file changed, 92 insertions(+), 96 deletions(-)

diff --git a/tools/xentrace/xenalyze.c b/tools/xentrace/xenalyze.c
index 6520790..1651302 100644
--- a/tools/xentrace/xenalyze.c
+++ b/tools/xentrace/xenalyze.c
@@ -509,7 +509,6 @@ struct {
 #define HVM_VMX_EXIT_REASON_MAX (EXIT_REASON_XSETBV+1)
 
 char * hvm_vmx_exit_reason_name[HVM_VMX_EXIT_REASON_MAX] = {
-    [0] = "NONE",
     [EXIT_REASON_EXCEPTION_NMI]="EXCEPTION_NMI",
     [EXIT_REASON_EXTERNAL_INTERRUPT]="EXTERNAL_INTERRUPT",
     [EXIT_REASON_TRIPLE_FAULT]="TRIPLE_FAULT",
@@ -2270,11 +2269,6 @@ static inline void update_summary(struct event_cycle_summary *s, long long c) {
     s->count++;
 }
 
-static inline void clear_interval_summary(struct event_cycle_summary *s) {
-    s->interval.count = 0;
-    s->interval.cycles = 0;
-}
-
 static inline void update_cycles(struct cycle_summary *s, long long c) {
     /* We don't know ahead of time how many samples there are, and working
      * with dynamic stuff is a pain, and unnecessary.  This algorithm will
@@ -2322,6 +2316,7 @@ static inline void clear_interval_cycles(struct interval_element *e) {
     e->instructions = 0;
 }
 
+#if 0
 static inline void update_cpi(struct weighted_cpi_summary *s,
                               unsigned long long i,
                               unsigned long long c) {
@@ -2367,6 +2362,7 @@ static inline void clear_interval_cpi(struct weighted_cpi_summary *s) {
     s->interval.count = 0;
     s->interval.instructions = 0;
 }
+#endif
 
 static inline void print_cpu_affinity(struct cycle_summary *s, char *p) {
     if(s->count) {
@@ -2647,6 +2643,23 @@ void interval_cr3_value_check(struct cr3_value_struct *cr3) {
     }
 }
 
+int cr3_time_compare(const void *_a, const void *_b) {
+    struct cr3_value_struct *a=*(typeof(&a))_a;
+    struct cr3_value_struct *b=*(typeof(&a))_b;
+
+    if(a->total_time.interval.cycles < b->total_time.interval.cycles)
+        return 1;
+    else if(b->total_time.interval.cycles == a->total_time.interval.cycles) {
+        if(a->total_time.interval.count < b->total_time.interval.count)
+            return 1;
+        else if(a->total_time.interval.count == b->total_time.interval.count)
+            return 0;
+        else
+            return -1;
+    } else
+        return -1;
+}
+
 void interval_cr3_schedule_ordered_output(void) {
     struct cr3_value_struct *p;
     int i;
@@ -2654,23 +2667,6 @@ void interval_cr3_schedule_ordered_output(void) {
     struct cr3_value_struct **qsort_array;
     int N=0;
 
-    int cr3_time_compare(const void *_a, const void *_b) {
-        struct cr3_value_struct *a=*(typeof(&a))_a;
-        struct cr3_value_struct *b=*(typeof(&a))_b;
-
-        if(a->total_time.interval.cycles < b->total_time.interval.cycles)
-            return 1;
-        else if(b->total_time.interval.cycles == a->total_time.interval.cycles) {
-            if(a->total_time.interval.count < b->total_time.interval.count)
-                return 1;
-            else if(a->total_time.interval.count == b->total_time.interval.count)
-                return 0;
-            else
-                return -1;
-        } else
-            return -1;
-    }
-
     for(p=P.cr3.head; p; p=p->gnext)
         N++;
 
@@ -2966,6 +2962,23 @@ void update_eip(struct eip_list_struct **head, unsigned long long eip,
     update_summary(&p->summary, cycles);
 }
 
+int eip_compare(const void *_a, const void *_b) {
+    struct eip_list_struct *a=*(typeof(&a))_a;
+    struct eip_list_struct *b=*(typeof(&a))_b;
+
+    if(a->summary.cycles < b->summary.cycles)
+        return 1;
+    else if(b->summary.cycles == a->summary.cycles) {
+        if(a->summary.count < b->summary.count)
+            return 1;
+        else if(a->summary.count == b->summary.count)
+            return 0;
+        else
+            return -1;
+    } else
+        return -1;
+}
+
 void dump_eip(struct eip_list_struct *head) {
     struct eip_list_struct *p;
     int i;
@@ -2974,23 +2987,6 @@ void dump_eip(struct eip_list_struct *head) {
     struct eip_list_struct **qsort_array;
     int N=0;
 
-    int eip_compare(const void *_a, const void *_b) {
-        struct eip_list_struct *a=*(typeof(&a))_a;
-        struct eip_list_struct *b=*(typeof(&a))_b;
-
-        if(a->summary.cycles < b->summary.cycles)
-            return 1;
-        else if(b->summary.cycles == a->summary.cycles) {
-            if(a->summary.count < b->summary.count)
-                return 1;
-            else if(a->summary.count == b->summary.count)
-                return 0;
-            else
-                return -1;
-        } else
-            return -1;
-    }
-
     for(p=head; p; p=p->next)
     {
         total += p->summary.count;
@@ -3557,6 +3553,50 @@ struct outstanding_ipi *find_vec(struct vlapic_struct *vla, int vec)
     return o;
 }
 
+void ipi_send(struct vcpu_data *ov, int vec)
+{
+    struct vlapic_struct *vla;
+    struct outstanding_ipi *o = NULL;
+
+    if(ov->runstate.state == RUNSTATE_LOST) {
+        if(opt.dump_all)
+            fprintf(warn, "%s: v%d in state RUNSTATE_LOST, not counting ipi\n",
+                    __func__, ov->vid);
+        return;
+    }
+
+    vla = &ov->vlapic;
+
+    o = find_vec(vla, vec);
+
+    if(!o)
+    {
+        fprintf(warn, "%s: Couldn't find an open slot!\n",
+                __func__);
+        return;
+    }
+
+    if(!o->first_tsc)
+        o->first_tsc = P.now;
+
+    if(opt.dump_all && o->count == 0 && o->injected)
+        printf(" [vla] Pre-injection\n");
+
+    o->count++;
+
+    if((opt.dump_all)
+#if 0
+       && (ov->runstate.state != RUNSTATE_RUNNING
+           || ov->hvm.vmexit_valid)
+#endif
+        )
+        printf(" [vla] d%dv%d vec %d state %s (outstanding ipis %d)\n",
+               ov->d->did, ov->vid,
+               o->vec,
+               runstate_name[ov->runstate.state],
+               o->count);
+}
+
 void hvm_vlapic_icr_handler(struct hvm_data *h)
 {
     struct mmio_info *m = &h->inflight.mmio;
@@ -3575,50 +3615,6 @@ void hvm_vlapic_icr_handler(struct hvm_data *h)
         };
     } icr = { .val = m->data };
 
-    void ipi_send(struct vcpu_data *ov, int vec)
-    {
-        struct vlapic_struct *vla;
-        struct outstanding_ipi *o = NULL;
-
-        if(ov->runstate.state == RUNSTATE_LOST) {
-            if(opt.dump_all)
-                fprintf(warn, "%s: v%d in state RUNSTATE_LOST, not counting ipi\n",
-                        __func__, ov->vid);
-            return;
-        }
-
-        vla = &ov->vlapic;
-
-        o = find_vec(vla, vec);
-
-        if(!o)
-        {
-            fprintf(warn, "%s: Couldn't find an open slot!\n",
-                    __func__);
-            return;
-        }
-
-        if(!o->first_tsc)
-            o->first_tsc = P.now;
-
-        if(opt.dump_all && o->count == 0 && o->injected)
-            printf(" [vla] Pre-injection\n");
-
-        o->count++;
-
-        if((opt.dump_all)
-#if 0
-           && (ov->runstate.state != RUNSTATE_RUNNING
-               || ov->hvm.vmexit_valid)
-#endif
-            )
-            printf(" [vla] d%dv%d vec %d state %s (outstanding ipis %d)\n",
-                   ov->d->did, ov->vid,
-                   o->vec,
-                   runstate_name[ov->runstate.state],
-                   o->count);
-    }
-
     if(m->is_write) {
         if(opt.dump_all) {
             printf(" [vla] d%dv%d icr vec %d %s\n",
@@ -4124,23 +4120,23 @@ void cr3_prealloc_unpin(struct vcpu_data *v, unsigned long long gmfn) {
             gmfn, cr3->prealloc_unpin.count);
 }
 
+int cr3_compare_start(const void *_a, const void *_b) {
+    struct cr3_value_struct *a=*(typeof(&a))_a;
+    struct cr3_value_struct *b=*(typeof(&a))_b;
+
+    if(a->first_time > b->first_time)
+        return 1;
+    else if(b->first_time == a->first_time)
+        return 0;
+    else
+        return -1;
+}
+
 void cr3_dump_list(struct cr3_value_struct *head){
     struct cr3_value_struct *p;
     struct cr3_value_struct **qsort_array;
     int i, N=0;
 
-    int cr3_compare_start(const void *_a, const void *_b) {
-        struct cr3_value_struct *a=*(typeof(&a))_a;
-        struct cr3_value_struct *b=*(typeof(&a))_b;
-
-        if(a->first_time > b->first_time)
-            return 1;
-        else if(b->first_time == a->first_time)
-            return 0;
-        else
-            return -1;
-    }
-
     if(!head)
         return;
 
-- 
2.1.4