[Xen-changelog] [xen-3.2-testing] x86: Fix build after xentrace changes.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1203343726 0
# Node ID d4d6d997aaf8a2e74c141fbdad5f52170ac4d692
# Parent  8cfc3863da72e54caa7fd37ccba7c2d99a0edb58
x86: Fix build after xentrace changes.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset: 17013:e3d417c4786b3fa25e09ce4be37d2ad56ea3898b
xen-unstable date:      Mon Feb 11 10:50:57 2008 +0000

xentrace: Remove redundant tb_init_done checks, and add missing ones.

Hand inspection of gcc -O2 output confirms significantly shorter
codepaths for inactive (i.e. the normal case) tracing.

Signed-off-by: Michael A Fetterman <Michael.Fetterman@xxxxxxxxxxxx>
xen-unstable changeset: 17002:c68ce89542c7fbba9d00fd3a7d4e190476554e55
xen-unstable date:      Mon Feb 11 09:47:19 2008 +0000

xentrace: Improve xentrace to use VIRQ_TBUF interrupts as well as a
user-specified polling interval in order to determine when to empty
the trace buffers.  Removed the old and unused/unimplemented
new_data_threshold logic.

Signed-off-by: Michael A Fetterman <Michael.Fetterman@xxxxxxxxxxxx>
xen-unstable changeset: 17001:7a415bce11c9c12a5af36b3504d8ab43bdef8aab
xen-unstable date:      Mon Feb 11 09:46:53 2008 +0000

xentrace: Allow xentrace to handle >4G of trace data.  It was
previously assert'ing when it hit 4G.

Also, because the trace buffer is not a power of 2 in size, using
modulo arithmetic to address the buffer does not work when the index
wraps around 2^32.  This patch fixes both issues and, as a side
effect, removes all integer division from the hypervisor side of the
trace mechanism.

Signed-off-by: Michael A Fetterman <Michael.Fetterman@xxxxxxxxxxxx>
xen-unstable changeset: 17000:98e9d5d4b309c82886d7740aa88c29c334a4fff9
xen-unstable date:      Mon Feb 11 09:46:21 2008 +0000

xentrace: Fix bug in logic for bytes_to_wrap in trace buffer.

Admittedly, the bug could only manifest with much larger trace
records than are currently allowed (or, equivalently, much smaller
trace buffers), but the old code was harder to read, and thus hid the
logic bug well, too.
Signed-off-by: Michael A Fetterman <Michael.Fetterman@xxxxxxxxxxxx>
xen-unstable changeset: 16999:7d03c0b0750482ec96ec9fcf93c2b585f7740af5
xen-unstable date:      Mon Feb 11 09:45:36 2008 +0000
---
 tools/xentrace/xentrace.c       |  153 ++++++++++++++++++++++++----------------
 xen/arch/x86/trace.c            |   18 ----
 xen/common/trace.c              |  100 ++++++++++++++++----------
 xen/include/asm-x86/hvm/trace.h |   74 ++++++++++++-------
 xen/include/asm-x86/trace.h     |   10 +-
 xen/include/public/trace.h      |    8 ++
 xen/include/xen/trace.h         |   73 +++++++++++--------
 7 files changed, 262 insertions(+), 174 deletions(-)
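The ">4G" changeset above is easiest to see with concrete numbers. The
stand-alone program below is an illustration only (not code from this
patch; the buffer size of 1000 is arbitrary): it shows why reducing a
free-running 32-bit byte counter modulo a non-power-of-2 buffer size
gives the wrong offset once the counter wraps, which is exactly why the
patch keeps cons and prod bounded in [0, 2*size) instead.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t X = 1000;          /* buffer size: not a power of 2 */

        uint32_t before = UINT32_MAX;     /* free-running counter at 2^32 - 1 */
        uint32_t after  = before + 1;     /* a uint32_t wraps to 0, not 2^32 */

        printf("offset before wrap:   %u\n", before % X);  /* 295 */
        printf("offset after wrap:    %u\n", after % X);   /* 0 -- wrong */
        printf("offset really wanted: %u\n",
               (uint32_t)(((uint64_t)before + 1) % X));    /* 296 */
        return 0;
    }

When X is a power of 2 the last two values agree, because X then divides
2^32; for any other X the offsets diverge at the wrap point.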
diff -r 8cfc3863da72 -r d4d6d997aaf8 tools/xentrace/xentrace.c
--- a/tools/xentrace/xentrace.c Mon Feb 18 14:06:18 2008 +0000
+++ b/tools/xentrace/xentrace.c Mon Feb 18 14:08:46 2008 +0000
@@ -23,6 +23,7 @@
 #include <string.h>
 #include <getopt.h>
 #include <assert.h>
+#include <sys/poll.h>
 
 #include <xen/xen.h>
 #include <xen/trace.h>
@@ -40,9 +41,6 @@ do {
 
 /***** Compile time configuration of defaults ********************************/
 
-/* when we've got more records than this waiting, we log it to the output */
-#define NEW_DATA_THRESH 1
-
 /* sleep for this long (milliseconds) between checking the trace buffers */
 #define POLL_SLEEP_MILLIS 100
 
@@ -51,8 +49,7 @@ do {
 typedef struct settings_st {
     char *outfile;
-    struct timespec poll_sleep;
-    unsigned long new_data_thresh;
+    unsigned long poll_sleep; /* milliseconds to sleep between polls */
    uint32_t evt_mask;
    uint32_t cpu_mask;
    unsigned long tbuf_size;
@@ -63,23 +60,13 @@ settings_t opts;
 
 int interrupted = 0; /* gets set if we get a SIGHUP */
 
+static int xc_handle = -1;
+static int event_fd = -1;
+static int virq_port = -1;
+
 void close_handler(int signal)
 {
     interrupted = 1;
-}
-
-/**
- * millis_to_timespec - convert a time in milliseconds to a struct timespec
- * @millis: time interval in milliseconds
- */
-struct timespec millis_to_timespec(unsigned long millis)
-{
-    struct timespec spec;
-
-    spec.tv_sec = millis / 1000;
-    spec.tv_nsec = (millis % 1000) * 1000;
-
-    return spec;
 }
 
 /**
@@ -143,13 +130,7 @@ void write_buffer(unsigned int cpu, unsi
 
 static void get_tbufs(unsigned long *mfn, unsigned long *size)
 {
-    int xc_handle = xc_interface_open();
     int ret;
-
-    if ( xc_handle < 0 )
-    {
-        exit(EXIT_FAILURE);
-    }
 
     if(!opts.tbuf_size)
         opts.tbuf_size = DEFAULT_TBUF_SIZE;
@@ -161,8 +142,6 @@ static void get_tbufs(unsigned long *mfn
         perror("Couldn't enable trace buffers");
         exit(1);
     }
-
-    xc_interface_close(xc_handle);
 }
 
 /**
@@ -176,22 +155,12 @@ struct t_buf *map_tbufs(unsigned long tb
 struct t_buf *map_tbufs(unsigned long tbufs_mfn, unsigned int num,
                         unsigned long size)
 {
-    int xc_handle;
     struct t_buf *tbufs_mapped;
-
-    xc_handle = xc_interface_open();
-
-    if ( xc_handle < 0 )
-    {
-        exit(EXIT_FAILURE);
-    }
 
     tbufs_mapped = xc_map_foreign_range(xc_handle, DOMID_XEN,
                                         size * num, PROT_READ | PROT_WRITE,
                                         tbufs_mfn);
 
-    xc_interface_close(xc_handle);
-
     if ( tbufs_mapped == 0 )
     {
         PERROR("Failed to mmap trace buffers");
@@ -210,7 +179,6 @@ void set_mask(uint32_t mask, int type)
 void set_mask(uint32_t mask, int type)
 {
     int ret = 0;
-    int xc_handle = xc_interface_open(); /* for accessing control interface */
 
     if (type == 1) {
         ret = xc_tbuf_set_cpu_mask(xc_handle, mask);
@@ -220,8 +188,6 @@ void set_mask(uint32_t mask, int type)
         fprintf(stderr, "change evtmask to 0x%x\n", mask);
     }
 
-    xc_interface_close(xc_handle);
-
     if ( ret != 0 )
     {
         PERROR("Failure to get trace buffer pointer from Xen and set the new mask");
@@ -295,7 +261,6 @@ unsigned int get_num_cpus(void)
 unsigned int get_num_cpus(void)
 {
     xc_physinfo_t physinfo = { 0 };
-    int xc_handle = xc_interface_open();
     int ret;
 
     ret = xc_physinfo(xc_handle, &physinfo);
@@ -306,9 +271,68 @@ unsigned int get_num_cpus(void)
         exit(EXIT_FAILURE);
     }
 
-    xc_interface_close(xc_handle);
-
     return physinfo.nr_cpus;
+}
+
+/**
+ * event_init - setup to receive the VIRQ_TBUF event
+ */
+void event_init(void)
+{
+    int rc;
+
+    rc = xc_evtchn_open();
+    if (rc < 0) {
+        perror(xc_get_last_error()->message);
+        exit(EXIT_FAILURE);
+    }
+    event_fd = rc;
+
+    rc = xc_evtchn_bind_virq(event_fd, VIRQ_TBUF);
+    if (rc == -1) {
+        PERROR("failed to bind to VIRQ port");
+        exit(EXIT_FAILURE);
+    }
+    virq_port = rc;
+}
+
+/**
+ * wait_for_event_or_timeout - sleep for the specified number of milliseconds,
+ * or until a VIRQ_TBUF event occurs
+ */
+void wait_for_event_or_timeout(unsigned long milliseconds)
+{
+    int rc;
+    struct pollfd fd = { .fd = event_fd,
+                         .events = POLLIN | POLLERR };
+    int port;
+
+    rc = poll(&fd, 1, milliseconds);
+    if (rc == -1) {
+        if (errno == EINTR)
+            return;
+        PERROR("poll exited with an error");
+        exit(EXIT_FAILURE);
+    }
+
+    if (rc == 1) {
+        port = xc_evtchn_pending(event_fd);
+        if (port == -1) {
+            PERROR("failed to read port from evtchn");
+            exit(EXIT_FAILURE);
+        }
+        if (port != virq_port) {
+            fprintf(stderr,
+                    "unexpected port returned from evtchn (got %d vs expected %d)\n",
+                    port, virq_port);
+            exit(EXIT_FAILURE);
+        }
+        rc = xc_evtchn_unmask(event_fd, port);
+        if (rc == -1) {
+            PERROR("failed to write port to evtchn");
+            exit(EXIT_FAILURE);
+        }
+    }
 }
 
@@ -329,6 +353,9 @@ int monitor_tbufs(int outfd)
     unsigned long size;                 /* size of a single trace buffer */
 
     unsigned long data_size;
+
+    /* prepare to listen for VIRQ_TBUF */
+    event_init();
 
     /* get number of logical CPUs (and therefore number of trace buffers) */
     num = get_num_cpus();
@@ -362,9 +389,18 @@ int monitor_tbufs(int outfd)
             if ( cons == prod )
                 continue;
 
-            assert(prod > cons);
-
-            window_size = prod - cons;
+            assert(cons < 2*data_size);
+            assert(prod < 2*data_size);
+
+            // NB: if (prod<cons), then (prod-cons)%data_size will not yield
+            // the correct answer because data_size is not a power of 2.
+            if ( prod < cons )
+                window_size = (prod + 2*data_size) - cons;
+            else
+                window_size = prod - cons;
+            assert(window_size > 0);
+            assert(window_size <= data_size);
+
             start_offset = cons % data_size;
             end_offset = prod % data_size;
@@ -396,7 +432,7 @@ int monitor_tbufs(int outfd)
             meta[i]->cons = prod;
         }
 
-        nanosleep(&opts.poll_sleep, NULL);
+        wait_for_event_or_timeout(opts.poll_sleep);
     }
 
     /* cleanup */
@@ -416,7 +452,7 @@ int monitor_tbufs(int outfd)
 #define xstr(x) str(x)
 #define str(x) #x
 
-const char *program_version = "xentrace v1.1";
+const char *program_version = "xentrace v1.2";
 const char *program_bug_address = "<mark.a.williamson@xxxxxxxxx>";
 
 void usage(void)
@@ -435,9 +471,6 @@ void usage(void)
"          N.B. that the trace buffer cannot be resized.\n" \
"          if it has already been set this boot cycle,\n" \
"          this argument will be ignored.\n" \
-"  -t, --log-thresh=l     Set number, l, of new records required to\n" \
-"                         trigger a write to output (default " \
-                          xstr(NEW_DATA_THRESH) ").\n" \
"  -?, --help             Show this message\n" \
"  -V, --version          Print program version\n" \
"\n" \
@@ -516,12 +549,8 @@ void parse_args(int argc, char **argv)
     {
         switch ( option )
         {
-        case 't': /* set new records threshold for logging */
-            opts.new_data_thresh = argtol(optarg, 0);
-            break;
-
         case 's': /* set sleep time (given in milliseconds) */
-            opts.poll_sleep = millis_to_timespec(argtol(optarg, 0));
+            opts.poll_sleep = argtol(optarg, 0);
             break;
 
         case 'c': /* set new cpu mask for filtering*/
@@ -565,13 +594,19 @@ int main(int argc, char **argv)
     struct sigaction act;
 
     opts.outfile = 0;
-    opts.poll_sleep = millis_to_timespec(POLL_SLEEP_MILLIS);
-    opts.new_data_thresh = NEW_DATA_THRESH;
+    opts.poll_sleep = POLL_SLEEP_MILLIS;
     opts.evt_mask = 0;
     opts.cpu_mask = 0;
 
     parse_args(argc, argv);
-
+
+    xc_handle = xc_interface_open();
+    if ( xc_handle < 0 )
+    {
+        perror(xc_get_last_error()->message);
+        exit(EXIT_FAILURE);
+    }
+
     if ( opts.evt_mask != 0 )
         set_mask(opts.evt_mask, 0);
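Distilled from the monitor_tbufs() hunks above, the consumer-side
bookkeeping now works as in the sketch below. This is a self-contained
toy with a fake producer and a made-up buffer size, not a drop-in for
the tool (which also blocks in wait_for_event_or_timeout() and writes
the actual trace data):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X 1000u                 /* data area size; not a power of 2 */
    static uint32_t cons, prod;     /* both kept in [0, 2*X), as in the patch */

    static void drain(void)
    {
        uint32_t window_size, start, end;

        if (cons == prod)
            return;
        assert(cons < 2 * X && prod < 2 * X);

        /* (prod - cons) % X would be wrong: X is not a power of 2. */
        window_size = (prod < cons) ? (prod + 2 * X) - cons : prod - cons;
        assert(window_size > 0 && window_size <= X);

        start = cons % X;
        end = prod % X;
        if (start < end)
            printf("write bytes [%u..%u)\n", start, end);
        else
            printf("write bytes [%u..%u) then [0..%u)\n", start, X, end);

        cons = prod;                /* hand the space back to the producer */
    }

    int main(void)
    {
        for (unsigned i = 0; i < 10; i++) {   /* fake producer */
            prod += 333;                      /* advance modulo 2*X */
            if (prod >= 2 * X)
                prod -= 2 * X;
            drain();
        }
        return 0;
    }

Note the asymmetry: one conditional add fixes up the window across the
2*X wrap, while a plain '% X' is still correct for the start/end offsets
because cons and prod stay below 2*X.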
diff -r 8cfc3863da72 -r d4d6d997aaf8 xen/arch/x86/trace.c
--- a/xen/arch/x86/trace.c Mon Feb 18 14:06:18 2008 +0000
+++ b/xen/arch/x86/trace.c Mon Feb 18 14:08:46 2008 +0000
@@ -15,9 +15,6 @@ asmlinkage void trace_hypercall(void)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
 
-    if ( !tb_init_done )
-        return;
-
 #ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
@@ -52,9 +49,6 @@ void __trace_pv_trap(int trapnr, unsigne
 void __trace_pv_trap(int trapnr, unsigned long eip,
                      int use_error_code, unsigned error_code)
 {
-    if ( !tb_init_done )
-        return;
-
 #ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
@@ -99,9 +93,6 @@ void __trace_pv_page_fault(unsigned long
 {
     unsigned long eip = guest_cpu_user_regs()->eip;
 
-    if ( !tb_init_done )
-        return;
-
 #ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
@@ -135,9 +126,6 @@ void __trace_pv_page_fault(unsigned long
 
 void __trace_trap_one_addr(unsigned event, unsigned long va)
 {
-    if ( !tb_init_done )
-        return;
-
 #ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
@@ -155,9 +143,6 @@ void __trace_trap_two_addr(unsigned even
 void __trace_trap_two_addr(unsigned event, unsigned long va1,
                            unsigned long va2)
 {
-    if ( !tb_init_done )
-        return;
-
 #ifdef __x86_64__
     if ( is_pv_32on64_vcpu(current) )
     {
@@ -184,9 +169,6 @@ void __trace_ptwr_emulation(unsigned lon
 void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte)
 {
     unsigned long eip = guest_cpu_user_regs()->eip;
-
-    if ( !tb_init_done )
-        return;
 
     /* We have a couple of different modes to worry about:
      * - 32-on-32: 32-bit pte, 32-bit virtual addresses
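The tb_init_done checks deleted here are not lost: each one moves into a
small inline wrapper (see xen/include/asm-x86/trace.h further down), so
the disabled-tracing case costs a single predicted-not-taken branch at
the call site and the out-of-line worker is never entered. A minimal
compilable sketch of that pattern, using illustrative names rather than
the Xen ones:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the Xen names are tb_init_done and
     * __trace_pv_trap()/trace_pv_trap(). */
    static bool tracing_enabled;

    /* Out-of-line worker: does the real work, no flag check of its own. */
    static void __emit_event(unsigned event)
    {
        printf("event %u recorded\n", event);
    }

    /* Inline wrapper: the disabled case is one cheap, well-predicted test. */
    static inline void emit_event(unsigned event)
    {
        if (__builtin_expect(tracing_enabled, 0))   /* Xen's unlikely() */
            __emit_event(event);
    }

    int main(void)
    {
        emit_event(1);              /* tracing off: nothing happens */
        tracing_enabled = true;
        emit_event(2);              /* tracing on: worker runs */
        return 0;
    }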
diff -r 8cfc3863da72 -r d4d6d997aaf8 xen/common/trace.c
--- a/xen/common/trace.c Mon Feb 18 14:06:18 2008 +0000
+++ b/xen/common/trace.c Mon Feb 18 14:08:46 2008 +0000
@@ -239,14 +239,46 @@ static inline int calc_rec_size(int cycl
     return rec_size;
 }
 
+static inline int calc_unconsumed_bytes(struct t_buf *buf)
+{
+    int x = buf->prod - buf->cons;
+    if ( x < 0 )
+        x += 2*data_size;
+
+    ASSERT(x >= 0);
+    ASSERT(x <= data_size);
+
+    return x;
+}
+
 static inline int calc_bytes_to_wrap(struct t_buf *buf)
 {
-    return data_size - (buf->prod % data_size);
-}
-
-static inline unsigned calc_bytes_avail(struct t_buf *buf)
-{
-    return data_size - (buf->prod - buf->cons);
+    int x = data_size - buf->prod;
+    if ( x <= 0 )
+        x += data_size;
+
+    ASSERT(x > 0);
+    ASSERT(x <= data_size);
+
+    return x;
+}
+
+static inline int calc_bytes_avail(struct t_buf *buf)
+{
+    return data_size - calc_unconsumed_bytes(buf);
+}
+
+static inline struct t_rec *
+next_record(struct t_buf *buf)
+{
+    int x = buf->prod;
+    if ( x >= data_size )
+        x -= data_size;
+
+    ASSERT(x >= 0);
+    ASSERT(x < data_size);
+
+    return (struct t_rec *)&this_cpu(t_data)[x];
 }
 
 static inline int __insert_record(struct t_buf *buf,
@@ -260,24 +292,25 @@ static inline int __insert_record(struct
     unsigned char *dst;
     unsigned long extra_word = extra/sizeof(u32);
     int local_rec_size = calc_rec_size(cycles, extra);
+    uint32_t next;
 
     BUG_ON(local_rec_size != rec_size);
+    BUG_ON(extra & 3);
 
     /* Double-check once more that we have enough space.
      * Don't bugcheck here, in case the userland tool is doing
      * something stupid. */
     if ( calc_bytes_avail(buf) < rec_size )
     {
-        printk("%s: %u bytes left (%u - (%u - %u)) recsize %u.\n",
+        printk("%s: %u bytes left (%u - ((%u - %u) %% %u) recsize %u.\n",
                __func__,
-               data_size - (buf->prod - buf->cons),
-               data_size,
-               buf->prod, buf->cons, rec_size);
+               calc_bytes_avail(buf),
+               data_size, buf->prod, buf->cons, data_size, rec_size);
         return 0;
     }
 
     rmb();
 
-    rec = (struct t_rec *)&this_cpu(t_data)[buf->prod % data_size];
+    rec = next_record(buf);
     rec->event = event;
     rec->extra_u32 = extra_word;
     dst = (unsigned char *)rec->u.nocycles.extra_u32;
@@ -293,7 +326,13 @@ static inline int __insert_record(struct
         memcpy(dst, extra_data, extra);
 
     wmb();
-    buf->prod += rec_size;
+
+    next = buf->prod + rec_size;
+    if ( next >= 2*data_size )
+        next -= 2*data_size;
+    ASSERT(next >= 0);
+    ASSERT(next < 2*data_size);
+    buf->prod = next;
 
     return rec_size;
 }
@@ -395,7 +434,7 @@ void __trace_var(u32 event, int cycles,
 
     local_irq_save(flags);
 
-    started_below_highwater = ((buf->prod - buf->cons) < t_buf_highwater);
+    started_below_highwater = (calc_unconsumed_bytes(buf) < t_buf_highwater);
 
     /* Calculate the record size */
     rec_size = calc_rec_size(cycles, extra);
@@ -413,10 +452,6 @@ void __trace_var(u32 event, int cycles,
     total_size = 0;
 
     /* First, check to see if we need to include a lost_record.
-     *
-     * calc_bytes_to_wrap() involves integer division, which we'd like to
-     * avoid if we can.  So do the math, check it in debug versions, and
-     * do a final check always if we happen to write a record.
      */
     if ( this_cpu(lost_records) )
     {
@@ -425,25 +460,18 @@ void __trace_var(u32 event, int cycles,
             total_size += bytes_to_wrap;
             bytes_to_wrap = data_size;
         }
-        else
-        {
-            bytes_to_wrap -= LOST_REC_SIZE;
-            if ( bytes_to_wrap == 0 )
-                bytes_to_wrap = data_size;
-        }
         total_size += LOST_REC_SIZE;
+        bytes_to_wrap -= LOST_REC_SIZE;
+
+        /* LOST_REC might line up perfectly with the buffer wrap */
+        if ( bytes_to_wrap == 0 )
+            bytes_to_wrap = data_size;
     }
 
     if ( rec_size > bytes_to_wrap )
     {
         total_size += bytes_to_wrap;
-        bytes_to_wrap = data_size;
     }
-    else
-    {
-        bytes_to_wrap -= rec_size;
-    }
-
     total_size += rec_size;
 
     /* Do we have enough space for everything? */
@@ -466,14 +494,12 @@ void __trace_var(u32 event, int cycles,
             insert_wrap_record(buf, LOST_REC_SIZE);
             bytes_to_wrap = data_size;
         }
-        else
-        {
-            bytes_to_wrap -= LOST_REC_SIZE;
-
-            /* LOST_REC might line up perfectly with the buffer wrap */
-            if ( bytes_to_wrap == 0 )
-                bytes_to_wrap = data_size;
-        }
         insert_lost_records(buf);
+        bytes_to_wrap -= LOST_REC_SIZE;
+
+        /* LOST_REC might line up perfectly with the buffer wrap */
+        if ( bytes_to_wrap == 0 )
+            bytes_to_wrap = data_size;
     }
 
     if ( rec_size > bytes_to_wrap )
@@ -486,7 +512,7 @@ void __trace_var(u32 event, int cycles,
 
     /* Notify trace buffer consumer that we've crossed the high water mark. */
     if ( started_below_highwater &&
-         ((buf->prod - buf->cons) >= t_buf_highwater) )
+         (calc_unconsumed_bytes(buf) >= t_buf_highwater) )
         raise_softirq(TRACE_SOFTIRQ);
 }
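The new helpers above replace '%' with a compare and a conditional
subtract, which is what lets the series claim the hypervisor side of the
trace path is now free of integer division. Below is a stand-alone
harness -- assumed buffer and record sizes, not hypervisor code -- that
mirrors the same arithmetic and exercises the stated ASSERT invariants
across many wraps:

    #include <assert.h>
    #include <stdio.h>

    #define DATA_SIZE 1000          /* not a power of 2, as in practice */
    static int prod, cons;          /* both kept in [0, 2*DATA_SIZE) */

    /* Mirrors calc_unconsumed_bytes(): subtract, then fix up the wrap. */
    static int unconsumed(void)
    {
        int x = prod - cons;
        if (x < 0)
            x += 2 * DATA_SIZE;
        assert(x >= 0 && x <= DATA_SIZE);
        return x;
    }

    /* Mirrors calc_bytes_to_wrap(): distance to the end of the data area. */
    static int bytes_to_wrap(void)
    {
        int x = DATA_SIZE - prod;
        if (x <= 0)
            x += DATA_SIZE;
        assert(x > 0 && x <= DATA_SIZE);
        return x;
    }

    /* Mirrors __insert_record()'s producer advance, modulo 2*DATA_SIZE. */
    static void produce(int rec_size)
    {
        int next = prod + rec_size;
        if (next >= 2 * DATA_SIZE)
            next -= 2 * DATA_SIZE;
        prod = next;
    }

    int main(void)
    {
        /* Drive millions of records through; the '%' below is only in
         * the test driver, never in the mirrored produce/account paths. */
        for (long i = 0; i < 10 * 1000 * 1000L; i++) {
            int rec = 12 + (int)(i % 5) * 4;   /* varied, 4-byte-aligned */
            if (unconsumed() + rec > DATA_SIZE)
                cons = prod;                   /* consumer drains */
            produce(rec);
            (void)bytes_to_wrap();
        }
        printf("ok: prod=%d cons=%d unconsumed=%d\n", prod, cons, unconsumed());
        return 0;
    }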
diff -r 8cfc3863da72 -r d4d6d997aaf8 xen/include/asm-x86/hvm/trace.h
--- a/xen/include/asm-x86/hvm/trace.h Mon Feb 18 14:06:18 2008 +0000
+++ b/xen/include/asm-x86/hvm/trace.h Mon Feb 18 14:08:46 2008 +0000
@@ -31,14 +31,15 @@
 #define DO_TRC_HVM_CLTS        1
 #define DO_TRC_HVM_LMSW        1
 
-
-
 static inline void hvmtrace_vmexit(struct vcpu *v,
                                    unsigned long rip,
                                    unsigned long exit_reason)
 {
+    if ( likely(!tb_init_done) )
+        return;
+
 #ifdef __x86_64__
-    if(hvm_long_mode_enabled(v))
+    if ( hvm_long_mode_enabled(v) )
     {
         struct {
             unsigned did:16, vid:16;
@@ -50,9 +51,12 @@ static inline void hvmtrace_vmexit(struc
         d.vid = v->vcpu_id;
         d.exit_reason = exit_reason;
         d.rip = rip;
-        trace_var(TRC_HVM_VMEXIT64, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
-    } else {
+        __trace_var(TRC_HVM_VMEXIT64, 1/*cycles*/, sizeof(d),
+                    (unsigned char *)&d);
+    }
+    else
 #endif
+    {
         struct {
             unsigned did:16, vid:16;
             unsigned exit_reason:32;
@@ -63,10 +67,9 @@ static inline void hvmtrace_vmexit(struc
         d.vid = v->vcpu_id;
         d.exit_reason = exit_reason;
         d.eip = rip;
-        trace_var(TRC_HVM_VMEXIT, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
-#ifdef __x86_64__
-    }
-#endif
+        __trace_var(TRC_HVM_VMEXIT, 1/*cycles*/, sizeof(d),
+                    (unsigned char *)&d);
+    }
 }
 
 
@@ -75,9 +78,13 @@ static inline void hvmtrace_vmentry(stru
     struct {
         unsigned did:16, vid:16;
     } d;
-    d.did = v->domain->domain_id;
-    d.vid = v->vcpu_id;
-    trace_var(TRC_HVM_VMENTRY, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
+
+    if ( likely(!tb_init_done) )
+        return;
+
+    d.did = v->domain->domain_id;
+    d.vid = v->vcpu_id;
+    __trace_var(TRC_HVM_VMENTRY, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
 }
 
 static inline void hvmtrace_msr_read(struct vcpu *v, u32 ecx, u64 msr_content)
@@ -87,11 +94,16 @@ static inline void hvmtrace_msr_read(str
         u32 ecx;
         u64 msr_content;
     } d;
+
+    if ( likely(!tb_init_done) )
+        return;
+
     d.did = v->domain->domain_id;
     d.vid = v->vcpu_id;
     d.ecx = ecx;
     d.msr_content = msr_content;
-    trace_var(TRC_HVM_MSR_READ, 0/*!cycles*/, sizeof(d), (unsigned char *)&d);
+    __trace_var(TRC_HVM_MSR_READ, 0/*!cycles*/, sizeof(d),
+                (unsigned char *)&d);
 }
 
 static inline void hvmtrace_msr_write(struct vcpu *v, u32 ecx, u64 msr_content)
@@ -101,18 +113,26 @@ static inline void hvmtrace_msr_write(st
         u32 ecx;
         u64 msr_content;
     } d;
+
+    if ( likely(!tb_init_done) )
+        return;
+
     d.did = v->domain->domain_id;
     d.vid = v->vcpu_id;
     d.ecx = ecx;
     d.msr_content = msr_content;
-    trace_var(TRC_HVM_MSR_WRITE, 0/*!cycles*/,sizeof(d), (unsigned char *)&d);
+    __trace_var(TRC_HVM_MSR_WRITE, 0/*!cycles*/,sizeof(d),
+                (unsigned char *)&d);
 }
 
 static inline void hvmtrace_pf_xen(struct vcpu *v, unsigned long va,
                                    u32 error_code)
 {
+    if ( likely(!tb_init_done) )
+        return;
+
 #ifdef __x86_64__
-    if(hvm_long_mode_enabled(v))
+    if ( hvm_long_mode_enabled(v) )
     {
         struct {
             unsigned did:16, vid:16;
@@ -123,10 +143,12 @@ static inline void hvmtrace_pf_xen(struc
         d.vid = v->vcpu_id;
         d.error_code = error_code;
         d.va = va;
-        trace_var(TRC_HVM_PF_XEN64, 0/*!cycles*/,sizeof(d),
-                  (unsigned char *)&d);
-    } else {
+        __trace_var(TRC_HVM_PF_XEN64, 0/*!cycles*/,sizeof(d),
+                    (unsigned char *)&d);
+    }
+    else
 #endif
+    {
         struct {
             unsigned did:16, vid:16;
             u32 error_code;
@@ -136,15 +158,14 @@ static inline void hvmtrace_pf_xen(struc
         d.vid = v->vcpu_id;
         d.error_code = error_code;
         d.va = va;
-        trace_var(TRC_HVM_PF_XEN, 0/*!cycles*/,sizeof(d), (unsigned char *)&d);
-#ifdef __x86_64__
-    }
-#endif
+        __trace_var(TRC_HVM_PF_XEN, 0/*!cycles*/,sizeof(d),
+                    (unsigned char *)&d);
+    }
 }
 
 #define HVMTRACE_ND(evt, vcpu, count, d1, d2, d3, d4) \
     do { \
-        if (DO_TRC_HVM_ ## evt) \
+        if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt ) \
         { \
             struct { \
                 unsigned did:16, vid:16; \
@@ -156,8 +177,8 @@ static inline void hvmtrace_pf_xen(struc
             _d.d[1]=(d2); \
             _d.d[2]=(d3); \
             _d.d[3]=(d4); \
-            trace_var(TRC_HVM_ ## evt, 0/*!cycles*/, \
-                      sizeof(u32)*count+1, (unsigned char *)&_d); \
+            __trace_var(TRC_HVM_ ## evt, 0/*!cycles*/, \
+                        sizeof(u32)*count+1, (unsigned char *)&_d); \
         } \
     } while(0)
 
@@ -167,7 +188,8 @@ static inline void hvmtrace_pf_xen(struc
 #define HVMTRACE_1D(evt, vcpu, d1)         HVMTRACE_ND(evt, vcpu, 1, d1, 0, 0, 0)
 #define HVMTRACE_0D(evt, vcpu)             HVMTRACE_ND(evt, vcpu, 0, 0, 0, 0, 0)
 
-#endif //__ASM_X86_HVM_TRACE_H__
+#endif /* __ASM_X86_HVM_TRACE_H__ */
+
 /*
  * Local variables:
  * mode: C
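HVMTRACE_ND applies the same hoisting in macro form: the runtime flag is
tested before the per-event struct is even populated, and the
compile-time DO_TRC_HVM_* constant lets the compiler drop statically
disabled events altogether. A compilable illustration with made-up names
(tb_on standing in for tb_init_done, __record() for __trace_var()):

    #include <stdbool.h>
    #include <stdio.h>

    static bool tb_on;              /* stand-in for tb_init_done */
    #define DO_TRC_DEMO 1           /* stand-in for DO_TRC_HVM_* */

    static void __record(unsigned event, unsigned *d, unsigned n)
    {
        printf("event %u, %u words\n", event, n);
        (void)d;
    }

    /* As in HVMTRACE_ND: test the cheap gates first, so the argument
     * marshalling below is skipped entirely when tracing is off. */
    #define TRACE_2(event, d1, d2)                           \
        do {                                                 \
            if ( __builtin_expect(tb_on, 0) && DO_TRC_DEMO ) \
            {                                                \
                unsigned _d[2];                              \
                _d[0] = (d1);                                \
                _d[1] = (d2);                                \
                __record((event), _d, 2);                    \
            }                                                \
        } while (0)

    int main(void)
    {
        TRACE_2(7, 1, 2);   /* off: no stores, no call */
        tb_on = true;
        TRACE_2(7, 1, 2);   /* on: marshal and record */
        return 0;
    }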
diff -r 8cfc3863da72 -r d4d6d997aaf8 xen/include/asm-x86/trace.h
--- a/xen/include/asm-x86/trace.h Mon Feb 18 14:06:18 2008 +0000
+++ b/xen/include/asm-x86/trace.h Mon Feb 18 14:08:46 2008 +0000
@@ -8,7 +8,7 @@ static inline void trace_pv_trap(int tra
 static inline void trace_pv_trap(int trapnr, unsigned long eip,
                                  int use_error_code, unsigned error_code)
 {
-    if ( tb_init_done )
+    if ( unlikely(tb_init_done) )
         __trace_pv_trap(trapnr, eip, use_error_code, error_code);
 }
 
@@ -16,14 +16,14 @@ static inline void trace_pv_page_fault(u
 static inline void trace_pv_page_fault(unsigned long addr,
                                        unsigned error_code)
 {
-    if ( tb_init_done )
+    if ( unlikely(tb_init_done) )
         __trace_pv_page_fault(addr, error_code);
 }
 
 void __trace_trap_one_addr(unsigned event, unsigned long va);
 static inline void trace_trap_one_addr(unsigned event, unsigned long va)
 {
-    if ( tb_init_done )
+    if ( unlikely(tb_init_done) )
         __trace_trap_one_addr(event, va);
 }
 
@@ -32,14 +32,14 @@ static inline void trace_trap_two_addr(u
 static inline void trace_trap_two_addr(unsigned event, unsigned long va1,
                                        unsigned long va2)
 {
-    if ( tb_init_done )
+    if ( unlikely(tb_init_done) )
         __trace_trap_two_addr(event, va1, va2);
 }
 
 void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte);
 static inline void trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte)
 {
-    if ( tb_init_done )
+    if ( unlikely(tb_init_done) )
         __trace_ptwr_emulation(addr, npte);
 }
 
diff -r 8cfc3863da72 -r d4d6d997aaf8 xen/include/public/trace.h
--- a/xen/include/public/trace.h Mon Feb 18 14:06:18 2008 +0000
+++ b/xen/include/public/trace.h Mon Feb 18 14:08:46 2008 +0000
@@ -141,6 +141,14 @@ struct t_rec {
  * field, indexes into an array of struct t_rec's.
  */
 struct t_buf {
+    /* Assume the data buffer size is X.  X is generally not a power of 2.
+     * CONS and PROD are incremented modulo (2*X):
+     *     0 <= cons < 2*X
+     *     0 <= prod < 2*X
+     * This is done because modulo-X arithmetic on a free-running 32-bit
+     * counter breaks when X is not a power of 2: the counter wraps from
+     * (2^32 - 1) to 0, so the buffer offset jumps from ((2^32 - 1) % X)
+     * to 0 rather than to (2^32 % X), which differ unless X divides 2^32.
+     */
     uint32_t cons;      /* Offset of next item to be consumed by control tools. */
     uint32_t prod;      /* Offset of next item to be produced by Xen.           */
     /* Records follow immediately after the meta-data header.    */
diff -r 8cfc3863da72 -r d4d6d997aaf8 xen/include/xen/trace.h
--- a/xen/include/xen/trace.h Mon Feb 18 14:06:18 2008 +0000
+++ b/xen/include/xen/trace.h Mon Feb 18 14:08:46 2008 +0000
@@ -39,7 +39,7 @@ static inline void trace_var(u32 event,
 static inline void trace_var(u32 event, int cycles, int extra,
                              unsigned char *extra_data)
 {
-    if( unlikely(tb_init_done) )
+    if ( unlikely(tb_init_done) )
         __trace_var(event, cycles, extra, extra_data);
 }
 
@@ -49,49 +49,64 @@ static inline void trace_var(u32 event,
         trace_var(_e, 1, 0, NULL); \
     } while ( 0 )
 
-#define TRACE_1D(_e,_d)                                         \
+#define TRACE_1D(_e,d1)                                         \
     do {                                                        \
-        u32 _d1;                                                \
-        _d1 = _d;                                               \
-        trace_var(_e, 1, sizeof(_d1), (unsigned char *)&_d1);   \
+        if ( unlikely(tb_init_done) )                           \
+        {                                                       \
+            u32 _d[1];                                          \
+            _d[0] = d1;                                         \
+            __trace_var(_e, 1, sizeof(*_d), (unsigned char *)_d); \
+        }                                                       \
     } while ( 0 )
 
 #define TRACE_2D(_e,d1,d2)                                      \
-    do {                                                        \
-        u32 _d[2];                                              \
-        _d[0]=d1;                                               \
-        _d[1]=d2;                                               \
-        trace_var(_e, 1, sizeof(*_d)*2, (unsigned char *)_d);   \
+    do {                                                        \
+        if ( unlikely(tb_init_done) )                           \
+        {                                                       \
+            u32 _d[2];                                          \
+            _d[0] = d1;                                         \
+            _d[1] = d2;                                         \
+            __trace_var(_e, 1, sizeof(*_d)*2, (unsigned char *)_d); \
+        }                                                       \
     } while ( 0 )
 
 #define TRACE_3D(_e,d1,d2,d3)                                   \
     do {                                                        \
-        u32 _d[3];                                              \
-        _d[0]=d1;                                               \
-        _d[1]=d2;                                               \
-        _d[2]=d3;                                               \
-        trace_var(_e, 1, sizeof(*_d)*3, (unsigned char *)_d);   \
+        if ( unlikely(tb_init_done) )                           \
+        {                                                       \
+            u32 _d[3];                                          \
+            _d[0] = d1;                                         \
+            _d[1] = d2;                                         \
+            _d[2] = d3;                                         \
+            __trace_var(_e, 1, sizeof(*_d)*3, (unsigned char *)_d); \
+        }                                                       \
     } while ( 0 )
 
 #define TRACE_4D(_e,d1,d2,d3,d4)                                \
     do {                                                        \
-        u32 _d[4];                                              \
-        _d[0]=d1;                                               \
-        _d[1]=d2;                                               \
-        _d[2]=d3;                                               \
-        _d[3]=d4;                                               \
-        trace_var(_e, 1, sizeof(*_d)*4, (unsigned char *)_d);   \
+        if ( unlikely(tb_init_done) )                           \
+        {                                                       \
+            u32 _d[4];                                          \
+            _d[0] = d1;                                         \
+            _d[1] = d2;                                         \
+            _d[2] = d3;                                         \
+            _d[3] = d4;                                         \
+            __trace_var(_e, 1, sizeof(*_d)*4, (unsigned char *)_d); \
+        }                                                       \
    } while ( 0 )
 
 #define TRACE_5D(_e,d1,d2,d3,d4,d5)                             \
-    do {                                                        \
-        u32 _d[5];                                              \
-        _d[0]=d1;                                               \
-        _d[1]=d2;                                               \
-        _d[2]=d3;                                               \
-        _d[3]=d4;                                               \
-        _d[4]=d5;                                               \
-        trace_var(_e, 1, sizeof(*_d)*5, (unsigned char *)_d);   \
+    do {                                                        \
+        if ( unlikely(tb_init_done) )                           \
+        {                                                       \
+            u32 _d[5];                                          \
+            _d[0] = d1;                                         \
+            _d[1] = d2;                                         \
+            _d[2] = d3;                                         \
+            _d[3] = d4;                                         \
+            _d[4] = d5;                                         \
+            __trace_var(_e, 1, sizeof(*_d)*5, (unsigned char *)_d); \
+        }                                                       \
     } while ( 0 )
 
 #endif /* __XEN_TRACE_H__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog