diff -uNr a/xen/common/trace.c b/xen/common/trace.c
--- a/xen/common/trace.c	2008-06-01 13:51:38.000000000 +0100
+++ b/xen/common/trace.c	2008-08-05 13:29:04.518299605 +0100
@@ -58,6 +58,7 @@
 
 /* Number of records lost due to per-CPU trace buffer being full. */
 static DEFINE_PER_CPU(unsigned long, lost_records);
+static DEFINE_PER_CPU(unsigned long, lost_records_first_tsc);
 
 /* a flag recording whether initialization has been done */
 /* or more properly, if the tbuf subsystem is enabled right now */
@@ -354,22 +355,27 @@
     NULL);
 }
 
-#define LOST_REC_SIZE 8
+#define LOST_REC_SIZE (4 + 8 + 16) /* header + tsc + sizeof(struct ed) */
 
 static inline int insert_lost_records(struct t_buf *buf)
 {
     struct {
         u32 lost_records;
-    } ed;
+        unsigned did:16, vid:16;
+        u64 first_tsc;
+    } __attribute__((packed)) ed;
 
+    ed.vid = current->vcpu_id;
+    ed.did = current->domain->domain_id;
     ed.lost_records = this_cpu(lost_records);
+    ed.first_tsc = this_cpu(lost_records_first_tsc);
 
     this_cpu(lost_records) = 0;
 
     return __insert_record(buf,
                            TRC_LOST_RECORDS,
                            sizeof(ed),
-                           0 /* !cycles */,
+                           1 /* cycles */,
                            LOST_REC_SIZE,
                            (unsigned char *)&ed);
 }
@@ -479,7 +485,8 @@
     /* Do we have enough space for everything? */
     if ( total_size > bytes_to_tail )
     {
-        this_cpu(lost_records)++;
+        if ( ++this_cpu(lost_records) == 1 )
+            this_cpu(lost_records_first_tsc) = (u64)get_cycles();
         local_irq_restore(flags);
         return;
     }