
[Xen-changelog] [xen-unstable] Merge



# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1190637891 -3600
# Node ID 1445de775a7c77e578f6e5d2806d4f8ed8d0ccdf
# Parent  c0d1825f51899b329495efb2078dd15e0fb3b479
# Parent  6ab8580e685505caa2d44c3d3cedf29f6cf37de7
Merge
---
 tools/xenmon/xenbaked.c    |  703 ++++++++++++++++++++++-----------------------
 tools/xentrace/xentrace.c  |   20 -
 xen/common/trace.c         |   89 +----
 xen/include/public/trace.h |   15 
 xen/include/xen/trace.h    |   10 
 5 files changed, 402 insertions(+), 435 deletions(-)

diff -r c0d1825f5189 -r 1445de775a7c tools/xenmon/xenbaked.c
--- a/tools/xenmon/xenbaked.c   Mon Sep 24 13:44:29 2007 +0100
+++ b/tools/xenmon/xenbaked.c   Mon Sep 24 13:44:51 2007 +0100
@@ -12,7 +12,8 @@
  * Authors: Diwaker Gupta, diwaker.gupta@xxxxxx
  *          Rob Gardner, rob.gardner@xxxxxx
  *          Lucy Cherkasova, lucy.cherkasova.hp.com
- * Much code based on xentrace, authored by Mark Williamson, mark.a.williamson@xxxxxxxxx
+ * Much code based on xentrace, authored by Mark Williamson, 
+ * mark.a.williamson@xxxxxxxxx
  * Date:   November, 2005
  * 
  *  This program is free software; you can redistribute it and/or modify
@@ -107,31 +108,31 @@ int NCPU = 0;
 
 void init_current(int ncpu)
 {
-  running = calloc(ncpu, sizeof(int));
-  NCPU = ncpu;
-  printf("Initialized with %d %s\n", ncpu, (ncpu == 1) ? "cpu" : "cpu's");
+    running = calloc(ncpu, sizeof(int));
+    NCPU = ncpu;
+    printf("Initialized with %d %s\n", ncpu, (ncpu == 1) ? "cpu" : "cpu's");
 }
 
 int is_current(int domain, int cpu)
 {
-  //  int i;
-  
-  //  for (i=0; i<NCPU; i++)
+    //  int i;
+  
+    //  for (i=0; i<NCPU; i++)
     if (running[cpu] == domain)
-      return 1;
-  return 0;
+        return 1;
+    return 0;
 }
 
 
 // return the domain that's currently running on the given cpu
 int current(int cpu)
 {
-  return running[cpu];
+    return running[cpu];
 }
 
 void set_current(int cpu, int domain)
 {
-  running[cpu] = domain;
+    running[cpu] = domain;
 }
 
 
@@ -145,7 +146,7 @@ void dump_record(int cpu, struct t_rec *
 void dump_record(int cpu, struct t_rec *x)
 {
     printf("record: cpu=%x, tsc=%lx, event=%x, d1=%lx\n", 
-            cpu, x->cycles, x->event, x->data[0]);
+           cpu, x->cycles, x->event, x->data[0]);
 }
 #endif
 
@@ -198,15 +199,15 @@ void check_gotten_sum(void)
     int i;
 
     for (i=0; i<NCPU; i++) {
-      new_qos = cpu_qos_data[i];
-      ns = billion;
-      sum = total_ns_gotten(&ns);
-
-      printf("[cpu%d] ns_gotten over all domains = %lldns, over %lldns\n",
-             i, sum, ns);
-      percent = (double) sum;
-      percent = (100.0*percent) / (double)ns;
-      printf(" ==> ns_gotten = %7.3f%%\n", percent);
+        new_qos = cpu_qos_data[i];
+        ns = billion;
+        sum = total_ns_gotten(&ns);
+
+        printf("[cpu%d] ns_gotten over all domains = %lldns, over %lldns\n",
+               i, sum, ns);
+        percent = (double) sum;
+        percent = (100.0*percent) / (double)ns;
+        printf(" ==> ns_gotten = %7.3f%%\n", percent);
     }
 #endif
 }
@@ -229,7 +230,7 @@ void dump_stats(void)
     }
 
     printf("processed %d total records in %d seconds (%ld per second)\n",
-            rec_count, (int)run_time, rec_count/run_time);
+           rec_count, (int)run_time, rec_count/run_time);
 
     printf("woke up %d times in %d seconds (%ld per second)\n", wakeups,
           (int) run_time, wakeups/run_time);
@@ -261,56 +262,56 @@ int xce_handle = -1;
 /* Stolen from xenstore code */
 int eventchn_init(void)
 {
-  int rc;
-  
-  // to revert to old way:
-  if (0)
-    return -1;
-  
-  xce_handle = xc_evtchn_open();
-
-  if (xce_handle < 0)
-    perror("Failed to open evtchn device");
-  
-  if ((rc = xc_evtchn_bind_virq(xce_handle, VIRQ_TBUF)) == -1)
-    perror("Failed to bind to domain exception virq port");
-  virq_port = rc;
-  
-  return xce_handle;
+    int rc;
+  
+    // to revert to old way:
+    if (0)
+        return -1;
+  
+    xce_handle = xc_evtchn_open();
+
+    if (xce_handle < 0)
+        perror("Failed to open evtchn device");
+  
+    if ((rc = xc_evtchn_bind_virq(xce_handle, VIRQ_TBUF)) == -1)
+        perror("Failed to bind to domain exception virq port");
+    virq_port = rc;
+  
+    return xce_handle;
 }
 
 void wait_for_event(void)
 {
-  int ret;
-  fd_set inset;
-  evtchn_port_t port;
-  struct timeval tv;
-  int evtchn_fd;
-  
-  if (xce_handle < 0) {
-    nanosleep(&opts.poll_sleep, NULL);
-    return;
-  }
-
-  evtchn_fd = xc_evtchn_fd(xce_handle);
-
-  FD_ZERO(&inset);
-  FD_SET(evtchn_fd, &inset);
-  tv.tv_sec = 1;
-  tv.tv_usec = 0;
-  // tv = millis_to_timespec(&opts.poll_sleep);
-  ret = select(evtchn_fd+1, &inset, NULL, NULL, &tv);
-  
-  if ( (ret == 1) && FD_ISSET(evtchn_fd, &inset)) {
-    if ((port = xc_evtchn_pending(xce_handle)) == -1)
-      perror("Failed to read from event fd");
-    
-    //    if (port == virq_port)
-    //      printf("got the event I was looking for\r\n");
-
-    if (xc_evtchn_unmask(xce_handle, port) == -1)
-      perror("Failed to write to event fd");
-  }
+    int ret;
+    fd_set inset;
+    evtchn_port_t port;
+    struct timeval tv;
+    int evtchn_fd;
+  
+    if (xce_handle < 0) {
+        nanosleep(&opts.poll_sleep, NULL);
+        return;
+    }
+
+    evtchn_fd = xc_evtchn_fd(xce_handle);
+
+    FD_ZERO(&inset);
+    FD_SET(evtchn_fd, &inset);
+    tv.tv_sec = 1;
+    tv.tv_usec = 0;
+    // tv = millis_to_timespec(&opts.poll_sleep);
+    ret = select(evtchn_fd+1, &inset, NULL, NULL, &tv);
+  
+    if ( (ret == 1) && FD_ISSET(evtchn_fd, &inset)) {
+        if ((port = xc_evtchn_pending(xce_handle)) == -1)
+            perror("Failed to read from event fd");
+    
+        //    if (port == virq_port)
+        //      printf("got the event I was looking for\r\n");
+
+        if (xc_evtchn_unmask(xce_handle, port) == -1)
+            perror("Failed to write to event fd");
+    }
 }
 
 static void get_tbufs(unsigned long *mfn, unsigned long *size)
@@ -336,9 +337,9 @@ static void get_tbufs(unsigned long *mfn
 
 void disable_tracing(void)
 {
-  int xc_handle = xc_interface_open();
-  xc_tbuf_disable(xc_handle);  
-  xc_interface_close(xc_handle);
+    int xc_handle = xc_interface_open();
+    xc_tbuf_disable(xc_handle);  
+    xc_interface_close(xc_handle);
 }
 
 /**
@@ -387,7 +388,7 @@ struct t_buf *map_tbufs(unsigned long tb
  * mapped region containing all trace buffers.
  */
 struct t_buf **init_bufs_ptrs(void *bufs_mapped, unsigned int num,
-        unsigned long size)
+                              unsigned long size)
 {
     int i;
     struct t_buf **user_ptrs;
@@ -472,7 +473,7 @@ int monitor_tbufs(void)
 int monitor_tbufs(void)
 {
     int i;
-    extern void process_record(int, struct t_rec *);
+    extern int process_record(int, struct t_rec *);
     extern void alloc_qos_data(int ncpu);
 
     void *tbufs_mapped;          /* pointer to where the tbufs are mapped    */
@@ -483,7 +484,7 @@ int monitor_tbufs(void)
     unsigned int  num;           /* number of trace buffers / logical CPUS   */
     unsigned long size;          /* size of a single trace buffer            */
 
-    int size_in_recs;
+    unsigned long data_size, rec_size;
 
     /* get number of logical CPUs (and therefore number of trace buffers) */
     num = get_num_cpus();
@@ -496,34 +497,32 @@ int monitor_tbufs(void)
     /* setup access to trace buffers */
     get_tbufs(&tbufs_mfn, &size);
 
-    //    printf("from dom0op: %ld, t_buf: %d, t_rec: %d\n",
-    //            size, sizeof(struct t_buf), sizeof(struct t_rec));
-
     tbufs_mapped = map_tbufs(tbufs_mfn, num, size);
 
-    size_in_recs = (size - sizeof(struct t_buf)) / sizeof(struct t_rec);
-    //    fprintf(stderr, "size_in_recs = %d\n", size_in_recs);
+    data_size = size - sizeof(struct t_buf);
 
     /* build arrays of convenience ptrs */
     meta  = init_bufs_ptrs (tbufs_mapped, num, size);
     data  = init_rec_ptrs(meta, num);
 
-    // Set up event channel for select()
-    if (eventchn_init() < 0) {
-      fprintf(stderr, "Failed to initialize event channel; Using POLL 
method\r\n");
-    }
+    if ( eventchn_init() < 0 )
+        fprintf(stderr, "Failed to initialize event channel; "
+                "Using POLL method\r\n");
 
     /* now, scan buffers for events */
     while ( !interrupted )
     {
-        for ( i = 0; ( i < num ) && !interrupted; i++ )
+        for ( i = 0; (i < num) && !interrupted; i++ )
+        {
             while ( meta[i]->cons != meta[i]->prod )
             {
                 rmb(); /* read prod, then read item. */
-                process_record(i, data[i] + meta[i]->cons % size_in_recs);
+                rec_size = process_record(
+                    i, data[i] + meta[i]->cons % data_size);
                 mb(); /* read item, then update cons. */
-                meta[i]->cons++;
+                meta[i]->cons += rec_size;
             }
+        }
 
        wait_for_event();
        wakeups++;
@@ -550,44 +549,44 @@ error_t cmd_parser(int key, char *arg, s
 
     switch ( key )
     {
-        case 't': /* set new records threshold for logging */
-            {
-                char *inval;
-                setup->new_data_thresh = strtol(arg, &inval, 0);
-                if ( inval == arg )
-                    argp_usage(state);
-            }
-            break;
-
-        case 's': /* set sleep time (given in milliseconds) */
-            {
-                char *inval;
-                setup->poll_sleep = millis_to_timespec(strtol(arg, &inval, 0));
-                if ( inval == arg )
-                    argp_usage(state);
-            }
-            break;
-
-        case 'm': /* set ms_per_sample */
-            {
-                char *inval;
-                setup->ms_per_sample = strtol(arg, &inval, 0);
-                if ( inval == arg )
-                    argp_usage(state);
-            }
-            break;
-
-        case ARGP_KEY_ARG:
-            {
-                if ( state->arg_num == 0 )
-                    setup->outfile = arg;
-                else
-                    argp_usage(state);
-            }
-            break;
-
-        default:
-            return ARGP_ERR_UNKNOWN;
+    case 't': /* set new records threshold for logging */
+    {
+        char *inval;
+        setup->new_data_thresh = strtol(arg, &inval, 0);
+        if ( inval == arg )
+            argp_usage(state);
+    }
+    break;
+
+    case 's': /* set sleep time (given in milliseconds) */
+    {
+        char *inval;
+        setup->poll_sleep = millis_to_timespec(strtol(arg, &inval, 0));
+        if ( inval == arg )
+            argp_usage(state);
+    }
+    break;
+
+    case 'm': /* set ms_per_sample */
+    {
+        char *inval;
+        setup->ms_per_sample = strtol(arg, &inval, 0);
+        if ( inval == arg )
+            argp_usage(state);
+    }
+    break;
+
+    case ARGP_KEY_ARG:
+    {
+        if ( state->arg_num == 0 )
+            setup->outfile = arg;
+        else
+            argp_usage(state);
+    }
+    break;
+
+    default:
+        return ARGP_ERR_UNKNOWN;
     }
 
     return 0;
@@ -614,27 +613,27 @@ void alloc_qos_data(int ncpu)
 
     for (n=0; n<ncpu; n++) {
 
-      for (i=0; i<sizeof(_new_qos_data); i=i+pgsize)
-          if ((write(qos_fd, dummy, pgsize)) != pgsize) {
-              PERROR(SHARED_MEM_FILE);
-              exit(2);
-          }
-
-      new_qos = (_new_qos_data *) mmap(0, sizeof(_new_qos_data), PROT_READ|PROT_WRITE, 
-                                      MAP_SHARED, qos_fd, off);
-      off += i;
-      if (new_qos == NULL) {
-        PERROR("mmap");
-        exit(3);
-      }
-      //  printf("new_qos = %p\n", new_qos);
-      memset(new_qos, 0, sizeof(_new_qos_data));
-      new_qos->next_datapoint = 0;
-      advance_next_datapoint(0);
-      new_qos->structlen = i;
-      new_qos->ncpu = ncpu;
-      //      printf("structlen = 0x%x\n", i);
-      cpu_qos_data[n] = new_qos;
+        for (i=0; i<sizeof(_new_qos_data); i=i+pgsize)
+            if ((write(qos_fd, dummy, pgsize)) != pgsize) {
+                PERROR(SHARED_MEM_FILE);
+                exit(2);
+            }
+
+        new_qos = (_new_qos_data *) mmap(0, sizeof(_new_qos_data), PROT_READ|PROT_WRITE, 
+                                         MAP_SHARED, qos_fd, off);
+        off += i;
+        if (new_qos == NULL) {
+            PERROR("mmap");
+            exit(3);
+        }
+        //  printf("new_qos = %p\n", new_qos);
+        memset(new_qos, 0, sizeof(_new_qos_data));
+        new_qos->next_datapoint = 0;
+        advance_next_datapoint(0);
+        new_qos->structlen = i;
+        new_qos->ncpu = ncpu;
+        //      printf("structlen = 0x%x\n", i);
+        cpu_qos_data[n] = new_qos;
     }
     free(dummy);
     new_qos = NULL;
@@ -647,19 +646,19 @@ const struct argp_option cmd_opts[] =
 const struct argp_option cmd_opts[] =
 {
     { .name = "log-thresh", .key='t', .arg="l",
-        .doc =
-            "Set number, l, of new records required to trigger a write to 
output "
-            "(default " xstr(NEW_DATA_THRESH) ")." },
+      .doc =
+      "Set number, l, of new records required to trigger a write to output "
+      "(default " xstr(NEW_DATA_THRESH) ")." },
 
     { .name = "poll-sleep", .key='s', .arg="p",
-        .doc = 
-            "Set sleep time, p, in milliseconds between polling the trace 
buffer "
-            "for new data (default " xstr(POLL_SLEEP_MILLIS) ")." },
+      .doc = 
+      "Set sleep time, p, in milliseconds between polling the trace buffer "
+      "for new data (default " xstr(POLL_SLEEP_MILLIS) ")." },
 
     { .name = "ms_per_sample", .key='m', .arg="MS",
-        .doc = 
-            "Specify the number of milliseconds per sample "
-            " (default " xstr(MS_PER_SAMPLE) ")." },
+      .doc = 
+      "Specify the number of milliseconds per sample "
+      " (default " xstr(MS_PER_SAMPLE) ")." },
 
     {0}
 };
@@ -670,10 +669,10 @@ const struct argp parser_def =
     .parser = cmd_parser,
     //    .args_doc = "[output file]",
     .doc =
-        "Tool to capture and partially process Xen trace buffer data"
-        "\v"
-        "This tool is used to capture trace buffer data from Xen.  The data is 
"
-        "saved in a shared memory structure to be further processed by xenmon."
+    "Tool to capture and partially process Xen trace buffer data"
+    "\v"
+    "This tool is used to capture trace buffer data from Xen.  The data is "
+    "saved in a shared memory structure to be further processed by xenmon."
 };
 
 
@@ -716,101 +715,101 @@ int main(int argc, char **argv)
 
 void qos_init_domain(int domid, int idx)
 {
-  int i;
-
-  memset(&new_qos->domain_info[idx], 0, sizeof(_domain_info));
-  new_qos->domain_info[idx].last_update_time = global_now;
-  //  runnable_start_time[idx] = 0;
-  new_qos->domain_info[idx].runnable_start_time = 0; // invalidate
-  new_qos->domain_info[idx].in_use = 1;
-  new_qos->domain_info[idx].blocked_start_time = 0;
-  new_qos->domain_info[idx].id = domid;
-  if (domid == IDLE_DOMAIN_ID)
-    sprintf(new_qos->domain_info[idx].name, "Idle Task%d", global_cpu);
-  else
-    sprintf(new_qos->domain_info[idx].name, "Domain#%d", domid);
-  
-  for (i=0; i<NSAMPLES; i++) {
-    new_qos->qdata[i].ns_gotten[idx] = 0;
-    new_qos->qdata[i].ns_allocated[idx] = 0;
-    new_qos->qdata[i].ns_waiting[idx] = 0;
-    new_qos->qdata[i].ns_blocked[idx] = 0;
-    new_qos->qdata[i].switchin_count[idx] = 0;
-    new_qos->qdata[i].io_count[idx] = 0;
-  }
+    int i;
+
+    memset(&new_qos->domain_info[idx], 0, sizeof(_domain_info));
+    new_qos->domain_info[idx].last_update_time = global_now;
+    //  runnable_start_time[idx] = 0;
+    new_qos->domain_info[idx].runnable_start_time = 0; // invalidate
+    new_qos->domain_info[idx].in_use = 1;
+    new_qos->domain_info[idx].blocked_start_time = 0;
+    new_qos->domain_info[idx].id = domid;
+    if (domid == IDLE_DOMAIN_ID)
+        sprintf(new_qos->domain_info[idx].name, "Idle Task%d", global_cpu);
+    else
+        sprintf(new_qos->domain_info[idx].name, "Domain#%d", domid);
+  
+    for (i=0; i<NSAMPLES; i++) {
+        new_qos->qdata[i].ns_gotten[idx] = 0;
+        new_qos->qdata[i].ns_allocated[idx] = 0;
+        new_qos->qdata[i].ns_waiting[idx] = 0;
+        new_qos->qdata[i].ns_blocked[idx] = 0;
+        new_qos->qdata[i].switchin_count[idx] = 0;
+        new_qos->qdata[i].io_count[idx] = 0;
+    }
 }
 
 void global_init_domain(int domid, int idx) 
 {
-  int cpu;
-  _new_qos_data *saved_qos;
-  
-  saved_qos = new_qos;
-  
-  for (cpu=0; cpu<NCPU; cpu++) {
-    new_qos = cpu_qos_data[cpu];
-    qos_init_domain(domid, idx);
-  }
-  new_qos = saved_qos;
+    int cpu;
+    _new_qos_data *saved_qos;
+  
+    saved_qos = new_qos;
+  
+    for (cpu=0; cpu<NCPU; cpu++) {
+        new_qos = cpu_qos_data[cpu];
+        qos_init_domain(domid, idx);
+    }
+    new_qos = saved_qos;
 }
 
 
 // give index of this domain in the qos data array
 int indexof(int domid)
 {
-  int idx;
-  xc_dominfo_t dominfo[NDOMAINS];
-  int xc_handle, ndomains;
-  extern void qos_kill_thread(int domid);
-  
-  if (domid < 0) {     // shouldn't happen
-    printf("bad domain id: %d\r\n", domid);
-    return 0;
-  }
-
-  for (idx=0; idx<NDOMAINS; idx++)
-    if ( (new_qos->domain_info[idx].id == domid) && new_qos->domain_info[idx].in_use)
-      return idx;
-
-  // not found, make a new entry
-  for (idx=0; idx<NDOMAINS; idx++)
-    if (new_qos->domain_info[idx].in_use == 0) {
-      global_init_domain(domid, idx);
-      return idx;
-    }
-
-  // call domaininfo hypercall to try and garbage collect unused entries
-  xc_handle = xc_interface_open();
-  ndomains = xc_domain_getinfo(xc_handle, 0, NDOMAINS, dominfo);
-  xc_interface_close(xc_handle);
-
-  // for each domain in our data, look for it in the system dominfo structure
-  // and purge the domain's data from our state if it does not exist in the
-  // dominfo structure
-  for (idx=0; idx<NDOMAINS; idx++) {
-    int domid = new_qos->domain_info[idx].id;
-    int jdx;
-    
-    for (jdx=0; jdx<ndomains; jdx++) {
-      if (dominfo[jdx].domid == domid)
-       break;
-    }
-    if (jdx == ndomains)        // we didn't find domid in the dominfo struct
-      if (domid != IDLE_DOMAIN_ID) // exception for idle domain, which is not
-                                  // contained in dominfo
-       qos_kill_thread(domid); // purge our stale data
-  }
-  
-  // look again for a free slot
-  for (idx=0; idx<NDOMAINS; idx++)
-    if (new_qos->domain_info[idx].in_use == 0) {
-      global_init_domain(domid, idx);
-      return idx;
-    }
-
-  // still no space found, so bail
-  fprintf(stderr, "out of space in domain table, increase NDOMAINS\r\n");
-  exit(2);
+    int idx;
+    xc_dominfo_t dominfo[NDOMAINS];
+    int xc_handle, ndomains;
+    extern void qos_kill_thread(int domid);
+  
+    if (domid < 0) {   // shouldn't happen
+        printf("bad domain id: %d\r\n", domid);
+        return 0;
+    }
+
+    for (idx=0; idx<NDOMAINS; idx++)
+        if ( (new_qos->domain_info[idx].id == domid) && new_qos->domain_info[idx].in_use)
+            return idx;
+
+    // not found, make a new entry
+    for (idx=0; idx<NDOMAINS; idx++)
+        if (new_qos->domain_info[idx].in_use == 0) {
+            global_init_domain(domid, idx);
+            return idx;
+        }
+
+    // call domaininfo hypercall to try and garbage collect unused entries
+    xc_handle = xc_interface_open();
+    ndomains = xc_domain_getinfo(xc_handle, 0, NDOMAINS, dominfo);
+    xc_interface_close(xc_handle);
+
+    // for each domain in our data, look for it in the system dominfo structure
+    // and purge the domain's data from our state if it does not exist in the
+    // dominfo structure
+    for (idx=0; idx<NDOMAINS; idx++) {
+        int domid = new_qos->domain_info[idx].id;
+        int jdx;
+    
+        for (jdx=0; jdx<ndomains; jdx++) {
+            if (dominfo[jdx].domid == domid)
+                break;
+        }
+        if (jdx == ndomains)        // we didn't find domid in the dominfo struct
+            if (domid != IDLE_DOMAIN_ID) // exception for idle domain, which is not
+                // contained in dominfo
+                qos_kill_thread(domid);        // purge our stale data
+    }
+  
+    // look again for a free slot
+    for (idx=0; idx<NDOMAINS; idx++)
+        if (new_qos->domain_info[idx].in_use == 0) {
+            global_init_domain(domid, idx);
+            return idx;
+        }
+
+    // still no space found, so bail
+    fprintf(stderr, "out of space in domain table, increase NDOMAINS\r\n");
+    exit(2);
 }
 
 int domain_runnable(int domid)
@@ -879,25 +878,25 @@ void qos_update_thread(int cpu, int domi
     time_since_update = now - last_update_time;
 
     if (time_since_update < 0) {
-      // what happened here? either a timestamp wraparound, or more likely,
-      // a slight inconsistency among timestamps from various cpu's
-      if (-time_since_update < billion) {
-       // fairly small difference, let's just adjust 'now' to be a little
-       // beyond last_update_time
-       time_since_update = -time_since_update;
-      }
-      else if ( ((~0ULL - last_update_time) < billion) && (now < billion) ) {
-       // difference is huge, must be a wraparound
-       // last_update time should be "near" ~0ULL,
-       // and now should be "near" 0
-       time_since_update = now + (~0ULL - last_update_time);
-       printf("time wraparound\n");
-      }
-      else {
-       // none of the above, may be an out of order record
-       // no good solution, just ignore and update again later
-       return;
-      }
+        // what happened here? either a timestamp wraparound, or more likely,
+        // a slight inconsistency among timestamps from various cpu's
+        if (-time_since_update < billion) {
+            // fairly small difference, let's just adjust 'now' to be a little
+            // beyond last_update_time
+            time_since_update = -time_since_update;
+        }
+        else if ( ((~0ULL - last_update_time) < billion) && (now < billion) ) {
+            // difference is huge, must be a wraparound
+            // last_update time should be "near" ~0ULL,
+            // and now should be "near" 0
+            time_since_update = now + (~0ULL - last_update_time);
+            printf("time wraparound\n");
+        }
+        else {
+            // none of the above, may be an out of order record
+            // no good solution, just ignore and update again later
+            return;
+        }
     }
        
     new_qos->domain_info[id].last_update_time = now;
@@ -985,7 +984,7 @@ void qos_switch_in(int cpu, int domid, u
 
     // count up page flips for dom0 execution
     if (domid == 0)
-      dom0_flips = 0;
+        dom0_flips = 0;
 }
 
 // called when the current thread is taken off the cpu
@@ -1011,8 +1010,8 @@ void qos_switch_out(int cpu, int domid, 
 #if 0
     new_qos->qdata[n].ns_gotten[idx] += gotten;
     if (gotten > new_qos->qdata[n].ns_passed)
-      printf("inconsistency #257, diff = %lld\n",
-           gotten - new_qos->qdata[n].ns_passed );
+        printf("inconsistency #257, diff = %lld\n",
+               gotten - new_qos->qdata[n].ns_passed );
 #endif
     new_qos->domain_info[idx].ns_oncpu_since_boot += gotten;
     new_qos->domain_info[idx].runnable_start_time = now;
@@ -1021,8 +1020,8 @@ void qos_switch_out(int cpu, int domid, 
 
     // process dom0 page flips
     if (domid == 0)
-      if (dom0_flips == 0)
-       new_qos->qdata[n].flip_free_periods++;
+        if (dom0_flips == 0)
+            new_qos->qdata[n].flip_free_periods++;
 }
 
 // called when domain is put to sleep, may also be called
@@ -1047,11 +1046,11 @@ void qos_state_sleeping(int cpu, int dom
 // domain died, presume it's dead on all cpu's, not just mostly dead
 void qos_kill_thread(int domid)
 {
-  int cpu;
-  
-  for (cpu=0; cpu<NCPU; cpu++) {
-    cpu_qos_data[cpu]->domain_info[indexof(domid)].in_use = 0;
-  }
+    int cpu;
+  
+    for (cpu=0; cpu<NCPU; cpu++) {
+        cpu_qos_data[cpu]->domain_info[indexof(domid)].in_use = 0;
+    }
   
 }
 
@@ -1060,7 +1059,7 @@ void qos_kill_thread(int domid)
 // when thread is already runnable
 void qos_state_runnable(int cpu, int domid, uint64_t now)
 {
-   int idx;
+    int idx;
   
 
     qos_update_thread_stats(cpu, domid, now);
@@ -1080,79 +1079,85 @@ void qos_state_runnable(int cpu, int dom
 
 void qos_count_packets(domid_t domid, uint64_t now)
 {
-  int i, idx = indexof(domid);
-  _new_qos_data *cpu_data;
-
-  for (i=0; i<NCPU; i++) {
-    cpu_data = cpu_qos_data[i];
-    if (cpu_data->domain_info[idx].in_use) {
-      cpu_data->qdata[cpu_data->next_datapoint].io_count[idx]++;
-    }
-  }
-
-  new_qos->qdata[new_qos->next_datapoint].io_count[0]++;
-  dom0_flips++;
-}
-
-
-void process_record(int cpu, struct t_rec *r)
-{
-  uint64_t now;
-
-  new_qos = cpu_qos_data[cpu];
-
-  rec_count++;
-
-  now = ((double)r->cycles) / (opts.cpu_freq / 1000.0);
-
-  global_now = now;
-  global_cpu = cpu;
-
-  log_event(r->event);
-
-  switch (r->event) {
-
-  case TRC_SCHED_SWITCH_INFPREV:
-    // domain data[0] just switched out and received data[1] ns of cpu time
-    qos_switch_out(cpu, r->data[0], now, r->data[1]);
-    //    printf("ns_gotten %ld\n", r->data[1]);
-    break;
-    
-  case TRC_SCHED_SWITCH_INFNEXT:
-    // domain data[0] just switched in and
-    // waited data[1] ns, and was allocated data[2] ns of cpu time
-    qos_switch_in(cpu, r->data[0], now, r->data[2], r->data[1]);
-    break;
-    
-  case TRC_SCHED_DOM_ADD:
-    (void) indexof(r->data[0]);
-    break;
-    
-  case TRC_SCHED_DOM_REM:
-    qos_kill_thread(r->data[0]);
-    break;
-    
-  case TRC_SCHED_SLEEP:
-    qos_state_sleeping(cpu, r->data[0], now);
-    break;
-    
-  case TRC_SCHED_WAKE:
-    qos_state_runnable(cpu, r->data[0], now);
-    break;
-    
-  case TRC_SCHED_BLOCK:
-    qos_state_sleeping(cpu, r->data[0], now);
-    break;
-    
-  case TRC_MEM_PAGE_GRANT_TRANSFER:
-    qos_count_packets(r->data[0], now);
-    break;
-    
-  default:
-    break;
-  }
-  new_qos = NULL;
-}
-
-
-
+    int i, idx = indexof(domid);
+    _new_qos_data *cpu_data;
+
+    for (i=0; i<NCPU; i++) {
+        cpu_data = cpu_qos_data[i];
+        if (cpu_data->domain_info[idx].in_use) {
+            cpu_data->qdata[cpu_data->next_datapoint].io_count[idx]++;
+        }
+    }
+
+    new_qos->qdata[new_qos->next_datapoint].io_count[0]++;
+    dom0_flips++;
+}
+
+
+int process_record(int cpu, struct t_rec *r)
+{
+    uint64_t now = 0;
+    uint32_t *extra_u32 = r->u.nocycles.extra_u32;
+
+    new_qos = cpu_qos_data[cpu];
+
+    rec_count++;
+
+    if ( r->cycles_included )
+    {
+        now = ((uint64_t)r->u.cycles.cycles_hi << 32) | r->u.cycles.cycles_lo;
+        now = ((double)now) / (opts.cpu_freq / 1000.0);
+        extra_u32 = r->u.cycles.extra_u32;
+    }
+
+    global_now = now;
+    global_cpu = cpu;
+
+    log_event(r->event);
+
+    switch (r->event) {
+
+    case TRC_SCHED_SWITCH_INFPREV:
+        // domain data[0] just switched out and received data[1] ns of cpu time
+        qos_switch_out(cpu, extra_u32[0], now, extra_u32[1]);
+        //    printf("ns_gotten %ld\n", extra_u32[1]);
+        break;
+    
+    case TRC_SCHED_SWITCH_INFNEXT:
+        // domain data[0] just switched in and
+        // waited data[1] ns, and was allocated data[2] ns of cpu time
+        qos_switch_in(cpu, extra_u32[0], now, extra_u32[2], extra_u32[1]);
+        break;
+    
+    case TRC_SCHED_DOM_ADD:
+        (void) indexof(extra_u32[0]);
+        break;
+    
+    case TRC_SCHED_DOM_REM:
+        qos_kill_thread(extra_u32[0]);
+        break;
+    
+    case TRC_SCHED_SLEEP:
+        qos_state_sleeping(cpu, extra_u32[0], now);
+        break;
+    
+    case TRC_SCHED_WAKE:
+        qos_state_runnable(cpu, extra_u32[0], now);
+        break;
+    
+    case TRC_SCHED_BLOCK:
+        qos_state_sleeping(cpu, extra_u32[0], now);
+        break;
+    
+    case TRC_MEM_PAGE_GRANT_TRANSFER:
+        qos_count_packets(extra_u32[0], now);
+        break;
+    
+    default:
+        break;
+    }
+
+    new_qos = NULL;
+
+    return 4 + (r->cycles_included ? 8 : 0) + (r->extra_u32 * 4);
+}
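
A note on the xenbaked.c change above: trace records are now variable
length, so process_record() returns the record size in bytes and the
consumer index is advanced by that amount rather than by one fixed-size
record. A minimal sketch of the size calculation, using the field names
from the reworked struct t_rec later in this patch (the helper itself is
illustrative, not part of the committed code):

    /* Byte size of a variable-length trace record, derived from the new
     * t_rec header bits; equivalent to 4 + (cycles ? 8 : 0) + extra*4. */
    static inline unsigned int t_rec_size(const struct t_rec *r)
    {
        unsigned int size = sizeof(uint32_t);       /* 32-bit header word   */
        if ( r->cycles_included )
            size += 2 * sizeof(uint32_t);           /* cycles_lo, cycles_hi */
        size += r->extra_u32 * sizeof(uint32_t);    /* trailing extra_u32[] */
        return size;
    }
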
diff -r c0d1825f5189 -r 1445de775a7c tools/xentrace/xentrace.c
--- a/tools/xentrace/xentrace.c Mon Sep 24 13:44:29 2007 +0100
+++ b/tools/xentrace/xentrace.c Mon Sep 24 13:44:51 2007 +0100
@@ -102,7 +102,7 @@ void write_buffer(unsigned int cpu, unsi
     /* Write a CPU_BUF record on each buffer "window" written.  Wrapped
      * windows may involve two writes, so only write the record on the
      * first write. */
-    if(total_size)
+    if ( total_size != 0 )
     {
         struct {
             uint32_t header;
@@ -119,7 +119,8 @@ void write_buffer(unsigned int cpu, unsi
 
         written = write(outfd, &rec, sizeof(rec));
 
-        if(written!=sizeof(rec)) {
+        if ( written != sizeof(rec) )
+        {
             fprintf(stderr, "Cannot write cpu change (write returned %d)\n",
                     written);
             goto fail;
@@ -127,12 +128,15 @@ void write_buffer(unsigned int cpu, unsi
     }
 
     written = write(outfd, start, size);
-    if ( written != size ) {
+    if ( written != size )
+    {
         fprintf(stderr, "Write failed! (size %d, returned %d)\n",
                 size, written);
         goto fail;
     }
+
     return;
+
  fail:
     PERROR("Failed to write trace data");
     exit(EXIT_FAILURE);
@@ -337,7 +341,7 @@ int monitor_tbufs(int outfd)
     get_tbufs(&tbufs_mfn, &size);
     tbufs_mapped = map_tbufs(tbufs_mfn, num, size);
 
-    data_size = (size - sizeof(struct t_buf));
+    data_size = size - sizeof(struct t_buf);
 
     /* build arrays of convenience ptrs */
     meta  = init_bufs_ptrs(tbufs_mapped, num, size);
@@ -353,13 +357,13 @@ int monitor_tbufs(int outfd)
         for ( i = 0; (i < num) && !interrupted; i++ )
         {
             unsigned long start_offset, end_offset, window_size, cons, prod;
-            rmb(); /* read prod, then read item. */
                 
             /* Read window information only once. */
             cons = meta[i]->cons;
             prod = meta[i]->prod;
+            rmb(); /* read prod, then read item. */
             
-            if(cons == prod)
+            if ( cons == prod )
                 continue;
            
             assert(prod > cons);
@@ -368,7 +372,7 @@ int monitor_tbufs(int outfd)
             start_offset = cons % data_size;
             end_offset = prod % data_size;
 
-            if(end_offset > start_offset)
+            if ( end_offset > start_offset )
             {
                 /* If window does not wrap, write in one big chunk */
                 write_buffer(i, data[i]+start_offset,
@@ -382,7 +386,7 @@ int monitor_tbufs(int outfd)
                  * - first, start to the end of the buffer
                  * - second, start of buffer to end of window
                  */
-                write_buffer(i, data[i]+start_offset,
+                write_buffer(i, data[i] + start_offset,
                              data_size - start_offset,
                              window_size,
                              outfd);
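
A note on the barrier movement in monitor_tbufs() above: the read barrier
now follows the reads of cons and prod, so the producer index is observed
before any record data is read. Schematically (illustrative only; the real
loop flushes whole windows with write_buffer() as shown above):

    /* cons = meta[i]->cons;   consumer's own cursor                      */
    /* prod = meta[i]->prod;   producer cursor, written by Xen            */
    /* rmb();                  read prod, then read the records           */
    /* ... copy out the records in [cons, prod) ...                       */
    /* mb();                   then publish the new consumer index        */

Without the rmb() after the index reads, record bytes could be loaded
before the prod value that guarantees they are complete.
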
diff -r c0d1825f5189 -r 1445de775a7c xen/common/trace.c
--- a/xen/common/trace.c        Mon Sep 24 13:44:29 2007 +0100
+++ b/xen/common/trace.c        Mon Sep 24 13:44:51 2007 +0100
@@ -233,7 +233,7 @@ static inline int calc_rec_size(int cycl
 {
     int rec_size;
     rec_size = 4;
-    if(cycles)
+    if ( cycles )
         rec_size += 8;
     rec_size += extra;
     return rec_size;
@@ -254,10 +254,10 @@ static inline int __insert_record(struct
                                   int extra,
                                   int cycles,
                                   int rec_size,
-                                  unsigned char * extra_data) 
+                                  unsigned char *extra_data)
 {
     struct t_rec *rec;
-    unsigned char * dst;
+    unsigned char *dst;
     unsigned long extra_word = extra/sizeof(u32);
     int local_rec_size = calc_rec_size(cycles, extra);
 
@@ -266,7 +266,7 @@ static inline int __insert_record(struct
     /* Double-check once more that we have enough space.
      * Don't bugcheck here, in case the userland tool is doing
      * something stupid. */
-    if(calc_bytes_avail(buf) < rec_size )
+    if ( calc_bytes_avail(buf) < rec_size )
     {
         printk("%s: %u bytes left (%u - (%u - %u)) recsize %u.\n",
                __func__,
@@ -278,25 +278,22 @@ static inline int __insert_record(struct
     rmb();
 
     rec = (struct t_rec *)&this_cpu(t_data)[buf->prod % data_size];
-    rec->header = event;
-    rec->header |= extra_word << TRACE_EXTRA_SHIFT;
-    if(cycles) 
+    rec->event = event;
+    rec->extra_u32 = extra_word;
+    dst = (unsigned char *)rec->u.nocycles.extra_u32;
+    if ( (rec->cycles_included = cycles) != 0 )
     {
         u64 tsc = (u64)get_cycles();
-
-        rec->header |= TRC_HD_CYCLE_FLAG;
-        rec->u.cycles.cycles_lo = tsc & ((((u64)1)<<32)-1);
-        rec->u.cycles.cycles_hi = tsc >> 32;
-        dst = rec->u.cycles.data;
+        rec->u.cycles.cycles_lo = (uint32_t)tsc;
+        rec->u.cycles.cycles_hi = (uint32_t)(tsc >> 32);
+        dst = (unsigned char *)rec->u.cycles.extra_u32;
     } 
-    else
-        dst = rec->u.nocycles.data;
-
-    if(extra_data && extra)
+
+    if ( extra_data && extra )
         memcpy(dst, extra_data, extra);
 
     wmb();
-    buf->prod+=rec_size;
+    buf->prod += rec_size;
 
     return rec_size;
 }
@@ -305,23 +302,17 @@ static inline int insert_wrap_record(str
 {
     int space_left = calc_bytes_to_wrap(buf);
     unsigned long extra_space = space_left - sizeof(u32);
-    int cycles=0;
-
-    if(space_left > size)
-        printk("%s: space_left %d, size %d!\n",
-               __func__, space_left, size);
+    int cycles = 0;
 
     BUG_ON(space_left > size);
 
     /* We may need to add cycles to take up enough space... */
-    if((extra_space/sizeof(u32)) > TRACE_EXTRA_MAX)
+    if ( (extra_space/sizeof(u32)) > TRACE_EXTRA_MAX )
     {
         cycles = 1;
         extra_space -= sizeof(u64);
-        
         ASSERT((extra_space/sizeof(u32)) <= TRACE_EXTRA_MAX);
     }
-
 
     return __insert_record(buf,
                     TRC_TRACE_WRAP_BUFFER,
@@ -433,9 +424,9 @@ void __trace_var(u32 event, int cycles, 
      * avoid if we can.  So do the math, check it in debug versions, and
      * do a final check always if we happen to write a record.
      */
-    if(this_cpu(lost_records))
-    {
-        if(LOST_REC_SIZE > bytes_to_wrap)
+    if ( this_cpu(lost_records) )
+    {
+        if ( LOST_REC_SIZE > bytes_to_wrap )
         {
             total_size += bytes_to_wrap;
             bytes_to_wrap = data_size;
@@ -443,7 +434,7 @@ void __trace_var(u32 event, int cycles, 
         else
         {
             bytes_to_wrap -= LOST_REC_SIZE;
-            if(bytes_to_wrap == 0)
+            if ( bytes_to_wrap == 0 )
                 bytes_to_wrap == data_size;
         }
         total_size += LOST_REC_SIZE;
@@ -451,7 +442,7 @@ void __trace_var(u32 event, int cycles, 
 
     ASSERT(bytes_to_wrap == calc_bytes_to_wrap(buf));
 
-    if(rec_size > bytes_to_wrap)
+    if ( rec_size > bytes_to_wrap )
     {
         total_size += bytes_to_wrap;
         bytes_to_wrap = data_size;
@@ -464,7 +455,7 @@ void __trace_var(u32 event, int cycles, 
     total_size += rec_size;
 
     /* Do we have enough space for everything? */
-    if(total_size > bytes_to_tail) 
+    if ( total_size > bytes_to_tail )
     {
         this_cpu(lost_records)++;
         local_irq_restore(flags);
@@ -476,9 +467,9 @@ void __trace_var(u32 event, int cycles, 
      */
     bytes_to_wrap = calc_bytes_to_wrap(buf);
 
-    if(this_cpu(lost_records))
-    {
-        if(LOST_REC_SIZE > bytes_to_wrap)
+    if ( this_cpu(lost_records) )
+    {
+        if ( LOST_REC_SIZE > bytes_to_wrap )
         {
             insert_wrap_record(buf, LOST_REC_SIZE);
             bytes_to_wrap = data_size;
@@ -487,7 +478,7 @@ void __trace_var(u32 event, int cycles, 
         {
             bytes_to_wrap -= LOST_REC_SIZE;
             /* LOST_REC might line up perfectly with the buffer wrap */
-            if(bytes_to_wrap == 0)
+            if ( bytes_to_wrap == 0 )
                 bytes_to_wrap = data_size;
         }
         insert_lost_records(buf);
@@ -495,40 +486,18 @@ void __trace_var(u32 event, int cycles, 
 
     ASSERT(bytes_to_wrap == calc_bytes_to_wrap(buf));
 
-    if(rec_size > bytes_to_wrap)
-    {
+    if ( rec_size > bytes_to_wrap )
         insert_wrap_record(buf, rec_size);
-    } 
 
     /* Write the original record */
     __insert_record(buf, event, extra, cycles, rec_size, extra_data);
 
     local_irq_restore(flags);
 
-    /*
-     * Notify trace buffer consumer that we've crossed the high water mark.
-     *
-     */
+    /* Notify trace buffer consumer that we've crossed the high water mark. */
     if ( started_below_highwater
-         && ( (buf->prod - buf->cons) > t_buf_highwater ) )
+         && ((buf->prod - buf->cons) > t_buf_highwater) )
         raise_softirq(TRACE_SOFTIRQ);
-}
-
-
-void __trace_fixed(u32 event, unsigned long d1, unsigned long d2,
-           unsigned long d3, unsigned long d4, unsigned long d5)
-{
-    u32 extra_data[5];
-    
-    /* In a 64-bit hypervisor, this will truncate to 32 bits. */
-    extra_data[0]=d1;
-    extra_data[1]=d2;
-    extra_data[2]=d3;
-    extra_data[3]=d4;
-    extra_data[4]=d5;
-
-    __trace_var(event, 1/* include cycles */, sizeof(*extra_data)*5,
-              (unsigned char *)extra_data);
 }
 
 /*
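
A note on the trace.c change above: __insert_record() now fills in the
record header through the bitfields of the reworked struct t_rec instead
of assembling a single header word by hand. Both forms describe the same
32-bit layout (illustrative comparison; assumes the usual little-endian
bitfield allocation of the existing Xen targets, where the first-declared
field occupies the least significant bits):

    /* Old encoding (removed above): one u32 assembled manually. */
    header  = event;                            /* bits 27..0 : event ID      */
    header |= extra_word << TRACE_EXTRA_SHIFT;  /* bits 30..28: # extra u32s  */
    if ( cycles )
        header |= TRC_HD_CYCLE_FLAG;            /* bit 31     : cycles flag   */

    /* New encoding (added above): the same bits, expressed as bitfields. */
    rec->event           = event;        /* uint32_t event:28           */
    rec->extra_u32       = extra_word;   /* uint32_t extra_u32:3        */
    rec->cycles_included = cycles;       /* uint32_t cycles_included:1  */
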
diff -r c0d1825f5189 -r 1445de775a7c xen/include/public/trace.h
--- a/xen/include/public/trace.h        Mon Sep 24 13:44:29 2007 +0100
+++ b/xen/include/public/trace.h        Mon Sep 24 13:44:51 2007 +0100
@@ -103,23 +103,22 @@
 #define TRC_HVM_MMIO_ASSIST     (TRC_HVM_HANDLER + 0x17)
 #define TRC_HVM_CLTS            (TRC_HVM_HANDLER + 0x18)
 #define TRC_HVM_LMSW            (TRC_HVM_HANDLER + 0x19)
-#define TRC_HVM_PF_XEN64       (TRC_HVM_HANDLER + 0x20)
+#define TRC_HVM_PF_XEN64        (TRC_HVM_HANDLER + 0x20)
 
 /* This structure represents a single trace buffer record. */
 struct t_rec {
-    uint32_t header;           /* 31   : Cycles included?
-                                  30-28: # of extra words
-                                  27- 0: event ID                */
+    uint32_t event:28;
+    uint32_t extra_u32:3;         /* # entries in trailing extra_u32[] array */
+    uint32_t cycles_included:1;   /* u.cycles or u.no_cycles? */
     union {
         struct {
             uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */
-            unsigned char data[28];        /* event data items        */
+            uint32_t extra_u32[7];         /* event data items */
         } cycles;
         struct {
-            unsigned char data[28];        /* event data items        */
+            uint32_t extra_u32[7];         /* event data items */
         } nocycles;
-    }u;
-        
+    } u;
 };
 
 /*
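
With the header bits above, record sizes are no longer fixed at
sizeof(struct t_rec); a few worked examples implied by the new layout:

    /* header only                     : 4 bytes                  */
    /* header + 5 extra u32s           : 4 + 5*4      = 24 bytes  */
    /* header + cycles + 5 extra u32s  : 4 + 8 + 5*4  = 32 bytes  */
    /* largest (cycles + 7 extra u32s) : 4 + 8 + 7*4  = 40 bytes  */
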
diff -r c0d1825f5189 -r 1445de775a7c xen/include/xen/trace.h
--- a/xen/include/xen/trace.h   Mon Sep 24 13:44:29 2007 +0100
+++ b/xen/include/xen/trace.h   Mon Sep 24 13:44:51 2007 +0100
@@ -33,17 +33,7 @@ void init_trace_bufs(void);
 /* used to retrieve the physical address of the trace buffers */
 int tb_control(struct xen_sysctl_tbuf_op *tbc);
 
-void __trace_fixed(u32 event, unsigned long d1, unsigned long d2,
-           unsigned long d3, unsigned long d4, unsigned long d5);
 void __trace_var(u32 event, int cycles, int extra, unsigned char *extra_data);
-
-static inline void trace_fixed(u32 event, unsigned long d1,
-                               unsigned long d2, unsigned long d3,
-                               unsigned long d4, unsigned long d5)
-{
-    if( unlikely(tb_init_done) )
-        __trace_fixed(event, d1, d2, d3, d4, d5);
-}
 
 static inline void trace_var(u32 event, int cycles, int extra,
                                unsigned char *extra_data)
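
With __trace_fixed() and its trace_fixed() wrapper removed, callers emit
records through trace_var() alone. A hypothetical call site, for
illustration only (TRC_EXAMPLE_EVENT and the payload values are invented):

    uint32_t d[2] = { domid, ns_gotten };
    trace_var(TRC_EXAMPLE_EVENT, 1 /* include cycles */,
              sizeof(d), (unsigned char *)d);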
