
Re: [Xen-devel] [PATCH v1 4/5] xentrace: Use xc_cpumap_t when setting the cpu mask (v4)



On 06/04/2014 02:44 PM, Konrad Rzeszutek Wilk wrote:
Instead of using a uint32_t. This requires using the
xc_tbuf_set_cpu_mask_array API call, which can handle
variable-sized uint8_t arrays.

As this is just swapping over to use the new API, the existing
shortcoming when using -c is still present: it can only handle up
to 32 CPUs.

But if '-c' has not been specified, it will construct a CPU mask
covering all physical CPUs.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
[v2: Changes per Boris's and Daniel's review]
[v3,v4: Change per Boris's review]

Looks good (saving the reviewed-by because I think this should probably be merged into patch 2).

 -George
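
For reference, the shape of the API change (prototypes paraphrased from the
diff below and from libxc's xenctrl.h; treat the exact signatures as
illustrative rather than authoritative):

    /* Old interface: a fixed 32-bit mask, hence the 32-CPU limit. */
    int xc_tbuf_set_cpu_mask(xc_interface *xch, uint32_t mask);

    /* New interface: a variable-sized byte array (xc_cpumap_t is a
     * pointer to uint8_t), sized to cover 'bits' CPUs. */
    int xc_tbuf_set_cpu_mask_array(xc_interface *xch, xc_cpumap_t mask,
                                   int bits);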

---
  tools/xentrace/xentrace.8 |  3 ++
  tools/xentrace/xentrace.c | 98 +++++++++++++++++++++++++++++++++++++++--------
  2 files changed, 86 insertions(+), 15 deletions(-)

diff --git a/tools/xentrace/xentrace.8 b/tools/xentrace/xentrace.8
index ac18e9f..c176a96 100644
--- a/tools/xentrace/xentrace.8
+++ b/tools/xentrace/xentrace.8
@@ -38,6 +38,9 @@ for new data.
  .TP
  .B -c, --cpu-mask=c
  set bitmask of CPUs to trace. It is limited to 32-bits.
+If not specified, a cpu-mask covering all available CPUs will be
+constructed.
+
  .TP
  .B -e, --evt-mask=e
  set event capture mask. If not specified the TRC_ALL will be used.
diff --git a/tools/xentrace/xentrace.c b/tools/xentrace/xentrace.c
index 8a38e32..2063ae8 100644
--- a/tools/xentrace/xentrace.c
+++ b/tools/xentrace/xentrace.c
@@ -30,6 +30,7 @@
  #include <xen/trace.h>

  #include <xenctrl.h>
+#include "xc_bitops.h"

  #define PERROR(_m, _a...)                                       \
  do {                                                            \
@@ -52,7 +53,7 @@ typedef struct settings_st {
      char *outfile;
      unsigned long poll_sleep; /* milliseconds to sleep between polls */
      uint32_t evt_mask;
-    uint32_t cpu_mask;
+    xc_cpumap_t cpu_mask;
      unsigned long tbuf_size;
      unsigned long disk_rsvd;
      unsigned long timeout;
@@ -521,23 +522,66 @@ static struct t_struct *map_tbufs(unsigned long tbufs_mfn, unsigned int num,
      return &tbufs;
  }

+static void print_cpu_mask(xc_cpumap_t mask, int bits)
+{
+    unsigned int v, had_printed = 0;
+    int i;
+
+    fprintf(stderr, "change cpumask to 0x");
+
+    for ( i = DIV_ROUND_UP(bits, 8) - 1; i >= 0; i-- )
+    {
+        v = mask[i];
+        if ( v || had_printed ) {
+            fprintf(stderr, had_printed ? "%02x" : "%x", v);
+            had_printed = 1;
+        }
+    }
+    fprintf(stderr, "\n");
+}
+
+static void set_cpu_mask(xc_cpumap_t mask)
+{
+    int bits, i, ret = 0;
+
+    bits = xc_get_max_cpus(xc_handle);
+    if ( bits <= 0 )
+        goto out;
+
+    if ( !mask )
+    {
+        mask = xc_cpumap_alloc(xc_handle);
+        if ( !mask )
+            goto out;
+
+        /* Set it to include _all_ CPUs. */
+        for ( i = 0; i < DIV_ROUND_UP(bits, 8); i++ )
+            mask[i] = 0xff;
+    }
+    /* And this will limit it to the exact amount of bits. */
+    ret = xc_tbuf_set_cpu_mask_array(xc_handle, mask, bits);
+    if ( ret != 0 )
+        goto out;
+
+    print_cpu_mask(mask, bits);
+    return;
+out:
+    PERROR("Failure to get trace buffer pointer from Xen and set the new 
mask");
+    exit(EXIT_FAILURE);
+}
+
  /**
- * set_mask - set the cpu/event mask in HV
+ * set_evt_mask - set the event mask in HV
   * @mask:           the new mask
   * @type:           the new mask type,0-event mask, 1-cpu mask
   *
   */
-static void set_mask(uint32_t mask, int type)
+static void set_evt_mask(uint32_t mask)
  {
      int ret = 0;

-    if (type == 1) {
-        ret = xc_tbuf_set_cpu_mask(xc_handle, mask);
-        fprintf(stderr, "change cpumask to 0x%x\n", mask);
-    } else if (type == 0) {
-        ret = xc_tbuf_set_evt_mask(xc_handle, mask);
-        fprintf(stderr, "change evtmask to 0x%x\n", mask);
-    }
+    ret = xc_tbuf_set_evt_mask(xc_handle, mask);
+    fprintf(stderr, "change evtmask to 0x%x\n", mask);

      if ( ret != 0 )
      {
@@ -906,6 +950,23 @@ static int parse_evtmask(char *arg)
      return 0;
  }

+static int parse_cpumask(const char *arg)
+{
+    xc_cpumap_t map;
+    uint32_t v, i;
+
+    map = malloc(sizeof(uint32_t));
+    if ( !map )
+        return -ENOMEM;
+
+    v = argtol(arg, 0);
+    for ( i = 0; i < sizeof(uint32_t); i++ )
+        map[i] = (v >> (i * 8)) & 0xff;
+
+    opts.cpu_mask = map;
+    return 0;
+}
+
  /* parse command line arguments */
  static void parse_args(int argc, char **argv)
  {
@@ -937,7 +998,12 @@ static void parse_args(int argc, char **argv)
              break;

          case 'c': /* set new cpu mask for filtering*/
-            opts.cpu_mask = argtol(optarg, 0);
+            /* The mask is applied later, as 'xc_handle' is not set up yet. */
+            if ( parse_cpumask(optarg) )
+            {
+                perror("Not enough memory!");
+                exit(EXIT_FAILURE);
+            }
              break;

          case 'e': /* set new event mask for filtering*/
@@ -1002,7 +1068,7 @@ int main(int argc, char **argv)
      opts.outfile = 0;
      opts.poll_sleep = POLL_SLEEP_MILLIS;
      opts.evt_mask = 0;
-    opts.cpu_mask = 0;
+    opts.cpu_mask = NULL;
      opts.disk_rsvd = 0;
      opts.disable_tracing = 1;
      opts.start_disabled = 0;
@@ -1018,10 +1084,12 @@ int main(int argc, char **argv)
      }

      if ( opts.evt_mask != 0 )
-        set_mask(opts.evt_mask, 0);
+        set_evt_mask(opts.evt_mask);
+

-    if ( opts.cpu_mask != 0 )
-        set_mask(opts.cpu_mask, 1);
+    set_cpu_mask(opts.cpu_mask);
+    /* We don't use it past this point. */
+    free(opts.cpu_mask);

      if ( opts.timeout != 0 )
          alarm(opts.timeout);
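
To make the byte layout concrete, here is a stand-alone sketch mirroring the
byte-splitting loop in parse_cpumask() above (the value 0x00010003 is purely
illustrative; low-numbered CPUs land in map[0]):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t v = 0x00010003;  /* as if invoked: xentrace -c 0x00010003 */
        uint8_t map[sizeof(uint32_t)];
        unsigned int i;

        /* Same splitting as parse_cpumask(): 8 CPUs per byte, LSB first. */
        for ( i = 0; i < sizeof(uint32_t); i++ )
            map[i] = (v >> (i * 8)) & 0xff;

        /* Prints "map = 03 00 01 00": CPUs 0, 1 and 16 are selected. */
        printf("map = %02x %02x %02x %02x\n",
               map[0], map[1], map[2], map[3]);
        return 0;
    }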


