[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] xentrace: handle sparse cpu ids correctly in xen trace buffer handling



The per-cpu buffers for Xentrace are addressed by cpu-id, but the info
array for the buffers is sized only by the number of online cpus. This
might lead to crashes when using Xentrace with smt=0.

The t_info structure has to be sized based on nr_cpu_ids.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 tools/xentrace/xentrace.c |  2 +-
 xen/common/trace.c        | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/tools/xentrace/xentrace.c b/tools/xentrace/xentrace.c
index 364a6fdad5..b008342df7 100644
--- a/tools/xentrace/xentrace.c
+++ b/tools/xentrace/xentrace.c
@@ -594,7 +594,7 @@ static unsigned int get_num_cpus(void)
         exit(EXIT_FAILURE);
     }
 
-    return physinfo.nr_cpus;
+    return physinfo.max_cpu_id + 1;
 }
 
 /**
diff --git a/xen/common/trace.c b/xen/common/trace.c
index 8cdc17b731..c1d999267a 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -113,7 +113,7 @@ static int calculate_tbuf_size(unsigned int pages, uint16_t 
t_info_first_offset)
     struct t_info dummy_pages;
     typeof(dummy_pages.tbuf_size) max_pages;
     typeof(dummy_pages.mfn_offset[0]) max_mfn_offset;
-    unsigned int max_cpus = num_online_cpus();
+    unsigned int max_cpus = nr_cpu_ids;
     unsigned int t_info_words;
 
     /* force maximum value for an unsigned type */
@@ -151,11 +151,11 @@ static int calculate_tbuf_size(unsigned int pages, 
uint16_t t_info_first_offset)
      * NB this calculation is correct, because t_info_first_offset is
      * in words, not bytes
      */
-    t_info_words = num_online_cpus() * pages + t_info_first_offset;
+    t_info_words = nr_cpu_ids * pages + t_info_first_offset;
     t_info_pages = PFN_UP(t_info_words * sizeof(uint32_t));
     printk(XENLOG_INFO "xentrace: requesting %u t_info pages "
            "for %u trace pages on %u cpus\n",
-           t_info_pages, pages, num_online_cpus());
+           t_info_pages, pages, nr_cpu_ids);
     return pages;
 }
 
@@ -202,7 +202,7 @@ static int alloc_trace_bufs(unsigned int pages)
      * Allocate buffers for all of the cpus.
      * If any fails, deallocate what you have so far and exit.
      */
-    for_each_online_cpu(cpu)
+    for_each_present_cpu(cpu)
     {
         offset = t_info_first_offset + (cpu * pages);
         t_info->mfn_offset[cpu] = offset;
@@ -224,7 +224,7 @@ static int alloc_trace_bufs(unsigned int pages)
     /*
      * Initialize buffers for all of the cpus.
      */
-    for_each_online_cpu(cpu)
+    for_each_present_cpu(cpu)
     {
         struct t_buf *buf;
 
@@ -261,7 +261,7 @@ static int alloc_trace_bufs(unsigned int pages)
     return 0;
 
 out_dealloc:
-    for_each_online_cpu(cpu)
+    for_each_present_cpu(cpu)
     {
         offset = t_info->mfn_offset[cpu];
         if ( !offset )
@@ -418,7 +418,7 @@ int tb_control(struct xen_sysctl_tbuf_op *tbc)
         /* Clear any lost-record info so we don't get phantom lost records 
next time we
          * start tracing.  Grab the lock to make sure we're not racing anyone. 
 After this
          * hypercall returns, no more records should be placed into the 
buffers. */
-        for_each_online_cpu(i)
+        for_each_present_cpu(i)
         {
             unsigned long flags;
             spin_lock_irqsave(&per_cpu(t_lock, i), flags);
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.