
[Xen-changelog] [xen-unstable] trace: insert compiler memory barriers



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1278314658 -3600
# Node ID 9074d50d09358cd8349d54c7ab2e2ead81fa1570
# Parent  f483b5ce7be235494156fee164decd73e0472cb7
trace: insert compiler memory barriers

This ensures that fields shared writably with Dom0 are read only once,
so that the consistency checks and the calculations that follow them
operate on the same values.

I realized there was another multiple-read issue; a fix for it is also
included, which at the same time simplifies __insert_record().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/common/trace.c |   52 +++++++++++++++++++++++++++-------------------------
 1 file changed, 27 insertions(+), 25 deletions(-)
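
As an aside, the pattern every hunk below applies can be shown in a
minimal standalone sketch (not part of the patch): copy the shared
producer/consumer indices into locals, then issue a compiler barrier so
the sanity check and the arithmetic that follows are guaranteed to see
the same snapshot. The barrier() definition, the data_size variable,
the function name, and the bounds test standing in for Xen's bogus()
helper are all assumptions of the sketch, not code from the tree.

#include <stdint.h>

/* Usual GCC compiler-only barrier: no instructions, but the compiler
 * may not cache memory contents across it. */
#define barrier() __asm__ __volatile__ ( "" : : : "memory" )

struct t_buf {
    uint32_t cons;              /* shared (writably) with Dom0 */
    uint32_t prod;
};

static uint32_t data_size;      /* size of the trace data area */

static uint32_t calc_unconsumed_bytes_sketch(const struct t_buf *buf)
{
    uint32_t prod = buf->prod, cons = buf->cons;    /* one read each */
    int32_t x;

    /* Without this, GCC may legally re-load buf->prod/buf->cons from
     * the (Dom0-writable) buffer below, so the check and the
     * subtraction could observe different values. */
    barrier();

    if ( prod >= 2 * data_size || cons >= 2 * data_size ) /* stand-in for bogus() */
        return data_size;

    x = prod - cons;
    if ( x < 0 )
        x += 2 * data_size;     /* indices wrap at 2*data_size */

    return x;
}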

diff -r f483b5ce7be2 -r 9074d50d0935 xen/common/trace.c
--- a/xen/common/trace.c        Fri Jul 02 19:04:57 2010 +0100
+++ b/xen/common/trace.c        Mon Jul 05 08:24:18 2010 +0100
@@ -459,11 +459,13 @@ static inline u32 calc_unconsumed_bytes(
 static inline u32 calc_unconsumed_bytes(const struct t_buf *buf)
 {
     u32 prod = buf->prod, cons = buf->cons;
-    s32 x = prod - cons;
-
+    s32 x;
+
+    barrier(); /* must read buf->prod and buf->cons only once */
     if ( bogus(prod, cons) )
         return data_size;
 
+    x = prod - cons;
     if ( x < 0 )
         x += 2*data_size;
 
@@ -475,12 +477,14 @@ static inline u32 calc_unconsumed_bytes(
 
 static inline u32 calc_bytes_to_wrap(const struct t_buf *buf)
 {
-    u32 prod = buf->prod;
-    s32 x = data_size - prod;
-
-    if ( bogus(prod, buf->cons) )
+    u32 prod = buf->prod, cons = buf->cons;
+    s32 x;
+
+    barrier(); /* must read buf->prod and buf->cons only once */
+    if ( bogus(prod, cons) )
         return 0;
 
+    x = data_size - prod;
     if ( x <= 0 )
         x += data_size;
 
@@ -495,11 +499,14 @@ static inline u32 calc_bytes_avail(const
     return data_size - calc_unconsumed_bytes(buf);
 }
 
-static inline struct t_rec *next_record(const struct t_buf *buf)
-{
-    u32 x = buf->prod;
-
-    if ( !tb_init_done || bogus(x, buf->cons) )
+static inline struct t_rec *next_record(const struct t_buf *buf,
+                                        uint32_t *next)
+{
+    u32 x = buf->prod, cons = buf->cons;
+
+    barrier(); /* must read buf->prod and buf->cons only once */
+    *next = x;
+    if ( !tb_init_done || bogus(x, cons) )
         return NULL;
 
     if ( x >= data_size )
@@ -526,23 +533,21 @@ static inline void __insert_record(struc
     BUG_ON(local_rec_size != rec_size);
     BUG_ON(extra & 3);
 
+    rec = next_record(buf, &next);
+    if ( !rec )
+        return;
     /* Double-check once more that we have enough space.
      * Don't bugcheck here, in case the userland tool is doing
      * something stupid. */
-    next = calc_bytes_avail(buf);
-    if ( next < rec_size )
+    if ( (unsigned char *)rec + rec_size > this_cpu(t_data) + data_size )
     {
         if ( printk_ratelimit() )
             printk(XENLOG_WARNING
-                   "%s: avail=%u (size=%08x prod=%08x cons=%08x) rec=%u\n",
-                   __func__, next, data_size, buf->prod, buf->cons, rec_size);
-        return;
-    }
-    rmb();
-
-    rec = next_record(buf);
-    if ( !rec )
-        return;
+                   "%s: size=%08x prod=%08x cons=%08x rec=%u\n",
+                   __func__, data_size, next, buf->cons, rec_size);
+        return;
+    }
+
     rec->event = event;
     rec->extra_u32 = extra_word;
     dst = (unsigned char *)rec->u.nocycles.extra_u32;
@@ -559,9 +564,6 @@ static inline void __insert_record(struc
 
     wmb();
 
-    next = buf->prod;
-    if ( bogus(next, buf->cons) )
-        return;
     next += rec_size;
     if ( next >= 2*data_size )
         next -= 2*data_size;
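
Note the reworked flow in __insert_record(): next_record() now hands
back the prod snapshot through its *next argument, so the space check
and the final producer update are both derived from the very value
that was validated, instead of re-reading buf->prod afterwards.

For comparison, a volatile cast is another common way to force a
single read of a shared field; a sketch of that idiom follows (this is
not what the patch uses, and the ACCESS_ONCE and read_indices_once
names here are illustrative only):

#include <stdint.h>

/* Force exactly one load per evaluation: the volatile access may not
 * be duplicated or re-materialized by the compiler. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct t_buf {
    uint32_t cons;              /* shared (writably) with Dom0 */
    uint32_t prod;
};

/* Load each shared index exactly once into caller-provided locals. */
static void read_indices_once(const struct t_buf *buf,
                              uint32_t *prod, uint32_t *cons)
{
    *prod = ACCESS_ONCE(buf->prod);
    *cons = ACCESS_ONCE(buf->cons);
}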
