[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 06/15] xen: trace IRQ enabling/disabling



Trace when interrupts are disabled and (re)enabled.
Basically, we replace the IRQ disabling and enabling
functions with helpers that do the same, but also
output the proper trace record.

For putting in the record something that will let
us identify _where_ in the code (i.e., in what function)
the IRQ manipulation is happening, use either:
 - current_text_addr(),
 - or __builtin_return_address(0).

In fact, depending on whether the disabling/enabling
happens in macros (like for local_irq_disable() and
local_irq_enable()) or in actual functions (like in
spin_lock_irq*()), it is either:
 - the actual content of the instruction pointer when
   IRQ are disabled/enabled,
 - or the return address of the utility function where
   IRQ are disabled/enabled,
that will tell us which piece of code is actually
asking for the IRQ manipulation operation.

Gate this with its specific Kconfig option, and keep
it in disabled state by default (i.e., don't build it,
if not explicitly specified), as the impact on
performance may be non-negligible.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Jennifer Herbert <jennifer.herbert@xxxxxxxxxx>
---
 xen/Kconfig.debug                  |   11 ++++-
 xen/common/spinlock.c              |   16 +++++--
 xen/common/trace.c                 |   10 +++-
 xen/include/asm-arm/arm32/system.h |   12 +++++
 xen/include/asm-arm/arm64/system.h |   12 +++++
 xen/include/asm-x86/system.h       |   85 ++++++++++++++++++++++++++++++++++--
 xen/include/public/trace.h         |    2 +
 xen/include/xen/rwlock.h           |   33 +++++++++++---
 8 files changed, 161 insertions(+), 20 deletions(-)

diff --git a/xen/Kconfig.debug b/xen/Kconfig.debug
index 374c1c0..81910c9 100644
--- a/xen/Kconfig.debug
+++ b/xen/Kconfig.debug
@@ -98,7 +98,7 @@ config PERF_ARRAYS
        ---help---
          Enables software performance counter array histograms.
 
-config TRACING
+menuconfig TRACING
        bool "Tracing"
        default y
        ---help---
@@ -106,6 +106,15 @@ config TRACING
          in per-CPU ring buffers. The 'xentrace' tool can be used to read
          the buffers and dump the content on the disk.
 
+config TRACE_IRQSOFF
+       bool "Trace when IRQs are disabled and (re)enabled" if EXPERT = "y"
+       default n
+       depends on TRACING
+       ---help---
+         Makes it possible to generate events _every_ time IRQs are disabled
+          and (re)enabled, along with an indication of where that happened.
+          Note that this comes with high overhead and produces a huge amount of
+          tracing data.
 
 config VERBOSE_DEBUG
        bool "Verbose debug messages"
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 2a06406..33b903e 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -150,7 +150,9 @@ void _spin_lock(spinlock_t *lock)
 void _spin_lock_irq(spinlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
-    local_irq_disable();
+    _local_irq_disable();
+    if ( unlikely(tb_init_done) )
+        trace_irq_disable_ret();
     _spin_lock(lock);
 }
 
@@ -158,7 +160,9 @@ unsigned long _spin_lock_irqsave(spinlock_t *lock)
 {
     unsigned long flags;
 
-    local_irq_save(flags);
+    _local_irq_save(flags);
+    if ( unlikely(tb_init_done) )
+        trace_irq_save_ret(flags);
     _spin_lock(lock);
     return flags;
 }
@@ -175,13 +179,17 @@ void _spin_unlock(spinlock_t *lock)
 void _spin_unlock_irq(spinlock_t *lock)
 {
     _spin_unlock(lock);
-    local_irq_enable();
+    if ( unlikely(tb_init_done) )
+        trace_irq_enable_ret();
+    _local_irq_enable();
 }
 
 void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
     _spin_unlock(lock);
-    local_irq_restore(flags);
+    if ( unlikely(tb_init_done) )
+        trace_irq_restore_ret(flags);
+    _local_irq_restore(flags);
 }
 
 int _spin_is_locked(spinlock_t *lock)
diff --git a/xen/common/trace.c b/xen/common/trace.c
index 2c18462..71202df 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -722,7 +722,12 @@ void __trace_var(u32 event, bool_t cycles, unsigned int 
extra,
     /* Read tb_init_done /before/ t_bufs. */
     smp_rmb();
 
-    spin_lock_irqsave(&this_cpu(t_lock), flags);
+    /*
+     * spin_lock_irqsave() would call local_irq_save(), which (may)
+     * call __trace_var(). Open code it to avoid recursing.
+     */
+    _local_irq_save(flags);
+    spin_lock(&this_cpu(t_lock));
 
     buf = this_cpu(t_bufs);
 
@@ -809,7 +814,8 @@ void __trace_var(u32 event, bool_t cycles, unsigned int 
extra,
     __insert_record(buf, event, extra, cycles, rec_size, extra_data);
 
 unlock:
-    spin_unlock_irqrestore(&this_cpu(t_lock), flags);
+    spin_unlock(&this_cpu(t_lock));
+    _local_irq_restore(flags);
 
     /* Notify trace buffer consumer that we've crossed the high water mark. */
     if ( likely(buf!=NULL)
diff --git a/xen/include/asm-arm/arm32/system.h 
b/xen/include/asm-arm/arm32/system.h
index c617b40..20871ad 100644
--- a/xen/include/asm-arm/arm32/system.h
+++ b/xen/include/asm-arm/arm32/system.h
@@ -4,6 +4,8 @@
 
 #include <asm/arm32/cmpxchg.h>
 
+#include <xen/trace.h>
+
 #define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : 
: "cc" )
 #define local_irq_enable()  asm volatile ( "cpsie i @ local_irq_enable\n" : : 
: "cc" )
 
@@ -41,6 +43,16 @@ static inline int local_irq_is_enabled(void)
 #define local_abort_enable() __asm__("cpsie a  @ __sta\n" : : : "memory", "cc")
 #define local_abort_disable() __asm__("cpsid a @ __sta\n" : : : "memory", "cc")
 
+/* We do not support tracing (at all) yet */
+#define trace_irq_disable_ret()   do { } while ( 0 )
+#define trace_irq_enable_ret()    do { } while ( 0 )
+#define trace_irq_save_ret(_x)    do { } while ( 0 )
+#define trace_irq_restore_ret(_x) do { } while ( 0 )
+#define _local_irq_disable()      local_irq_disable()
+#define _local_irq_enable()       local_irq_enable()
+#define _local_irq_save(_x)       local_irq_save(_x)
+#define _local_irq_restore(_x)    local_irq_restore(_x)
+
 static inline int local_fiq_is_enabled(void)
 {
     unsigned long flags;
diff --git a/xen/include/asm-arm/arm64/system.h 
b/xen/include/asm-arm/arm64/system.h
index 2e2ee21..6603b0c 100644
--- a/xen/include/asm-arm/arm64/system.h
+++ b/xen/include/asm-arm/arm64/system.h
@@ -4,6 +4,8 @@
 
 #include <asm/arm64/cmpxchg.h>
 
+#include <xen/trace.h>
+
 /* Uses uimm4 as a bitmask to select the clearing of one or more of
  * the DAIF exception mask bits:
  * bit 3 selects the D mask,
@@ -44,6 +46,16 @@
         : "memory");                                             \
 })
 
+/* We do not support tracing (at all) yet */
+#define trace_irq_disable_ret()   do { } while ( 0 )
+#define trace_irq_enable_ret()    do { } while ( 0 )
+#define trace_irq_save_ret(_x)    do { } while ( 0 )
+#define trace_irq_restore_ret(_x) do { } while ( 0 )
+#define _local_irq_disable()      local_irq_disable()
+#define _local_irq_enable()       local_irq_enable()
+#define _local_irq_save(_x)       local_irq_save(_x)
+#define _local_irq_restore(_x)    local_irq_restore(_x)
+
 static inline int local_irq_is_enabled(void)
 {
     unsigned long flags;
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index eb498f5..0e7bf01 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -5,6 +5,8 @@
 #include <xen/bitops.h>
 #include <asm/processor.h>
 
+#include <xen/trace.h>
+
 #define read_sreg(name)                                         \
 ({  unsigned int __sel;                                         \
     asm volatile ( "mov %%" STR(name) ",%0" : "=r" (__sel) );   \
@@ -185,8 +187,8 @@ static always_inline unsigned long __xadd(
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
-#define local_irq_disable()     asm volatile ( "cli" : : : "memory" )
-#define local_irq_enable()      asm volatile ( "sti" : : : "memory" )
+#define _local_irq_disable()    asm volatile ( "cli" : : : "memory" )
+#define _local_irq_enable()     asm volatile ( "sti" : : : "memory" )
 
 /* used in the idle loop; sti takes one instruction cycle to complete */
 #define safe_halt()     asm volatile ( "sti; hlt" : : : "memory" )
@@ -198,12 +200,12 @@ static always_inline unsigned long __xadd(
     BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
     asm volatile ( "pushf" __OS " ; pop" __OS " %0" : "=g" (x)); \
 })
-#define local_irq_save(x)                                        \
+#define _local_irq_save(x)                                       \
 ({                                                               \
     local_save_flags(x);                                         \
-    local_irq_disable();                                         \
+    _local_irq_disable();                                        \
 })
-#define local_irq_restore(x)                                     \
+#define _local_irq_restore(x)                                    \
 ({                                                               \
     BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
     asm volatile ( "pushfq\n\t"                                  \
@@ -214,6 +216,79 @@ static always_inline unsigned long __xadd(
                        "ri" ( (x) & X86_EFLAGS_IF ) );           \
 })
 
+#ifdef CONFIG_TRACE_IRQSOFF
+
+#define TRACE_LOCAL_ADDR ((uint64_t) current_text_addr())
+#define TRACE_RET_ADDR   ((uint64_t) __builtin_return_address(0))
+
+#define trace_irq_disable(_a)                                    \
+({                                                               \
+    uint64_t addr = _a;                                          \
+    __trace_var(TRC_HW_IRQ_DISABLE, 1, sizeof(addr), &addr);     \
+})
+#define trace_irq_enable(_a)                                     \
+({                                                               \
+    uint64_t addr = _a;                                          \
+    __trace_var(TRC_HW_IRQ_ENABLE, 1, sizeof(addr), &addr);      \
+})
+#define trace_irq_save(_x, _a)                                   \
+({                                                               \
+    uint64_t addr = _a;                                          \
+    if ( _x & X86_EFLAGS_IF )                                    \
+        __trace_var(TRC_HW_IRQ_DISABLE, 1, sizeof(addr), &addr); \
+})
+#define trace_irq_restore(_x, _a)                                \
+({                                                               \
+    uint64_t addr = _a;                                          \
+    if ( _x & X86_EFLAGS_IF )                                    \
+        __trace_var(TRC_HW_IRQ_ENABLE, 1, sizeof(addr), &addr);  \
+})
+
+#define trace_irq_disable_ret()   trace_irq_disable(TRACE_RET_ADDR)
+#define trace_irq_enable_ret()    trace_irq_enable(TRACE_RET_ADDR)
+#define trace_irq_save_ret(_x)    trace_irq_save(_x, TRACE_RET_ADDR)
+#define trace_irq_restore_ret(_x) trace_irq_restore(_x, TRACE_RET_ADDR)
+
+#define local_irq_disable()                      \
+({                                               \
+    bool_t irqon = local_irq_is_enabled();       \
+    _local_irq_disable();                        \
+    if ( unlikely(tb_init_done && irqon) )       \
+        trace_irq_disable(TRACE_LOCAL_ADDR);     \
+})
+
+#define local_irq_enable()                       \
+({                                               \
+    if ( unlikely(tb_init_done) )                \
+        trace_irq_enable(TRACE_LOCAL_ADDR);      \
+    _local_irq_enable();                         \
+})
+
+#define local_irq_save(_x)                       \
+({                                               \
+    local_save_flags(_x);                        \
+    _local_irq_disable();                        \
+    if ( unlikely(tb_init_done) )                \
+        trace_irq_save(_x, TRACE_LOCAL_ADDR);    \
+})
+
+#define local_irq_restore(_x)                    \
+({                                               \
+    if ( unlikely(tb_init_done) )                \
+        trace_irq_restore(_x, TRACE_LOCAL_ADDR); \
+    _local_irq_restore(_x);                      \
+})
+#else /* !TRACE_IRQSOFF */
+#define trace_irq_disable_ret()   do { } while ( 0 )
+#define trace_irq_enable_ret()    do { } while ( 0 )
+#define trace_irq_save_ret(_x)    do { } while ( 0 )
+#define trace_irq_restore_ret(_x) do { } while ( 0 )
+#define local_irq_disable()       _local_irq_disable()
+#define local_irq_enable()        _local_irq_enable()
+#define local_irq_save(_x)        _local_irq_save(_x)
+#define local_irq_restore(_x)     _local_irq_restore(_x)
+#endif /* TRACE_IRQSOFF */
+
 static inline int local_irq_is_enabled(void)
 {
     unsigned long flags;
diff --git a/xen/include/public/trace.h b/xen/include/public/trace.h
index f66a7af..1692a79 100644
--- a/xen/include/public/trace.h
+++ b/xen/include/public/trace.h
@@ -275,6 +275,8 @@
 #define TRC_HW_IRQ_ENTER              (TRC_HW_IRQ + 0xA)
 #define TRC_HW_IRQ_GUEST              (TRC_HW_IRQ + 0xB)
 #define TRC_HW_IRQ_EXIT               (TRC_HW_IRQ + 0xC)
+#define TRC_HW_IRQ_DISABLE            (TRC_HW_IRQ + 0xD)
+#define TRC_HW_IRQ_ENABLE             (TRC_HW_IRQ + 0xE)
 
 /*
  * Event Flags
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 35657c5..04f50e5 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -73,14 +73,19 @@ static inline void _read_lock(rwlock_t *lock)
 static inline void _read_lock_irq(rwlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
-    local_irq_disable();
+    _local_irq_disable();
+    if ( unlikely(tb_init_done) )
+        trace_irq_disable_ret();
     _read_lock(lock);
 }
 
 static inline unsigned long _read_lock_irqsave(rwlock_t *lock)
 {
     unsigned long flags;
-    local_irq_save(flags);
+
+    _local_irq_save(flags);
+    if ( unlikely(tb_init_done) )
+        trace_irq_save_ret(flags);
     _read_lock(lock);
     return flags;
 }
@@ -100,13 +105,17 @@ static inline void _read_unlock(rwlock_t *lock)
 static inline void _read_unlock_irq(rwlock_t *lock)
 {
     _read_unlock(lock);
-    local_irq_enable();
+    if ( unlikely(tb_init_done) )
+        trace_irq_enable_ret();
+    _local_irq_enable();
 }
 
 static inline void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
     _read_unlock(lock);
-    local_irq_restore(flags);
+    if ( unlikely(tb_init_done) )
+        trace_irq_restore_ret(flags);
+    _local_irq_restore(flags);
 }
 
 static inline int _rw_is_locked(rwlock_t *lock)
@@ -130,7 +139,9 @@ static inline void _write_lock(rwlock_t *lock)
 static inline void _write_lock_irq(rwlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
-    local_irq_disable();
+    _local_irq_disable();
+    if ( unlikely(tb_init_done) )
+        trace_irq_disable_ret();
     _write_lock(lock);
 }
 
@@ -138,7 +149,9 @@ static inline unsigned long _write_lock_irqsave(rwlock_t 
*lock)
 {
     unsigned long flags;
 
-    local_irq_save(flags);
+    _local_irq_save(flags);
+    if ( unlikely(tb_init_done) )
+        trace_irq_save_ret(flags);
     _write_lock(lock);
     return flags;
 }
@@ -171,13 +184,17 @@ static inline void _write_unlock(rwlock_t *lock)
 static inline void _write_unlock_irq(rwlock_t *lock)
 {
     _write_unlock(lock);
-    local_irq_enable();
+    if ( unlikely(tb_init_done) )
+        trace_irq_enable_ret();
+    _local_irq_enable();
 }
 
 static inline void _write_unlock_irqrestore(rwlock_t *lock, unsigned long 
flags)
 {
     _write_unlock(lock);
-    local_irq_restore(flags);
+    if ( unlikely(tb_init_done) )
+        trace_irq_restore_ret(flags);
+    _local_irq_restore(flags);
 }
 
 static inline int _rw_is_write_locked(rwlock_t *lock)


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.