
[Xen-changelog] [xen-unstable] timer: Ensure that CPU field of a timer is read safely when lock-free.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1294481384 0
# Node ID 0e49e25904623af4b03b614c10c54fce77f05909
# Parent  533d6e5c0099ede05f6db7c505b3599d9d5b35a8
timer: Ensure that CPU field of a timer is read safely when lock-free.

Firstly, all updates must use atomic_write16(), and lock-free reads
must use atomic_read16(). Secondly, we ensure ->cpu is the only field
accessed without a lock. This requires us to place a special sentinel
value in that field when a timer is killed, to avoid needing to read
->status outside a locked critical section.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---
 xen/common/timer.c      |   33 +++++++++++++++++----------------
 xen/include/xen/timer.h |    1 +
 2 files changed, 18 insertions(+), 16 deletions(-)
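
A note for readers outside the hypervisor tree: the locking pattern this
patch establishes in timer_lock() can be sketched in standalone C. The
sketch below is illustrative only, not Xen code; it substitutes C11
atomics and pthread mutexes for Xen's atomic_read16()/atomic_write16()
and per-CPU spinlocks, and the names my_timer, timers_lock, timers_init
and TIMER_CPU_KILLED are inventions of the sketch, not Xen identifiers.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define NR_CPUS 4
#define TIMER_CPU_KILLED 0xffffu       /* mirrors TIMER_CPU_status_killed */

struct my_timer {
    _Atomic uint16_t cpu;              /* only field touched without a lock */
};

static pthread_mutex_t timers_lock[NR_CPUS]; /* per_cpu(timers, cpu).lock */

static void timers_init(void)
{
    for (int i = 0; i < NR_CPUS; i++)
        pthread_mutex_init(&timers_lock[i], NULL);
}

/* Returns true with the owning CPU's lock held; false if the timer has
 * been killed.  Mirrors the shape of timer_lock() after this patch. */
static bool my_timer_lock(struct my_timer *t)
{
    for ( ; ; )
    {
        /* Lock-free read: must be atomic so a racing migrate/kill is
         * observed as either the old or the new value, never torn. */
        uint16_t cpu = atomic_load(&t->cpu);
        if (cpu == TIMER_CPU_KILLED)
            return false;              /* sentinel replaces the ->status check */
        pthread_mutex_lock(&timers_lock[cpu]);
        if (atomic_load(&t->cpu) == cpu)
            break;                     /* still owned by this CPU: lock valid */
        /* Timer migrated between the read and the lock; retry. */
        pthread_mutex_unlock(&timers_lock[cpu]);
    }
    return true;
}

The key point is that ->cpu is the only field a lock-free reader
touches: folding the killed state into the same 16-bit field as a
sentinel removes any need to read ->status before the lock is held.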

diff -r 533d6e5c0099 -r 0e49e2590462 xen/common/timer.c
--- a/xen/common/timer.c        Sat Jan 08 10:05:55 2011 +0000
+++ b/xen/common/timer.c        Sat Jan 08 10:09:44 2011 +0000
@@ -23,6 +23,7 @@
 #include <xen/symbols.h>
 #include <asm/system.h>
 #include <asm/desc.h>
+#include <asm/atomic.h>
 
 /* We program the time hardware this far behind the closest deadline. */
 static unsigned int timer_slop __read_mostly = 50000; /* 50 us */
@@ -238,15 +239,14 @@ static inline bool_t timer_lock(struct t
 
     for ( ; ; )
     {
-        cpu = timer->cpu;
-        if ( unlikely(timer->status == TIMER_STATUS_killed) )
+        cpu = atomic_read16(&timer->cpu);
+        if ( unlikely(cpu == TIMER_CPU_status_killed) )
         {
             rcu_read_unlock(&timer_cpu_read_lock);
             return 0;
         }
         spin_lock(&per_cpu(timers, cpu).lock);
-        if ( likely(timer->cpu == cpu) &&
-             likely(timer->status != TIMER_STATUS_killed) )
+        if ( likely(timer->cpu == cpu) )
             break;
         spin_unlock(&per_cpu(timers, cpu).lock);
     }
@@ -292,7 +292,7 @@ void init_timer(
     memset(timer, 0, sizeof(*timer));
     timer->function = function;
     timer->data = data;
-    timer->cpu = cpu;
+    atomic_write16(&timer->cpu, cpu);
     timer->status = TIMER_STATUS_inactive;
     if ( !timer_lock_irqsave(timer, flags) )
         BUG();
@@ -335,7 +335,7 @@ void stop_timer(struct timer *timer)
 
 void migrate_timer(struct timer *timer, unsigned int new_cpu)
 {
-    int old_cpu;
+    unsigned int old_cpu;
     bool_t active;
     unsigned long flags;
 
@@ -343,8 +343,8 @@ void migrate_timer(struct timer *timer, 
 
     for ( ; ; )
     {
-        if ( ((old_cpu = timer->cpu) == new_cpu) ||
-             unlikely(timer->status == TIMER_STATUS_killed) )
+        old_cpu = atomic_read16(&timer->cpu);
+        if ( (old_cpu == new_cpu) || (old_cpu == TIMER_CPU_status_killed) )
         {
             rcu_read_unlock(&timer_cpu_read_lock);
             return;
@@ -361,8 +361,7 @@ void migrate_timer(struct timer *timer, 
             spin_lock(&per_cpu(timers, old_cpu).lock);
         }
 
-        if ( likely(timer->cpu == old_cpu) &&
-             likely(timer->status != TIMER_STATUS_killed) )
+        if ( likely(timer->cpu == old_cpu) )
              break;
 
         spin_unlock(&per_cpu(timers, old_cpu).lock);
@@ -376,7 +375,7 @@ void migrate_timer(struct timer *timer, 
         deactivate_timer(timer);
 
     list_del(&timer->inactive);
-    timer->cpu = new_cpu;
+    atomic_write16(&timer->cpu, new_cpu);
     list_add(&timer->inactive, &per_cpu(timers, new_cpu).inactive);
 
     if ( active )
@@ -389,7 +388,7 @@ void migrate_timer(struct timer *timer, 
 
 void kill_timer(struct timer *timer)
 {
-    int           cpu;
+    unsigned int old_cpu, cpu;
     unsigned long flags;
 
     BUG_ON(this_cpu(timers).running == timer);
@@ -402,8 +401,10 @@ void kill_timer(struct timer *timer)
 
     list_del(&timer->inactive);
     timer->status = TIMER_STATUS_killed;
-
-    timer_unlock_irqrestore(timer, flags);
+    old_cpu = timer->cpu;
+    atomic_write16(&timer->cpu, TIMER_CPU_status_killed);
+
+    spin_unlock_irqrestore(&per_cpu(timers, old_cpu).lock, flags);
 
     for_each_online_cpu ( cpu )
         while ( per_cpu(timers, cpu).running == timer )
@@ -561,7 +562,7 @@ static void migrate_timers_from_cpu(unsi
     while ( (t = GET_HEAP_SIZE(ts->heap) ? ts->heap[1] : ts->list) != NULL )
     {
         remove_entry(t);
-        t->cpu = 0;
+        atomic_write16(&t->cpu, 0);
         notify |= add_entry(t);
     }
 
@@ -569,7 +570,7 @@ static void migrate_timers_from_cpu(unsi
     {
         t = list_entry(ts->inactive.next, struct timer, inactive);
         list_del(&t->inactive);
-        t->cpu = 0;
+        atomic_write16(&t->cpu, 0);
         list_add(&t->inactive, &per_cpu(timers, 0).inactive);
     }
 
diff -r 533d6e5c0099 -r 0e49e2590462 xen/include/xen/timer.h
--- a/xen/include/xen/timer.h   Sat Jan 08 10:05:55 2011 +0000
+++ b/xen/include/xen/timer.h   Sat Jan 08 10:09:44 2011 +0000
@@ -32,6 +32,7 @@ struct timer {
     void *data;
 
     /* CPU on which this timer will be installed and executed. */
+#define TIMER_CPU_status_killed 0xffffu /* Timer is TIMER_STATUS_killed */
     uint16_t cpu;
 
     /* Timer status. */
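
The subtler half of the change is in kill_timer(): once the sentinel has
been written, ->cpu can no longer be used to find the lock that is
currently held, so the old value must be latched first and the unlock
done through it (hence the direct spin_unlock_irqrestore() on
per_cpu(timers, old_cpu).lock in place of timer_unlock_irqrestore()).
In the same standalone terms as the sketch above, and with the same
caveat that this is an illustration rather than Xen code:

/* Sketch of the kill ordering: latch the owning CPU, then publish the
 * sentinel, then unlock via the latched value.  Reversing the first
 * two steps would strand the held lock. */
static void my_timer_kill(struct my_timer *t)
{
    if (!my_timer_lock(t))
        return;                              /* already killed */

    uint16_t old_cpu = atomic_load(&t->cpu); /* latch before overwriting */
    atomic_store(&t->cpu, TIMER_CPU_KILLED); /* visible to lock-free readers */

    /* ->cpu now holds the sentinel, so the unlock must go through the
     * latched old_cpu, just as the patch unlocks
     * per_cpu(timers, old_cpu).lock directly. */
    pthread_mutex_unlock(&timers_lock[old_cpu]);
}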
