
[Xen-changelog] [xen-unstable] cpuidle: redirect some hpet lock users to a new cpumask_lock



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1276761170 -3600
# Node ID d0ad966b3d1a8653e2d147baf2843f2ca5ea7aed
# Parent  47655a695312af43c1dd163b22ab4adb02553f60
cpuidle: redirect some hpet lock users to a new cpumask_lock

The hpet channel lock was also used to prevent handle_hpet_broadcast
from accessing another cpu's timer_deadline_start/end after that cpu
had already been woken up.  A standalone lock serves this purpose
equally well, and removes much of the spinning on the hpet channel
lock.
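
For readers outside the Xen tree, the shape of the change is easy to
see in isolation: the broad per-channel lock keeps serializing channel
programming, while a second, narrow lock covers only the cpumask and
the per-cpu deadlines that handle_hpet_broadcast scans.  Below is a
minimal user-space sketch of that split, assuming pthread spinlocks;
the names (channel, NCPUS, deadline[], find_expired, channel_exit) are
illustrative, not the actual Xen identifiers.

/*
 * Sketch of the lock split: a narrow cpumask_lock guards only the
 * waiter bookkeeping, so the scan never contends on ch->lock.
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

struct channel {
    pthread_spinlock_t lock;         /* serializes channel programming */
    pthread_spinlock_t cpumask_lock; /* guards cpumask + deadline[] */
    uint64_t           cpumask;      /* bit n set: cpu n waits on us */
    uint64_t           deadline[NCPUS];
};

/* Broadcast side: find expired waiters under the narrow lock only. */
static uint64_t find_expired(struct channel *ch, uint64_t now,
                             uint64_t *next_event)
{
    uint64_t expired = 0;

    *next_event = UINT64_MAX;
    for (int cpu = 0; cpu < NCPUS; cpu++) {
        pthread_spin_lock(&ch->cpumask_lock);
        if (ch->cpumask & (1ull << cpu)) {
            if (ch->deadline[cpu] <= now)
                expired |= 1ull << cpu;        /* wake this cpu */
            else if (ch->deadline[cpu] < *next_event)
                *next_event = ch->deadline[cpu];
        }
        pthread_spin_unlock(&ch->cpumask_lock);
    }
    return expired;
}

/* Wakeup side: a cpu leaving deep idle drops out under the same lock,
 * so the scan above never reads its deadline after it has gone. */
static void channel_exit(struct channel *ch, int cpu)
{
    pthread_spin_lock(&ch->cpumask_lock);
    ch->cpumask &= ~(1ull << cpu);
    pthread_spin_unlock(&ch->cpumask_lock);
}

int main(void)
{
    struct channel ch = { .cpumask = 0xf,
                          .deadline = { 10, 30, 20, 40 } };
    uint64_t next;

    pthread_spin_init(&ch.lock, PTHREAD_PROCESS_PRIVATE);
    pthread_spin_init(&ch.cpumask_lock, PTHREAD_PROCESS_PRIVATE);

    printf("expired at t=25: 0x%llx, next event %llu\n",
           (unsigned long long)find_expired(&ch, 25, &next),
           (unsigned long long)next);
    channel_exit(&ch, 0);
    printf("after cpu0 left: 0x%llx\n",
           (unsigned long long)find_expired(&ch, 25, &next));
    return 0;
}

The payoff is that the hot broadcast path and the per-cpu exit path
only ever contend on the cheap cpumask_lock, leaving ch->lock to the
comparatively rare reprogramming of the channel itself.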

Signed-off-by: Wei Gang <gang.wei@xxxxxxxxx>
---
 xen/arch/x86/hpet.c |   11 +++++++----
 1 files changed, 7 insertions(+), 4 deletions(-)

diff -r 47655a695312 -r d0ad966b3d1a xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Thu Jun 17 08:52:29 2010 +0100
+++ b/xen/arch/x86/hpet.c       Thu Jun 17 08:52:50 2010 +0100
@@ -34,6 +34,7 @@ struct hpet_event_channel
     int           shift;
     s_time_t      next_event;
     cpumask_t     cpumask;
+    spinlock_t    cpumask_lock;
     spinlock_t    lock;
     void          (*event_handler)(struct hpet_event_channel *);
 
@@ -196,7 +197,7 @@ again:
     /* find all expired events */
     for_each_cpu_mask(cpu, ch->cpumask)
     {
-        spin_lock_irq(&ch->lock);
+        spin_lock_irq(&ch->cpumask_lock);
 
        if ( cpu_isset(cpu, ch->cpumask) )
         {
@@ -206,7 +207,7 @@ again:
                 next_event = per_cpu(timer_deadline_end, cpu);
         }
 
-        spin_unlock_irq(&ch->lock);
+        spin_unlock_irq(&ch->cpumask_lock);
     }
 
     /* wakeup the cpus which have an expired event. */
@@ -579,6 +580,7 @@ void hpet_broadcast_init(void)
             hpet_events[i].next_event = STIME_MAX;
             hpet_events[i].event_handler = handle_hpet_broadcast;
             spin_lock_init(&hpet_events[i].lock);
+            spin_lock_init(&hpet_events[i].cpumask_lock);
         }
 
         return;
@@ -613,6 +615,7 @@ void hpet_broadcast_init(void)
     legacy_hpet_event.idx = 0;
     legacy_hpet_event.flags = 0;
     spin_lock_init(&legacy_hpet_event.lock);
+    spin_lock_init(&legacy_hpet_event.cpumask_lock);
 
     if ( !force_hpet_broadcast )
         pv_rtc_handler = handle_rtc_once;
@@ -689,9 +692,9 @@ void hpet_broadcast_exit(void)
     if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
         raise_softirq(TIMER_SOFTIRQ);
 
-    spin_lock_irq(&ch->lock);
+    spin_lock_irq(&ch->cpumask_lock);
     cpu_clear(cpu, ch->cpumask);
-    spin_unlock_irq(&ch->lock);
+    spin_unlock_irq(&ch->cpumask_lock);
 
     if ( ch != &legacy_hpet_event )
     {
