[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen master] x86/HPET: deal with event having expired while interrupt was masked



commit da4ae08504d12290c5193d6c2cd33c20a41e1b0d
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Mar 20 16:57:04 2013 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Mar 20 16:57:04 2013 +0100

    x86/HPET: deal with event having expired while interrupt was masked
    
    Commit 2d8a282 ("x86/HPET: fix FSB interrupt masking") may cause the
    HPET event to occur while its interrupt is masked. In that case we need
    to "manually" deliver the event.
    
    Unfortunately this requires the locking to be changed: For one, it was
    always bogus for handle_hpet_broadcast() to use spin_unlock_irq() - the
    function is being called from an interrupt handler, and hence shouldn't
    blindly re-enable interrupts (this should be left up to the generic
    interrupt handling code). And with the event handler wanting to acquire
    the lock for two of its code regions, we must not enter it with the
    lock already held. Hence move the locking into
    hpet_{attach,detach}_channel(), permitting the lock to be dropped by
    set_channel_irq_affinity() (which is a tail call of those functions).
    
    Reported-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Tested-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/hpet.c |   53 ++++++++++++++++++++++++++------------------------
 1 files changed, 28 insertions(+), 25 deletions(-)

diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index 9b3d83b..1404196 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -171,13 +171,14 @@ static void handle_hpet_broadcast(struct hpet_event_channel *ch)
     cpumask_t mask;
     s_time_t now, next_event;
     unsigned int cpu;
+    unsigned long flags;
 
-    spin_lock_irq(&ch->lock);
+    spin_lock_irqsave(&ch->lock, flags);
 
 again:
     ch->next_event = STIME_MAX;
 
-    spin_unlock_irq(&ch->lock);
+    spin_unlock_irqrestore(&ch->lock, flags);
 
     next_event = STIME_MAX;
     cpumask_clear(&mask);
@@ -205,13 +206,13 @@ again:
 
     if ( next_event != STIME_MAX )
     {
-        spin_lock_irq(&ch->lock);
+        spin_lock_irqsave(&ch->lock, flags);
 
         if ( next_event < ch->next_event &&
              reprogram_hpet_evt_channel(ch, next_event, now, 0) )
             goto again;
 
-        spin_unlock_irq(&ch->lock);
+        spin_unlock_irqrestore(&ch->lock, flags);
     }
 }
 
@@ -460,7 +461,7 @@ static struct hpet_event_channel *hpet_get_channel(unsigned int cpu)
     return ch;
 }
 
-static void set_channel_irq_affinity(const struct hpet_event_channel *ch)
+static void set_channel_irq_affinity(struct hpet_event_channel *ch)
 {
     struct irq_desc *desc = irq_to_desc(ch->msi.irq);
 
@@ -470,12 +471,19 @@ static void set_channel_irq_affinity(const struct hpet_event_channel *ch)
     hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
     hpet_msi_unmask(desc);
     spin_unlock(&desc->lock);
+
+    spin_unlock(&ch->lock);
+
+    /* We may have missed an interrupt due to the temporary masking. */
+    if ( ch->event_handler && ch->next_event < NOW() )
+        ch->event_handler(ch);
 }
 
 static void hpet_attach_channel(unsigned int cpu,
                                 struct hpet_event_channel *ch)
 {
-    ASSERT(spin_is_locked(&ch->lock));
+    ASSERT(!local_irq_is_enabled());
+    spin_lock(&ch->lock);
 
     per_cpu(cpu_bc_channel, cpu) = ch;
 
@@ -484,31 +492,34 @@ static void hpet_attach_channel(unsigned int cpu,
         ch->cpu = cpu;
 
     if ( ch->cpu != cpu )
-        return;
-
-    set_channel_irq_affinity(ch);
+        spin_unlock(&ch->lock);
+    else
+        set_channel_irq_affinity(ch);
 }
 
 static void hpet_detach_channel(unsigned int cpu,
                                 struct hpet_event_channel *ch)
 {
-    ASSERT(spin_is_locked(&ch->lock));
+    spin_lock_irq(&ch->lock);
+
     ASSERT(ch == per_cpu(cpu_bc_channel, cpu));
 
     per_cpu(cpu_bc_channel, cpu) = NULL;
 
     if ( cpu != ch->cpu )
-        return;
-
-    if ( cpumask_empty(ch->cpumask) )
+        spin_unlock_irq(&ch->lock);
+    else if ( cpumask_empty(ch->cpumask) )
     {
         ch->cpu = -1;
         clear_bit(HPET_EVT_USED_BIT, &ch->flags);
-        return;
+        spin_unlock_irq(&ch->lock);
+    }
+    else
+    {
+        ch->cpu = cpumask_first(ch->cpumask);
+        set_channel_irq_affinity(ch);
+        local_irq_enable();
     }
-
-    ch->cpu = cpumask_first(ch->cpumask);
-    set_channel_irq_affinity(ch);
 }
 
 #include <asm/mc146818rtc.h>
@@ -686,11 +697,7 @@ void hpet_broadcast_enter(void)
     ASSERT(!local_irq_is_enabled());
 
     if ( !(ch->flags & HPET_EVT_LEGACY) )
-    {
-        spin_lock(&ch->lock);
         hpet_attach_channel(cpu, ch);
-        spin_unlock(&ch->lock);
-    }
 
     /* Disable LAPIC timer interrupts. */
     disable_APIC_timer();
@@ -722,11 +729,7 @@ void hpet_broadcast_exit(void)
     cpumask_clear_cpu(cpu, ch->cpumask);
 
     if ( !(ch->flags & HPET_EVT_LEGACY) )
-    {
-        spin_lock_irq(&ch->lock);
         hpet_detach_channel(cpu, ch);
-        spin_unlock_irq(&ch->lock);
-    }
 }
 
 int hpet_broadcast_is_available(void)
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.