[Xen-changelog] [xen-3.0-testing] [LINUX] Only trigger unhandled irq path if irq is not shared across
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 3a12ae277cbcde84400a9166705c5d7804c46bcf
# Parent  1de04b7a58366cf659876239036bf2323eb0b3b3
[LINUX] Only trigger unhandled irq path if irq is not shared across
multiple guests (another guest may have handled the interrupt).

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

xen-unstable changeset: 10298:6fb0d5ad63d74ef272d1f1754b9eabd83000577e
xen-unstable date:      Thu Jun 8 10:11:04 2006 +0100
---
 linux-2.6-xen-sparse/drivers/xen/Kconfig       |    4 
 linux-2.6-xen-sparse/drivers/xen/core/evtchn.c |    9 
 linux-2.6-xen-sparse/include/linux/interrupt.h |  301 +++++++++++++++++++++++++
 linux-2.6-xen-sparse/kernel/irq/spurious.c     |  206 +++++++++++++++++
 4 files changed, 520 insertions(+)

diff -r 1de04b7a5836 -r 3a12ae277cbc linux-2.6-xen-sparse/drivers/xen/Kconfig
--- a/linux-2.6-xen-sparse/drivers/xen/Kconfig	Thu Jun 08 09:57:53 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/Kconfig	Thu Jun 08 10:21:34 2006 +0100
@@ -216,6 +216,10 @@ config HAVE_ARCH_DEV_ALLOC_SKB
 	bool
 	default y
 
+config HAVE_IRQ_IGNORE_UNHANDLED
+	bool
+	default y
+
 config NO_IDLE_HZ
 	bool
 	default y
diff -r 1de04b7a5836 -r 3a12ae277cbc linux-2.6-xen-sparse/drivers/xen/core/evtchn.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Thu Jun 08 09:57:53 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Thu Jun 08 10:21:34 2006 +0100
@@ -669,6 +669,15 @@ static struct hw_interrupt_type pirq_typ
 	set_affinity_irq
 };
 
+int irq_ignore_unhandled(unsigned int irq)
+{
+	physdev_op_t op;
+	op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
+	op.u.irq_status_query.irq = irq;
+	(void)HYPERVISOR_physdev_op(&op);
+	return !!(op.u.irq_status_query.flags & PHYSDEVOP_IRQ_SHARED);
+}
+
 void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
 {
 	int evtchn = evtchn_from_irq(i);
diff -r 1de04b7a5836 -r 3a12ae277cbc linux-2.6-xen-sparse/include/linux/interrupt.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/include/linux/interrupt.h	Thu Jun 08 10:21:34 2006 +0100
@@ -0,0 +1,301 @@
+/* interrupt.h */
+#ifndef _LINUX_INTERRUPT_H
+#define _LINUX_INTERRUPT_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/bitops.h>
+#include <linux/preempt.h>
+#include <linux/cpumask.h>
+#include <linux/hardirq.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+/*
+ * For 2.4.x compatibility, 2.4.x can use
+ *
+ *	typedef void irqreturn_t;
+ *	#define IRQ_NONE
+ *	#define IRQ_HANDLED
+ *	#define IRQ_RETVAL(x)
+ *
+ * To mix old-style and new-style irq handler returns.
+ *
+ * IRQ_NONE means we didn't handle it.
+ * IRQ_HANDLED means that we did have a valid interrupt and handled it.
+ * IRQ_RETVAL(x)	selects on the two depending on x being non-zero (for handled)
+ */
+typedef int irqreturn_t;
+
+#define IRQ_NONE	(0)
+#define IRQ_HANDLED	(1)
+#define IRQ_RETVAL(x)	((x) != 0)
+
+struct irqaction {
+	irqreturn_t (*handler)(int, void *, struct pt_regs *);
+	unsigned long flags;
+	cpumask_t mask;
+	const char *name;
+	void *dev_id;
+	struct irqaction *next;
+	int irq;
+	struct proc_dir_entry *dir;
+};
+
+extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
+extern int request_irq(unsigned int,
+		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
+		       unsigned long, const char *, void *);
+extern void free_irq(unsigned int, void *);
+
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+extern void disable_irq_nosync(unsigned int irq);
+extern void disable_irq(unsigned int irq);
+extern void enable_irq(unsigned int irq);
+#endif
+
+#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED
+int irq_ignore_unhandled(unsigned int irq);
+#else
+#define irq_ignore_unhandled(irq) 0
+#endif
+
+#ifndef __ARCH_SET_SOFTIRQ_PENDING
+#define set_softirq_pending(x) (local_softirq_pending() = (x))
+#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
+#endif
+
+/*
+ * Temporary defines for UP kernels, until all code gets fixed.
+ */
+#ifndef CONFIG_SMP
+static inline void __deprecated cli(void)
+{
+	local_irq_disable();
+}
+static inline void __deprecated sti(void)
+{
+	local_irq_enable();
+}
+static inline void __deprecated save_flags(unsigned long *x)
+{
+	local_save_flags(*x);
+}
+#define save_flags(x) save_flags(&x)
+static inline void __deprecated restore_flags(unsigned long x)
+{
+	local_irq_restore(x);
+}
+
+static inline void __deprecated save_and_cli(unsigned long *x)
+{
+	local_irq_save(*x);
+}
+#define save_and_cli(x)	save_and_cli(&x)
+#endif /* CONFIG_SMP */
+
+/* SoftIRQ primitives.  */
+#define local_bh_disable() \
+		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
+#define __local_bh_enable() \
+		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
+
+extern void local_bh_enable(void);
+
+/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
+   frequency threaded job scheduling. For almost all the purposes
+   tasklets are more than enough. F.e. all serial device BHs et
+   al. should be converted to tasklets, not to softirqs.
+ */
+
+enum
+{
+	HI_SOFTIRQ=0,
+	TIMER_SOFTIRQ,
+	NET_TX_SOFTIRQ,
+	NET_RX_SOFTIRQ,
+	BLOCK_SOFTIRQ,
+	TASKLET_SOFTIRQ
+};
+
+/* softirq mask and active fields moved to irq_cpustat_t in
+ * asm/hardirq.h to get better cache usage.  KAO
+ */
+
+struct softirq_action
+{
+	void	(*action)(struct softirq_action *);
+	void	*data;
+};
+
+asmlinkage void do_softirq(void);
+extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+extern void softirq_init(void);
+#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
+extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
+extern void FASTCALL(raise_softirq(unsigned int nr));
+
+
+/* Tasklets --- multithreaded analogue of BHs.
+
+   Main feature differing them of generic softirqs: tasklet
+   is running only on one CPU simultaneously.
+
+   Main feature differing them of BHs: different tasklets
+   may be run simultaneously on different CPUs.
+
+   Properties:
+   * If tasklet_schedule() is called, then tasklet is guaranteed
+     to be executed on some cpu at least once after this.
+   * If the tasklet is already scheduled, but its excecution is still not
+     started, it will be executed only once.
+   * If this tasklet is already running on another CPU (or schedule is called
+     from tasklet itself), it is rescheduled for later.
+   * Tasklet is strictly serialized wrt itself, but not
+     wrt another tasklets. If client needs some intertask synchronization,
+     he makes it with spinlocks.
+ */
+
+struct tasklet_struct
+{
+	struct tasklet_struct *next;
+	unsigned long state;
+	atomic_t count;
+	void (*func)(unsigned long);
+	unsigned long data;
+};
+
+#define DECLARE_TASKLET(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+
+#define DECLARE_TASKLET_DISABLED(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+
+
+enum
+{
+	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
+	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
+};
+
+#ifdef CONFIG_SMP
+static inline int tasklet_trylock(struct tasklet_struct *t)
+{
+	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline void tasklet_unlock(struct tasklet_struct *t)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
+#else
+#define tasklet_trylock(t) 1
+#define tasklet_unlock_wait(t) do { } while (0)
+#define tasklet_unlock(t) do { } while (0)
+#endif
+
+extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
+
+static inline void tasklet_schedule(struct tasklet_struct *t)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		__tasklet_schedule(t);
+}
+
+extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
+
+static inline void tasklet_hi_schedule(struct tasklet_struct *t)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		__tasklet_hi_schedule(t);
+}
+
+
+static inline void tasklet_disable_nosync(struct tasklet_struct *t)
+{
+	atomic_inc(&t->count);
+	smp_mb__after_atomic_inc();
+}
+
+static inline void tasklet_disable(struct tasklet_struct *t)
+{
+	tasklet_disable_nosync(t);
+	tasklet_unlock_wait(t);
+	smp_mb();
+}
+
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+	smp_mb__before_atomic_dec();
+	atomic_dec(&t->count);
+}
+
+static inline void tasklet_hi_enable(struct tasklet_struct *t)
+{
+	smp_mb__before_atomic_dec();
+	atomic_dec(&t->count);
+}
+
+extern void tasklet_kill(struct tasklet_struct *t);
+extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
+extern void tasklet_init(struct tasklet_struct *t,
+			 void (*func)(unsigned long), unsigned long data);
+
+/*
+ * Autoprobing for irqs:
+ *
+ * probe_irq_on() and probe_irq_off() provide robust primitives
+ * for accurate IRQ probing during kernel initialization.  They are
+ * reasonably simple to use, are not "fooled" by spurious interrupts,
+ * and, unlike other attempts at IRQ probing, they do not get hung on
+ * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
+ *
+ * For reasonably foolproof probing, use them as follows:
+ *
+ * 1. clear and/or mask the device's internal interrupt.
+ * 2. sti();
+ * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
+ * 4. enable the device and cause it to trigger an interrupt.
+ * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
+ * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
+ * 7. service the device to clear its pending interrupt.
+ * 8. loop again if paranoia is required.
+ *
+ * probe_irq_on() returns a mask of allocated irq's.
+ *
+ * probe_irq_off() takes the mask as a parameter,
+ * and returns the irq number which occurred,
+ * or zero if none occurred, or a negative irq number
+ * if more than one irq occurred.
+ */
+
+#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
+static inline unsigned long probe_irq_on(void)
+{
+	return 0;
+}
+static inline int probe_irq_off(unsigned long val)
+{
+	return 0;
+}
+static inline unsigned int probe_irq_mask(unsigned long val)
+{
+	return 0;
+}
+#else
+extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
+extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
+extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
+#endif
+
+#endif
diff -r 1de04b7a5836 -r 3a12ae277cbc linux-2.6-xen-sparse/kernel/irq/spurious.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/kernel/irq/spurious.c	Thu Jun 08 10:21:34 2006 +0100
@@ -0,0 +1,206 @@
+/*
+ * linux/kernel/irq/spurious.c
+ *
+ * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains spurious interrupt handling.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+
+static int irqfixup;
+
+/*
+ * Recovery handler for misrouted interrupts.
+ */
+
+static int misrouted_irq(int irq, struct pt_regs *regs)
+{
+	int i;
+	irq_desc_t *desc;
+	int ok = 0;
+	int work = 0;	/* Did we do work for a real IRQ */
+
+	for(i = 1; i < NR_IRQS; i++) {
+		struct irqaction *action;
+
+		if (i == irq)	/* Already tried */
+			continue;
+		desc = &irq_desc[i];
+		spin_lock(&desc->lock);
+		action = desc->action;
+		/* Already running on another processor */
+		if (desc->status & IRQ_INPROGRESS) {
+			/*
+			 * Already running: If it is shared get the other
+			 * CPU to go looking for our mystery interrupt too
+			 */
+			if (desc->action && (desc->action->flags & SA_SHIRQ))
+				desc->status |= IRQ_PENDING;
+			spin_unlock(&desc->lock);
+			continue;
+		}
+		/* Honour the normal IRQ locking */
+		desc->status |= IRQ_INPROGRESS;
+		spin_unlock(&desc->lock);
+		while (action) {
+			/* Only shared IRQ handlers are safe to call */
+			if (action->flags & SA_SHIRQ) {
+				if (action->handler(i, action->dev_id, regs) ==
+						IRQ_HANDLED)
+					ok = 1;
+			}
+			action = action->next;
+		}
+		local_irq_disable();
+		/* Now clean up the flags */
+		spin_lock(&desc->lock);
+		action = desc->action;
+
+		/*
+		 * While we were looking for a fixup someone queued a real
+		 * IRQ clashing with our walk
+		 */
+
+		while ((desc->status & IRQ_PENDING) && action) {
+			/*
+			 * Perform real IRQ processing for the IRQ we deferred
+			 */
+			work = 1;
+			spin_unlock(&desc->lock);
+			handle_IRQ_event(i, regs, action);
+			spin_lock(&desc->lock);
+			desc->status &= ~IRQ_PENDING;
+		}
+		desc->status &= ~IRQ_INPROGRESS;
+		/*
+		 * If we did actual work for the real IRQ line we must let the
+		 * IRQ controller clean up too
+		 */
+		if(work)
+			desc->handler->end(i);
+		spin_unlock(&desc->lock);
+	}
+	/* So the caller can adjust the irq error counts */
+	return ok;
+}
+
+/*
+ * If 99,900 of the previous 100,000 interrupts have not been handled
+ * then assume that the IRQ is stuck in some manner. Drop a diagnostic
+ * and try to turn the IRQ off.
+ *
+ * (The other 100-of-100,000 interrupts may have been a correctly
+ *  functioning device sharing an IRQ with the failing one)
+ *
+ * Called under desc->lock
+ */
+
+static void
+__report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+	struct irqaction *action;
+
+	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+		printk(KERN_ERR "irq event %d: bogus return value %x\n",
+				irq, action_ret);
+	} else {
+		printk(KERN_ERR "irq %d: nobody cared (try booting with "
+				"the \"irqpoll\" option)\n", irq);
+	}
+	dump_stack();
+	printk(KERN_ERR "handlers:\n");
+	action = desc->action;
+	while (action) {
+		printk(KERN_ERR "[<%p>]", action->handler);
+		print_symbol(" (%s)",
+			(unsigned long)action->handler);
+		printk("\n");
+		action = action->next;
+	}
+}
+
+static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+	static int count = 100;
+
+	if (count > 0) {
+		count--;
+		__report_bad_irq(irq, desc, action_ret);
+	}
+}
+
+void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
+			struct pt_regs *regs)
+{
+	if (action_ret != IRQ_HANDLED) {
+		if (!irq_ignore_unhandled(irq))
+			desc->irqs_unhandled++;
+		if (action_ret != IRQ_NONE)
+			report_bad_irq(irq, desc, action_ret);
+	}
+
+	if (unlikely(irqfixup)) {
+		/* Don't punish working computers */
+		if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) {
+			int ok = misrouted_irq(irq, regs);
+			if (action_ret == IRQ_NONE)
+				desc->irqs_unhandled -= ok;
+		}
+	}
+
+	desc->irq_count++;
+	if (desc->irq_count < 100000)
+		return;
+
+	desc->irq_count = 0;
+	if (desc->irqs_unhandled > 99900) {
+		/*
+		 * The interrupt is stuck
+		 */
+		__report_bad_irq(irq, desc, action_ret);
+		/*
+		 * Now kill the IRQ
+		 */
+		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
+		desc->status |= IRQ_DISABLED;
+		desc->handler->disable(irq);
+	}
+	desc->irqs_unhandled = 0;
+}
+
+int noirqdebug;
+
+int __init noirqdebug_setup(char *str)
+{
+	noirqdebug = 1;
+	printk(KERN_INFO "IRQ lockup detection disabled\n");
+	return 1;
+}
+
+__setup("noirqdebug", noirqdebug_setup);
+
+static int __init irqfixup_setup(char *str)
+{
+	irqfixup = 1;
+	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
+	printk(KERN_WARNING "This may impact system performance.\n");
+	return 1;
+}
+
+__setup("irqfixup", irqfixup_setup);
+
+static int __init irqpoll_setup(char *str)
+{
+	irqfixup = 2;
+	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
+		"enabled\n");
+	printk(KERN_WARNING "This may significantly impact system "
+		"performance\n");
+	return 1;
+}
+
+__setup("irqpoll", irqpoll_setup);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
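The effect of the patch can be illustrated with a small standalone sketch (not part of the changeset; shared_with_other_guest() below is a made-up stand-in for the real irq_ignore_unhandled() hypercall query, and the fake_desc structure only mimics the relevant irq_desc counters). Once unhandled interrupts on a guest-shared line stop being counted, the 99,900-in-100,000 "stuck IRQ" threshold in note_interrupt() no longer disables a line whose interrupts were actually serviced by another guest.

/* Minimal user-space sketch (illustrative only, not part of the patch). */
#include <stdio.h>
#include <stdbool.h>

struct fake_desc { unsigned irq_count, irqs_unhandled; bool disabled; };

/* Stand-in for irq_ignore_unhandled(): pretend the hypervisor reports
 * IRQ 9 as shared with another guest. */
static bool shared_with_other_guest(unsigned irq) { return irq == 9; }

/* Mirrors the counting logic of note_interrupt() after this patch. */
static void note(struct fake_desc *d, unsigned irq, bool handled)
{
	if (!handled && !shared_with_other_guest(irq))
		d->irqs_unhandled++;
	if (++d->irq_count < 100000)
		return;
	d->irq_count = 0;
	if (d->irqs_unhandled > 99900) {
		printf("Disabling IRQ #%u\n", irq);
		d->disabled = true;
	}
	d->irqs_unhandled = 0;
}

int main(void)
{
	struct fake_desc shared = {0}, dedicated = {0};
	for (int i = 0; i < 200000; i++) {
		note(&shared, 9, false);    /* unhandled here, but another guest took it */
		note(&dedicated, 5, false); /* genuinely unhandled */
	}
	printf("shared line disabled: %d, dedicated line disabled: %d\n",
	       shared.disabled, dedicated.disabled);
	return 0;
}

Compiled as an ordinary user-space program, the sketch disables only the dedicated line; with the pre-patch behaviour of counting every unhandled interrupt, the shared line would have been disabled as well.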