[Xen-changelog] [xen-unstable] [XEN] Fix pirq_guest_unbind(). Remove unnecessary code

# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 0bb18319b8a0ee957ea9307ac3d9e40c31b456d4
# Parent af704c33a9a49b58f4546d00b8d742b08f4e5a51
[XEN] Fix pirq_guest_unbind(). Remove unnecessary code
(in fact flush_all_pending_eoi() was broken!).
Based on a patch from Steven Rostedt <srostedt@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/irq.c |   87 ++++++-----------------------------------------------
 1 files changed, 10 insertions(+), 77 deletions(-)

diff -r af704c33a9a4 -r 0bb18319b8a0 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c	Tue Aug 22 18:51:02 2006 +0100
+++ b/xen/arch/x86/irq.c	Wed Aug 23 11:27:06 2006 +0100
@@ -260,40 +260,6 @@ static void set_eoi_ready(void *data)
     spin_lock(&desc->lock);
     __set_eoi_ready(desc);
     spin_unlock(&desc->lock);
-
-    flush_ready_eoi(NULL);
-}
-
-/*
- * Forcibly flush all pending EOIs on this CPU by emulating end-of-ISR
- * notifications from guests. The caller of this function must ensure that
- * all CPUs execute flush_ready_eoi().
- */
-static void flush_all_pending_eoi(void *unused)
-{
-    irq_desc_t         *desc;
-    irq_guest_action_t *action;
-    struct pending_eoi *peoi = this_cpu(pending_eoi);
-    int                 i, vector, sp;
-
-    ASSERT(!local_irq_is_enabled());
-
-    sp = pending_eoi_sp(peoi);
-    while ( --sp >= 0 )
-    {
-        if ( peoi[sp].ready )
-            continue;
-        vector = peoi[sp].vector;
-        desc = &irq_desc[vector];
-        spin_lock(&desc->lock);
-        action = (irq_guest_action_t *)desc->action;
-        ASSERT(action->ack_type == ACKTYPE_EOI);
-        ASSERT(desc->status & IRQ_GUEST);
-        for ( i = 0; i < action->nr_guests; i++ )
-            clear_bit(vector_to_irq(vector), action->guest[i]->pirq_mask);
-        action->in_flight = 0;
-        spin_unlock(&desc->lock);
-    }
 
     flush_ready_eoi(NULL);
 }
@@ -566,6 +532,10 @@ int pirq_guest_unbind(struct domain *d, 
         break;
     }
 
+    /*
+     * The guest cannot re-bind to this IRQ until this function returns. So,
+     * when we have flushed this IRQ from pirq_mask, it should remain flushed.
+     */
     BUG_ON(test_bit(irq, d->pirq_mask));
 
     if ( action->nr_guests != 0 )
@@ -579,17 +549,18 @@ int pirq_guest_unbind(struct domain *d, 
     desc->handler->disable(vector);
 
     /*
-     * We may have a EOI languishing anywhere in one of the per-CPU
-     * EOI stacks. Forcibly flush the stack on every CPU where this might
-     * be the case.
+     * Mark any remaining pending EOIs as ready to flush.
+     * NOTE: We will need to make this a stronger barrier if in future we allow
+     * an interrupt vectors to be re-bound to a different PIC. In that case we
+     * would need to flush all ready EOIs before returning as otherwise the
+     * desc->handler could change and we would call the wrong 'end' hook.
      */
     cpu_eoi_map = action->cpu_eoi_map;
     if ( !cpus_empty(cpu_eoi_map) )
     {
         BUG_ON(action->ack_type != ACKTYPE_EOI);
         spin_unlock_irqrestore(&desc->lock, flags);
-        on_selected_cpus(cpu_eoi_map, flush_all_pending_eoi, NULL, 1, 1);
-        on_selected_cpus(cpu_online_map, flush_ready_eoi, NULL, 1, 1);
+        on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 1);
         spin_lock_irqsave(&desc->lock, flags);
     }
 
@@ -672,41 +643,3 @@ static int __init setup_dump_irqs(void)
     return 0;
 }
 __initcall(setup_dump_irqs);
-
-static DEFINE_PER_CPU(struct timer, end_irq_timer);
-
-/*
- * force_intack: Forcibly emit all pending EOIs on each CPU every second.
- * Mainly useful for debugging or poking lazy guests ISRs.
- */
-
-static void end_irq_timeout(void *unused)
-{
-    local_irq_disable();
-    flush_all_pending_eoi(NULL);
-    local_irq_enable();
-
-    on_selected_cpus(cpu_online_map, flush_ready_eoi, NULL, 1, 0);
-
-    set_timer(&this_cpu(end_irq_timer), NOW() + MILLISECS(1000));
-}
-
-static int force_intack;
-boolean_param("force_intack", force_intack);
-
-static int __init setup_irq_timeout(void)
-{
-    unsigned int cpu;
-
-    if ( !force_intack )
-        return 0;
-
-    for_each_online_cpu ( cpu )
-    {
-        init_timer(&per_cpu(end_irq_timer, cpu), end_irq_timeout, NULL, cpu);
-        set_timer(&per_cpu(end_irq_timer, cpu), NOW() + MILLISECS(1000));
-    }
-
-    return 0;
-}
-__initcall(setup_irq_timeout);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
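
For readers coming to this change cold, the following is a minimal, self-contained sketch of the per-CPU pending-EOI stack discipline that set_eoi_ready()/flush_ready_eoi() implement, and of why pirq_guest_unbind() now only needs to mark the unbound IRQ's entry as ready on the CPUs in action->cpu_eoi_map rather than force-flush whole stacks. It is not the Xen code: the toy_* names, the fixed-size array, printf() and the main() driver are invented here purely for illustration, and the real locking, per-CPU data and irq_desc bookkeeping are omitted.

/*
 * Toy model of the per-CPU pending-EOI stack (illustrative only; the real
 * code lives in xen/arch/x86/irq.c and is per-CPU, locked and driven by
 * actual interrupts).
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_NR_VECTORS 8

struct toy_pending_eoi {
    int  vector;   /* vector still awaiting an end-of-interrupt */
    bool ready;    /* guest has acked; safe to issue the 'end' hook */
};

/* LIFO of in-flight guest interrupts, newest on top. */
static struct toy_pending_eoi peoi[TOY_NR_VECTORS];
static int peoi_sp;            /* stack pointer: number of live entries */

static void toy_push_pending(int vector)
{
    peoi[peoi_sp].vector = vector;
    peoi[peoi_sp].ready  = false;
    peoi_sp++;
}

/* Roughly what __set_eoi_ready() does: flag one vector as EOI-able. */
static void toy_mark_eoi_ready(int vector)
{
    for (int sp = 0; sp < peoi_sp; sp++)
        if (peoi[sp].vector == vector)
            peoi[sp].ready = true;
}

/*
 * Roughly what flush_ready_eoi() does: pop and 'end' entries from the top
 * of the stack for as long as they are marked ready.  An entry buried
 * beneath a not-yet-ready one is left alone, which is why the unbind path
 * only has to *mark* the IRQ ready on each CPU in cpu_eoi_map and let the
 * normal flushing discipline do the rest.
 */
static void toy_flush_ready(void)
{
    while (peoi_sp > 0 && peoi[peoi_sp - 1].ready) {
        peoi_sp--;
        printf("EOI for vector 0x%x\n", peoi[peoi_sp].vector);
    }
}

int main(void)
{
    toy_push_pending(0x30);
    toy_push_pending(0x38);      /* nested interrupt arrives on top */
    toy_mark_eoi_ready(0x30);
    toy_flush_ready();           /* prints nothing: 0x30 is buried under 0x38 */
    toy_mark_eoi_ready(0x38);
    toy_flush_ready();           /* ends 0x38, then 0x30, in LIFO order */
    return 0;
}

Within that model, the new unbind path corresponds to running toy_mark_eoi_ready() followed by toy_flush_ready() on each CPU that might still hold the IRQ on its stack, which is what the patch's single on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 1) call arranges.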