[Xen-changelog] [xen-unstable] Remove MSI IRQ storms prevention logic
# HG changeset patch
# User Shan Haitao <haitao.shan@xxxxxxxxx>
# Date 1310803939 -3600
# Node ID 4cbf5bc0265c369ef1af183343d8f5d44fa1ef56
# Parent  7effe4eacf2139d4a3c86afd25d59c9cb8a82aeb
Remove MSI IRQ storms prevention logic

The reasons are:
1. The logic has a negative impact on 10G NIC performance (with the NIC
   assigned to a guest) by lowering the interrupt frequency that Xen can
   handle.
2. Xen already has IRQ rate-limit logic, which also helps to prevent IRQ
   storms (see the sketch after the patch).

Signed-off-by: Shan Haitao <haitao.shan@xxxxxxxxx>
---

diff -r 7effe4eacf21 -r 4cbf5bc0265c xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Sat Jul 16 09:09:46 2011 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Sat Jul 16 09:12:19 2011 +0100
@@ -400,8 +400,6 @@
 
     if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) )
         vioapic_update_EOI(vlapic_domain(vlapic), vector);
-
-    hvm_dpci_msi_eoi(current->domain, vector);
 }
 
 int vlapic_ipi(
diff -r 7effe4eacf21 -r 4cbf5bc0265c xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Sat Jul 16 09:09:46 2011 +0100
+++ b/xen/arch/x86/irq.c Sat Jul 16 09:12:19 2011 +0100
@@ -790,22 +790,6 @@
         clear_bit(irq, d->arch.pv_domain.pirq_eoi_map);
 }
 
-static void _irq_guest_eoi(struct irq_desc *desc)
-{
-    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
-    unsigned int i, irq = desc - irq_desc;
-
-    if ( !(desc->status & IRQ_GUEST_EOI_PENDING) )
-        return;
-
-    for ( i = 0; i < action->nr_guests; ++i )
-        clear_pirq_eoi(action->guest[i],
-                       domain_irq_to_pirq(action->guest[i], irq));
-
-    desc->status &= ~(IRQ_INPROGRESS|IRQ_GUEST_EOI_PENDING);
-    desc->handler->enable(irq);
-}
-
 static void set_eoi_ready(void *data);
 
 static void irq_guest_eoi_timer_fn(void *data)
@@ -849,9 +833,6 @@
         on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
         spin_lock_irq(&desc->lock);
         break;
-    case ACKTYPE_NONE:
-        _irq_guest_eoi(desc);
-        break;
     }
 
  out:
@@ -863,7 +844,7 @@
     struct irq_desc *desc = irq_to_desc(irq);
     irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
     struct domain      *d;
-    int                 i, sp, already_pending = 0;
+    int                 i, sp;
     struct pending_eoi *peoi = this_cpu(pending_eoi);
     int vector = get_irq_regs()->entry_vector;
 
@@ -897,45 +878,16 @@
         if ( (action->ack_type != ACKTYPE_NONE) &&
              !test_and_set_bool(pirq->masked) )
             action->in_flight++;
-        if ( hvm_do_IRQ_dpci(d, pirq) )
-        {
-            if ( action->ack_type == ACKTYPE_NONE )
-            {
-                already_pending += !!(desc->status & IRQ_INPROGRESS);
-                desc->status |= IRQ_INPROGRESS; /* cleared during hvm eoi */
-            }
-        }
-        else if ( send_guest_pirq(d, pirq) &&
-                  (action->ack_type == ACKTYPE_NONE) )
-        {
-            already_pending++;
-        }
+        if ( !hvm_do_IRQ_dpci(d, pirq) )
+            send_guest_pirq(d, pirq);
     }
 
-    stop_timer(&action->eoi_timer);
-
-    if ( (action->ack_type == ACKTYPE_NONE) &&
-         (already_pending == action->nr_guests) )
+    if ( action->ack_type != ACKTYPE_NONE )
     {
-        desc->handler->disable(irq);
-        desc->status |= IRQ_GUEST_EOI_PENDING;
-        for ( i = 0; i < already_pending; ++i )
-        {
-            d = action->guest[i];
-            set_pirq_eoi(d, domain_irq_to_pirq(d, irq));
-            /*
-             * Could check here whether the guest unmasked the event by now
-             * (or perhaps just re-issue the send_guest_pirq()), and if it
-             * can now accept the event,
-             * - clear all the pirq_eoi bits we already set,
-             * - re-enable the vector, and
-             * - skip the timer setup below.
-             */
-        }
+        stop_timer(&action->eoi_timer);
+        migrate_timer(&action->eoi_timer, smp_processor_id());
+        set_timer(&action->eoi_timer, NOW() + MILLISECS(1));
     }
-
-    migrate_timer(&action->eoi_timer, smp_processor_id());
-    set_timer(&action->eoi_timer, NOW() + MILLISECS(1));
 }
 
 /*
@@ -1183,13 +1135,6 @@
     action = (irq_guest_action_t *)desc->action;
     irq = desc - irq_desc;
 
-    if ( action->ack_type == ACKTYPE_NONE )
-    {
-        ASSERT(!pirq->masked);
-        stop_timer(&action->eoi_timer);
-        _irq_guest_eoi(desc);
-    }
-
     if ( unlikely(!test_and_clear_bool(pirq->masked)) ||
          unlikely(--action->in_flight != 0) )
     {
@@ -1468,10 +1413,6 @@
             spin_lock_irq(&desc->lock);
         }
         break;
-    case ACKTYPE_NONE:
-        stop_timer(&action->eoi_timer);
-        _irq_guest_eoi(desc);
-        break;
     }
 
     /*
@@ -1509,7 +1450,7 @@
     BUG_ON(!cpus_empty(action->cpu_eoi_map));
 
     desc->action = NULL;
-    desc->status &= ~(IRQ_GUEST|IRQ_GUEST_EOI_PENDING|IRQ_INPROGRESS);
+    desc->status &= ~(IRQ_GUEST|IRQ_INPROGRESS);
     desc->handler->shutdown(irq);
 
     /* Caller frees the old guest descriptor block. */
diff -r 7effe4eacf21 -r 4cbf5bc0265c xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c Sat Jul 16 09:09:46 2011 +0100
+++ b/xen/drivers/passthrough/io.c Sat Jul 16 09:12:19 2011 +0100
@@ -421,58 +421,6 @@
 }
 
 #ifdef SUPPORT_MSI_REMAPPING
-/* called with d->event_lock held */
-static void __msi_pirq_eoi(struct hvm_pirq_dpci *pirq_dpci)
-{
-    irq_desc_t *desc;
-
-    if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) &&
-         (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) )
-    {
-        struct pirq *pirq = dpci_pirq(pirq_dpci);
-
-        BUG_ON(!local_irq_is_enabled());
-        desc = pirq_spin_lock_irq_desc(pirq, NULL);
-        if ( !desc )
-            return;
-
-        desc->status &= ~IRQ_INPROGRESS;
-        desc_guest_eoi(desc, pirq);
-    }
-}
-
-static int _hvm_dpci_msi_eoi(struct domain *d,
-                             struct hvm_pirq_dpci *pirq_dpci, void *arg)
-{
-    int vector = (long)arg;
-
-    if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
-         (pirq_dpci->gmsi.gvec == vector) )
-    {
-        int dest = pirq_dpci->gmsi.gflags & VMSI_DEST_ID_MASK;
-        int dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
-
-        if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
-                               dest_mode) )
-        {
-            __msi_pirq_eoi(pirq_dpci);
-            return 1;
-        }
-    }
-
-    return 0;
-}
-
-void hvm_dpci_msi_eoi(struct domain *d, int vector)
-{
-    if ( !iommu_enabled || !d->arch.hvm_domain.irq.dpci )
-        return;
-
-    spin_lock(&d->event_lock);
-    pt_pirq_iterate(d, _hvm_dpci_msi_eoi, (void *)(long)vector);
-    spin_unlock(&d->event_lock);
-}
-
 static int hvm_pci_msi_assert(struct domain *d,
                               struct hvm_pirq_dpci *pirq_dpci)
 {
@@ -510,14 +458,6 @@
     else
         hvm_pci_intx_assert(d, device, intx);
     pirq_dpci->pending++;
-
-#ifdef SUPPORT_MSI_REMAPPING
-    if ( pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE )
-    {
-        /* for translated MSI to INTx interrupt, eoi as early as possible */
-        __msi_pirq_eoi(pirq_dpci);
-    }
-#endif
 }
 
 /*
diff -r 7effe4eacf21 -r 4cbf5bc0265c xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h Sat Jul 16 09:09:46 2011 +0100
+++ b/xen/include/asm-x86/hvm/io.h Sat Jul 16 09:12:19 2011 +0100
@@ -139,6 +139,5 @@
 void stdvga_init(struct domain *d);
 void stdvga_deinit(struct domain *d);
 
-extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
 
 #endif /* __ASM_X86_HVM_IO_H__ */
diff -r 7effe4eacf21 -r 4cbf5bc0265c xen/include/xen/irq.h
--- a/xen/include/xen/irq.h Sat Jul 16 09:09:46 2011 +0100
+++ b/xen/include/xen/irq.h Sat Jul 16 09:12:19 2011 +0100
@@ -25,7 +25,6 @@
 #define IRQ_PENDING     4   /* IRQ pending - replay on enable */
 #define IRQ_REPLAY      8   /* IRQ has been replayed but not acked yet  */
 #define IRQ_GUEST       16  /* IRQ is handled by guest OS(es) */
-#define IRQ_GUEST_EOI_PENDING 32 /* IRQ was disabled, pending a guest EOI */
 #define IRQ_MOVE_PENDING      64  /* IRQ is migrating to another CPUs */
 #define IRQ_PER_CPU     256 /* IRQ is per CPU */
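
For context, the "IRQ rate-limit logic" cited in reason 2 is the existing
irq_ratelimit support in xen/arch/x86/irq.c. Below is a minimal, generic
sketch of that style of rate limiting, counting interrupts per sampling
period and masking a line once a threshold is exceeded. The names and values
(struct irq_line, RATELIMIT_THRESHOLD, the simulated 25000-interrupt storm)
are illustrative only; this is not the Xen implementation.

/*
 * Generic IRQ rate-limiting sketch (not Xen code): count interrupts per
 * sampling period and mask the line once a threshold is exceeded; a
 * periodic tick resets the counter and unmasks the line again.
 */
#include <stdbool.h>
#include <stdio.h>

#define RATELIMIT_THRESHOLD 10000   /* max interrupts accepted per period */

struct irq_line {
    unsigned int count;             /* interrupts seen this period */
    bool masked;                    /* line masked by the limiter */
};

/* Called on the interrupt path: returns false if the IRQ should be dropped. */
static bool ratelimit_check(struct irq_line *line)
{
    if (line->masked)
        return false;
    if (++line->count > RATELIMIT_THRESHOLD) {
        line->masked = true;        /* storm detected: mask until next period */
        return false;
    }
    return true;
}

/* Called once per sampling period: reset the counter and unmask the line. */
static void ratelimit_period_tick(struct irq_line *line)
{
    line->count = 0;
    line->masked = false;
}

int main(void)
{
    struct irq_line line = { 0, false };
    unsigned int delivered = 0;

    for (unsigned int i = 0; i < 25000; i++)    /* simulate an IRQ storm */
        if (ratelimit_check(&line))
            delivered++;

    printf("delivered %u of 25000 interrupts\n", delivered);
    ratelimit_period_tick(&line);               /* next period: line unmasked */
    return 0;
}

In the hypervisor the reset/unmask step is driven by a periodic timer rather
than an explicit tick call, but the counting idea is the same.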