[Xen-devel] [PATCH] xen: do not lose level interrupt notifications
PV on HVM guests can lose level interrupts coming from emulated devices: we are missing the code to retry injecting a pirq into the guest when it corresponds to a level interrupt that is raised again while the guest is still servicing the first one.

The same thing can also happen with PV guests, including dom0, although it is much less likely. For PV guests the scenario is the following:

1) a device raises a level interrupt and Xen injects it into the guest;
2) the guest is temporarily stuck: it neither acks nor EOIs the interrupt;
3) the Xen timer kicks in and EOIs the interrupt;
4) the device thinks everything is fine and raises a second interrupt;
5) Xen fails to inject the second interrupt into the guest because the guest
   still has the event channel pending bit set.

At this point the guest has lost the second interrupt notification, which is not supposed to happen with level interrupts and might cause problems with some devices.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
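As an aside, and not part of the patch itself: the standalone C sketch below models the bookkeeping introduced by the hunks that follow, under simplified assumptions. All names in it (fake_pirq, inject_pirq, and so on) are made up for illustration and do not exist in Xen; the real code works on struct pirq, send_guest_pirq() and the PHYSDEVOP_eoi path shown further down. The idea is: when a level interrupt cannot be delivered because the event channel pending bit is still set, record it in a per-pirq "lost" counter and retry the injection when the guest EOIs the pirq.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the relevant bits of struct pirq: the event channel
 * pending bit and the "lost" counter added by this patch. */
struct fake_pirq {
    bool     evtchn_pending;
    uint32_t lost;
};

/* Mimics send_guest_pirq(): delivery fails if the previous notification
 * has not been consumed yet (pending bit still set). */
static int inject_pirq(struct fake_pirq *p)
{
    if (p->evtchn_pending)
        return -1;
    p->evtchn_pending = true;
    return 0;
}

/* A device (or the device model) asserts a level interrupt. */
static void assert_level_irq(struct fake_pirq *p)
{
    if (inject_pirq(p))
        p->lost++;              /* could not deliver now, retry on EOI */
}

/* The guest EOIs the pirq (cf. PHYSDEVOP_eoi): retry a lost notification. */
static void guest_eoi(struct fake_pirq *p)
{
    p->evtchn_pending = false;
    if (p->lost > 0 && inject_pirq(p) == 0)
        p->lost--;
}

int main(void)
{
    struct fake_pirq p = { 0 };

    assert_level_irq(&p);       /* delivered, pending bit now set */
    assert_level_irq(&p);       /* not delivered, lost = 1 */
    printf("pending=%d lost=%u\n", p.evtchn_pending, (unsigned)p.lost);

    guest_eoi(&p);              /* pending cleared, lost one re-injected */
    printf("pending=%d lost=%u\n", p.evtchn_pending, (unsigned)p.lost);
    return 0;
}

Note that the patch only does this bookkeeping for interrupts that behave as level triggered (ACKTYPE_EOI, or GSIs above the ISA range), presumably because edge interrupts can safely be coalesced.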
diff -r bf533533046c xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c	Fri Sep 30 14:12:35 2011 +0000
+++ b/xen/arch/x86/hvm/irq.c	Mon Oct 03 16:54:51 2011 +0000
@@ -36,7 +36,8 @@ static void assert_gsi(struct domain *d,

     if ( hvm_domain_use_pirq(d, pirq) )
     {
-        send_guest_pirq(d, pirq);
+        if ( send_guest_pirq(d, pirq) && ioapic_gsi >= NR_ISAIRQS )
+            pirq->lost++;
         return;
     }
     vioapic_irq_positive_edge(d, ioapic_gsi);
@@ -63,6 +64,7 @@ static void __hvm_pci_intx_assert(
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
     unsigned int gsi, link, isa_irq;
+    struct pirq *pirq;

     ASSERT((device <= 31) && (intx <= 3));

@@ -72,6 +74,11 @@ static void __hvm_pci_intx_assert(
     gsi = hvm_pci_intx_gsi(device, intx);
     if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
         assert_gsi(d, gsi);
+    else {
+        pirq = pirq_info(d, domain_emuirq_to_pirq(d, gsi));
+        if ( hvm_domain_use_pirq(d, pirq) )
+            pirq->lost++;
+    }

     link = hvm_pci_intx_link(device, intx);
     isa_irq = hvm_irq->pci_link.route[link];
diff -r bf533533046c xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c	Fri Sep 30 14:12:35 2011 +0000
+++ b/xen/arch/x86/irq.c	Mon Oct 03 16:54:51 2011 +0000
@@ -965,7 +965,11 @@ static void __do_IRQ_guest(int irq)
              !test_and_set_bool(pirq->masked) )
             action->in_flight++;
         if ( !hvm_do_IRQ_dpci(d, pirq) )
-            send_guest_pirq(d, pirq);
+        {
+            if ( send_guest_pirq(d, pirq) &&
+                    action->ack_type == ACKTYPE_EOI )
+                pirq->lost++;
+        }
     }

     if ( action->ack_type != ACKTYPE_NONE )
diff -r bf533533046c xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c	Fri Sep 30 14:12:35 2011 +0000
+++ b/xen/arch/x86/physdev.c	Mon Oct 03 16:54:51 2011 +0000
@@ -11,6 +11,7 @@
 #include <asm/current.h>
 #include <asm/io_apic.h>
 #include <asm/msi.h>
+#include <asm/hvm/irq.h>
 #include <asm/hypercall.h>
 #include <public/xen.h>
 #include <public/physdev.h>
@@ -270,6 +271,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         if ( !is_hvm_domain(v->domain) ||
              domain_pirq_to_irq(v->domain, eoi.irq) > 0 )
             pirq_guest_eoi(pirq);
+        if ( pirq->lost > 0) {
+            if ( !send_guest_pirq(v->domain, pirq) )
+                pirq->lost--;
+        }
         spin_unlock(&v->domain->event_lock);
         ret = 0;
         break;
@@ -328,9 +333,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
             break;
         irq_status_query.flags = 0;
         if ( is_hvm_domain(v->domain) &&
-             domain_pirq_to_irq(v->domain, irq) <= 0 )
+             domain_pirq_to_irq(v->domain, irq) <= 0 &&
+             domain_pirq_to_emuirq(v->domain, irq) == IRQ_UNBOUND )
         {
-            ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
+            ret = -EINVAL;
             break;
         }
diff -r bf533533046c xen/include/xen/irq.h
--- a/xen/include/xen/irq.h	Fri Sep 30 14:12:35 2011 +0000
+++ b/xen/include/xen/irq.h	Mon Oct 03 16:54:51 2011 +0000
@@ -146,6 +146,7 @@ struct pirq {
     int pirq;
     u16 evtchn;
     bool_t masked;
+    u32 lost;
     struct rcu_head rcu_head;
     struct arch_pirq arch;
 };

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel