[PATCH] xen/events: avoid using info_for_irq() in xen_send_IPI_one()



xen_send_IPI_one() is used by cpuhp_report_idle_dead() after it has
called rcu_report_dead(), so any RCU usage by xen_send_IPI_one() is a
bad idea.
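
For reference, the problematic path looks roughly like this (abbreviated
from kernel/cpu.c; the IPI plumbing below smp_call_function_single() is
paraphrased and is meant as a sketch, not the exact call chain):

  /* kernel/cpu.c (abbreviated): the dying cpu reports itself dead */
  void cpuhp_report_idle_dead(void)
  {
          struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

          rcu_report_dead(smp_processor_id()); /* no RCU use after this */
          st->state = CPUHP_AP_IDLE_DEAD;
          /* Completion is delegated to an online cpu via an IPI ... */
          smp_call_function_single(cpumask_first(cpu_online_mask),
                                   cpuhp_complete_idle_dead, st, 0);
          /* ... which on Xen is sent via xen_send_IPI_one(). */
  }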

Unfortunately xen_send_IPI_one() uses notify_remote_via_irq() today,
which uses irq_get_chip_data() via info_for_irq(). irq_get_chip_data()
in turn does a maple-tree lookup, which requires RCU.
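
The offending chain, slightly simplified (bodies paraphrased from
drivers/xen/events/events_base.c and kernel/irq/irqdesc.c; take this
as a sketch):

  /* drivers/xen/events/events_base.c */
  static struct irq_info *info_for_irq(unsigned irq)
  {
          return irq_get_chip_data(irq);        /* -> irq_to_desc(irq) */
  }

  /* kernel/irq/irqdesc.c, since commit 721255b9826b: */
  struct irq_desc *irq_to_desc(unsigned int irq)
  {
          return mtree_load(&sparse_irqs, irq); /* takes rcu_read_lock() */
  }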

Avoid this problem by caching the IPI event channels in another percpu
variable, allowing the use of notify_remote_via_evtchn() in
xen_send_IPI_one().
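
notify_remote_via_evtchn() needs no IRQ-layer lookup at all; it boils
down to a plain hypercall (sketch along the lines of the helper in
include/xen/events.h):

  static inline void notify_remote_via_evtchn(evtchn_port_t port)
  {
          struct evtchn_send send = { .port = port };

          (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
  }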

Fixes: 721255b9826b ("genirq: Use a maple tree for interrupt descriptor management")
Reported-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Tested-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
---
 drivers/xen/events/events_base.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 1b2136fe0fa5..2cf0c2b69386 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -164,6 +164,8 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
+/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
+static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
 
 /* Event channel distribution data */
 static atomic_t channels_on_cpu[NR_CPUS];
@@ -366,6 +368,7 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
        info->u.ipi = ipi;
 
        per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+       per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
 
        return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
 }
@@ -981,6 +984,7 @@ static void __unbind_from_irq(unsigned int irq)
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
+                       per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(irq)] = 0;
                        break;
                case IRQT_EVTCHN:
                        dev = info->u.interdomain;
@@ -1631,7 +1635,7 @@ EXPORT_SYMBOL_GPL(evtchn_put);
 
 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
 {
-       int irq;
+       evtchn_port_t evtchn;
 
 #ifdef CONFIG_X86
        if (unlikely(vector == XEN_NMI_VECTOR)) {
@@ -1642,9 +1646,9 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
                return;
        }
 #endif
-       irq = per_cpu(ipi_to_irq, cpu)[vector];
-       BUG_ON(irq < 0);
-       notify_remote_via_irq(irq);
+       evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
+       BUG_ON(evtchn == 0);
+       notify_remote_via_evtchn(evtchn);
 }
 
 struct evtchn_loop_ctrl {
-- 
2.35.3