
[Xen-changelog] [linux-2.6.18-xen] IRQ/evtchn: adjust affinity handling


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-linux-2.6.18-xen <patchbot@xxxxxxx>
  • Date: Wed, 26 Sep 2012 09:44:04 +0000
  • Delivery-date: Wed, 26 Sep 2012 09:45:11 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1348652183 -7200
# Node ID f91e59ea970843355ab682d41c2181c1d5ec8850
# Parent  a752ff67b472662f15883f179e554cc64a87b272
IRQ/evtchn: adjust affinity handling

Virtually forever, Xen code has diverged from native in the way IRQ
affinities are managed: native code, even when restricting handling of
an IRQ to a single CPU (e.g. because of APIC ID constraints), would
still keep the affinity set to all permitted CPUs. Xen instead
restricted the affinity along with the handling. Retain that behavior
only for per-CPU IRQs (and for other dynamic ones on their initial
setup, albeit perhaps even that is still too strict), but make physical
ones (and dynamic ones whose affinity gets adjusted on the fly) match
native behavior.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---


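To make the change concrete: an event channel is only ever delivered to a
single vCPU, so set_affinity_irq() still routes delivery to
first_cpu(dest); what the patch changes is the mask recorded via
set_native_irq_info(). Below is a minimal standalone sketch of that
distinction (plain userspace C with a simplified one-word cpumask_t,
purely illustrative and not part of the patch or the kernel sources):

/* Illustrative sketch only: shows the mask semantics this patch adopts.
 * Delivery always targets one CPU, but the recorded affinity now keeps
 * the caller's full mask instead of being narrowed to the target CPU. */
#include <stdio.h>

typedef unsigned long cpumask_t;        /* simplified stand-in */

static unsigned int first_cpu(cpumask_t mask)
{
        return __builtin_ctzl(mask);    /* index of lowest set bit */
}

int main(void)
{
        cpumask_t dest = 0x0c;          /* caller requests CPUs 2 and 3 */
        unsigned int target = first_cpu(dest);

        /* Old Xen behavior: affinity narrowed to the delivery CPU. */
        cpumask_t old_recorded = 1UL << target;
        /* New behavior: delivery still hits one CPU, but the full
         * requested mask is what gets recorded for the IRQ. */
        cpumask_t new_recorded = dest;

        printf("deliver to CPU %u, old affinity %#lx, new affinity %#lx\n",
               target, old_recorded, new_recorded);
        return 0;
}

With dest = 0x0c this prints delivery to CPU 2 with a recorded affinity of
0x0c rather than 0x04, which corresponds to set_affinity_irq() in the diff
below now passing the full dest mask instead of cpumask_of_cpu(tcpu).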
diff -r a752ff67b472 -r f91e59ea9708 drivers/xen/core/evtchn.c
--- a/drivers/xen/core/evtchn.c Wed Sep 26 11:35:01 2012 +0200
+++ b/drivers/xen/core/evtchn.c Wed Sep 26 11:36:23 2012 +0200
@@ -137,21 +137,28 @@ static inline unsigned long active_evtch
                ~sh->evtchn_mask[idx]);
 }
 
-static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+static void _bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu, int irq,
+                               cpumask_t cpumask)
 {
        shared_info_t *s = HYPERVISOR_shared_info;
-       int irq = evtchn_to_irq[chn];
 
        BUG_ON(!test_bit(chn, s->evtchn_mask));
 
-       if (irq != -1)
-               set_native_irq_info(irq, cpumask_of_cpu(cpu));
+       if (irq >= 0) {
+               BUG_ON(!cpu_isset(cpu, cpumask));
+               set_native_irq_info(irq, cpumask);
+       }
 
        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
 }
 
+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+{
+       _bind_evtchn_to_cpu(chn, cpu, evtchn_to_irq[chn], cpumask_of_cpu(cpu));
+}
+
 static void init_evtchn_cpu_bindings(void)
 {
        int i;
@@ -180,6 +187,11 @@ static inline unsigned long active_evtch
        return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
 }
 
+static void _bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu, int irq,
+                               cpumask_t cpumask)
+{
+}
+
 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 {
 }
@@ -663,30 +675,32 @@ void unbind_from_irqhandler(unsigned int
 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
 
 #ifdef CONFIG_SMP
-void rebind_evtchn_to_cpu(int port, unsigned int cpu)
+static void _rebind_evtchn_to_cpu(int port, unsigned int cpu, int irq,
+                                 cpumask_t dest)
 {
        struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu };
        int masked;
 
        masked = test_and_set_evtchn_mask(port);
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv) == 0)
-               bind_evtchn_to_cpu(port, cpu);
+               _bind_evtchn_to_cpu(port, cpu, irq, dest);
        if (!masked)
                unmask_evtchn(port);
 }
 
-static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu)
+void rebind_evtchn_to_cpu(int port, unsigned int cpu)
 {
-       int evtchn = evtchn_from_irq(irq);
-
-       if (VALID_EVTCHN(evtchn))
-               rebind_evtchn_to_cpu(evtchn, tcpu);
+       _rebind_evtchn_to_cpu(port, cpu, evtchn_to_irq[port],
+                             cpumask_of_cpu(cpu));
 }
 
 static void set_affinity_irq(unsigned int irq, cpumask_t dest)
 {
+       int evtchn = evtchn_from_irq(irq);
        unsigned tcpu = first_cpu(dest);
-       rebind_irq_to_cpu(irq, tcpu);
+
+       if (VALID_EVTCHN(evtchn))
+               _rebind_evtchn_to_cpu(evtchn, tcpu, irq, dest);
 }
 #endif
 
@@ -854,7 +868,7 @@ static unsigned int startup_pirq(unsigne
        pirq_query_unmask(irq);
 
        evtchn_to_irq[evtchn] = irq;
-       bind_evtchn_to_cpu(evtchn, 0);
+       _bind_evtchn_to_cpu(evtchn, 0, irq, cpu_possible_map);
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
 
  out:
@@ -1019,7 +1033,7 @@ static void restore_cpu_virqs(unsigned i
                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-               bind_evtchn_to_cpu(evtchn, cpu);
+               _bind_evtchn_to_cpu(evtchn, cpu, -1, CPU_MASK_NONE);
 
                /* Ready for use. */
                unmask_evtchn(evtchn);
@@ -1047,7 +1061,7 @@ static void restore_cpu_ipis(unsigned in
                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-               bind_evtchn_to_cpu(evtchn, cpu);
+               _bind_evtchn_to_cpu(evtchn, cpu, -1, CPU_MASK_NONE);
 
                /* Ready for use. */
                unmask_evtchn(evtchn);
