
[Xen-changelog] Cset exclude: kaf24@xxxxxxxxxxxxxxxxxxxx|ChangeSet|20050510144837|42684



ChangeSet 1.1432, 2005/05/10 17:35:45+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Cset exclude: kaf24@xxxxxxxxxxxxxxxxxxxx|ChangeSet|20050510144837|42684



 arch/ia64/irq.c                                   |   23 +
 arch/x86/acpi/boot.c                              |   42 +
 arch/x86/io_apic.c                                |  493 ++++++++++++++++++++--
 arch/x86/irq.c                                    |   62 +-
 arch/x86/physdev.c                                |   11 
 include/asm-x86/io_apic.h                         |   38 +
 include/asm-x86/mach-default/irq_vectors_limits.h |   15 
 include/xen/irq.h                                 |    1 
 8 files changed, 608 insertions(+), 77 deletions(-)


diff -Nru a/xen/arch/ia64/irq.c b/xen/arch/ia64/irq.c
--- a/xen/arch/ia64/irq.c       2005-05-10 13:03:58 -04:00
+++ b/xen/arch/ia64/irq.c       2005-05-10 13:03:58 -04:00
@@ -1468,6 +1468,29 @@
     spin_unlock_irqrestore(&desc->lock, flags);    
     return 0;
 }
+
+int pirq_guest_bindable(int irq, int will_share)
+{
+    irq_desc_t         *desc = &irq_desc[irq];
+    irq_guest_action_t *action;
+    unsigned long       flags;
+    int                 okay;
+
+    spin_lock_irqsave(&desc->lock, flags);
+
+    action = (irq_guest_action_t *)desc->action;
+
+    /*
+     * To be bindable the IRQ must either be not currently bound (1), or
+     * it must be shareable (2) and not at its share limit (3).
+     */
+    okay = ((!(desc->status & IRQ_GUEST) && (action == NULL)) || /* 1 */
+            (action->shareable && will_share &&                  /* 2 */
+             (action->nr_guests != IRQ_MAX_GUESTS)));            /* 3 */
+
+    spin_unlock_irqrestore(&desc->lock, flags);
+    return okay;
+}
 #endif
 
 #ifdef XEN
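
The predicate above only reports whether a bind could currently succeed: it takes and releases the descriptor lock itself, so its answer is advisory by the time the caller acts on it. A hypothetical caller might use it as a cheap early-out before the real bind (a sketch only; pirq_guest_bind() lives in this same file, but its exact signature is assumed here):

    /* Hypothetical early-out: refuse a bind request when the IRQ is
     * already bound unshareably or has reached IRQ_MAX_GUESTS.  Since
     * pirq_guest_bindable() drops desc->lock before returning, the
     * bind path must still re-validate under the lock. */
    int try_guest_bind(struct exec_domain *ed, int irq, int will_share)
    {
        if ( !pirq_guest_bindable(irq, will_share) )
            return -EBUSY;
        return pirq_guest_bind(ed, irq, will_share);
    }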
diff -Nru a/xen/arch/x86/acpi/boot.c b/xen/arch/x86/acpi/boot.c
--- a/xen/arch/x86/acpi/boot.c  2005-05-10 13:03:58 -04:00
+++ b/xen/arch/x86/acpi/boot.c  2005-05-10 13:03:58 -04:00
@@ -447,6 +447,44 @@
 
 #endif /* CONFIG_ACPI_BUS */
 
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
+{
+#ifdef CONFIG_X86_IO_APIC
+       if (use_pci_vector() && !platform_legacy_irq(gsi))
+               *irq = IO_APIC_VECTOR(gsi);
+       else
+#endif
+               *irq = gsi;
+       return 0;
+}
+
+unsigned int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+{
+       unsigned int irq;
+       unsigned int plat_gsi = gsi;
+
+#ifdef CONFIG_PCI
+       /*
+        * Make sure all (legacy) PCI IRQs are set as level-triggered.
+        */
+       if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+               extern void eisa_set_level_irq(unsigned int irq);
+
+               if (edge_level == ACPI_LEVEL_SENSITIVE)
+                               eisa_set_level_irq(gsi);
+       }
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+       if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
+               plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
+       }
+#endif
+       acpi_gsi_to_irq(plat_gsi, &irq);
+       return irq;
+}
+EXPORT_SYMBOL(acpi_register_gsi);
+
 /*
  *  ACPI based hotplug support for CPU
  */
@@ -817,6 +855,10 @@
                disable_acpi();
                return error;
        }
+
+#if 0 /*def __i386__*/
+       check_acpi_pci();
+#endif
 
        acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
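
Taken together, acpi_gsi_to_irq() and acpi_register_gsi() give ACPI code a single entry point for turning a GSI into a usable IRQ: set the trigger mode (via the EISA ELCR in PIC mode, or mp_register_gsi() in IOAPIC mode), then translate the resulting platform GSI back to an IRQ number. An illustrative call site follows; ACPI_ACTIVE_LOW is assumed from the standard ACPI headers, as only ACPI_LEVEL_SENSITIVE appears above:

    /* Hypothetical caller: a PCI INTx line is conventionally
     * level-triggered and active-low. */
    unsigned int irq = acpi_register_gsi(gsi, ACPI_LEVEL_SENSITIVE,
                                         ACPI_ACTIVE_LOW);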
 
diff -Nru a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    2005-05-10 13:03:58 -04:00
+++ b/xen/arch/x86/io_apic.c    2005-05-10 13:03:58 -04:00
@@ -64,8 +64,12 @@
 } irq_2_pin[PIN_MAP_SIZE];
 
 int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
+#ifdef CONFIG_PCI_MSI
 #define vector_to_irq(vector)  \
        (platform_legacy_irq(vector) ? vector : vector_irq[vector])
+#else
+#define vector_to_irq(vector)  (vector)
+#endif
 
 /*
  * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
@@ -141,16 +145,16 @@
        __modify_IO_APIC_irq(irq, 0, 0x00010000);
 }
 
-/* trigger = 0 */
-static void __edge_IO_APIC_irq (unsigned int irq)
+/* mask = 1, trigger = 0 */
+static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
 {
-       __modify_IO_APIC_irq(irq, 0, 0x00008000);
+       __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
 }
 
-/* trigger = 1 */
-static void __level_IO_APIC_irq (unsigned int irq)
+/* mask = 0, trigger = 1 */
+static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
 {
-       __modify_IO_APIC_irq(irq, 0x00008000, 0);
+       __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
 }
 
 static void mask_IO_APIC_irq (unsigned int irq)
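
The change above folds the mask bit into each trigger-mode update, so both fields change in a single read-modify-write of the redirection entry and the line is never observed unmasked with the wrong trigger mode between two writes. For reference (bit values taken from the diff; the field names are descriptive, not from a header):

    /* Low dword of an I/O APIC redirection entry, as used above:
     *   0x00008000  trigger mode (1 = level, 0 = edge)
     *   0x00010000  mask bit     (1 = interrupt masked)
     * __mask_and_edge_IO_APIC_irq():    set 0x00010000, clear 0x00008000
     * __unmask_and_level_IO_APIC_irq(): set 0x00008000, clear 0x00010000 */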
@@ -227,6 +231,423 @@
        spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+#if defined(CONFIG_IRQBALANCE)
+# include <asm/processor.h>    /* kernel_thread() */
+# include <xen/kernel_stat.h>  /* kstat */
+# include <xen/slab.h>         /* kmalloc() */
+# include <xen/timer.h>        /* time_after() */
+ 
+# ifdef CONFIG_BALANCED_IRQ_DEBUG
+#  define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
+#  define Dprintk(x...) do { TDprintk(x); } while (0)
+# else
+#  define TDprintk(x...) 
+#  define Dprintk(x...) 
+# endif
+
+cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS];
+
+#define IRQBALANCE_CHECK_ARCH -999
+static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
+static int physical_balance = 0;
+
+struct irq_cpu_info {
+       unsigned long * last_irq;
+       unsigned long * irq_delta;
+       unsigned long irq;
+} irq_cpu_data[NR_CPUS];
+
+#define CPU_IRQ(cpu)           (irq_cpu_data[cpu].irq)
+#define LAST_CPU_IRQ(cpu,irq)   (irq_cpu_data[cpu].last_irq[irq])
+#define IRQ_DELTA(cpu,irq)     (irq_cpu_data[cpu].irq_delta[irq])
+
+#define IDLE_ENOUGH(cpu,now) \
+               (idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > 1))
+
+#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
+
+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
+
+#define MAX_BALANCED_IRQ_INTERVAL      (5*HZ)
+#define MIN_BALANCED_IRQ_INTERVAL      (HZ/2)
+#define BALANCED_IRQ_MORE_DELTA                (HZ/10)
+#define BALANCED_IRQ_LESS_DELTA                (HZ)
+
+long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL;
+
+static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
+                       unsigned long now, int direction)
+{
+       int search_idle = 1;
+       int cpu = curr_cpu;
+
+       goto inside;
+
+       do {
+               if (unlikely(cpu == curr_cpu))
+                       search_idle = 0;
+inside:
+               if (direction == 1) {
+                       cpu++;
+                       if (cpu >= NR_CPUS)
+                               cpu = 0;
+               } else {
+                       cpu--;
+                       if (cpu == -1)
+                               cpu = NR_CPUS-1;
+               }
+       } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
+                       (search_idle && !IDLE_ENOUGH(cpu,now)));
+
+       return cpu;
+}
+
+static inline void balance_irq(int cpu, int irq)
+{
+       unsigned long now = jiffies;
+       cpumask_t allowed_mask;
+       unsigned int new_cpu;
+               
+       if (irqbalance_disabled)
+               return; 
+
+       cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
+       new_cpu = move(cpu, allowed_mask, now, 1);
+       if (cpu != new_cpu) {
+               irq_desc_t *desc = irq_desc + irq;
+               unsigned long flags;
+
+               spin_lock_irqsave(&desc->lock, flags);
+               pending_irq_balance_cpumask[irq] = cpumask_of_cpu(new_cpu);
+               spin_unlock_irqrestore(&desc->lock, flags);
+       }
+}
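
Note the deferred design: balance_irq() never retargets the interrupt directly. It only records the chosen CPU in pending_irq_balance_cpumask[] under the descriptor lock; the actual reprogramming presumably happens later on the interrupt path, outside this excerpt. A sketch of such a consumer, with the function names assumed rather than taken from this changeset:

    /* Hypothetical consumer: on the next delivery of `irq`, apply and
     * clear any pending target recorded by balance_irq(). */
    static inline void move_irq_if_pending(int irq)
    {
        if ( !cpus_empty(pending_irq_balance_cpumask[irq]) )
        {
            set_ioapic_affinity_irq(irq, pending_irq_balance_cpumask[irq]);
            cpus_clear(pending_irq_balance_cpumask[irq]);
        }
    }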
+
+static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
+{
+       int i, j;
+       Dprintk("Rotating IRQs among CPUs.\n");
+       for (i = 0; i < NR_CPUS; i++) {
+               for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
+                       if (!irq_desc[j].action)
+                               continue;
+                       /* Is it a significant load ?  */
+                       if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
+                                               useful_load_threshold)
+                               continue;
+                       balance_irq(i, j);
+               }
+       }
+       balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
+               balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);       
+       return;
+}
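
The adjustment at the end of rotate_irqs_among_cpus() makes rebalancing more frequent after a pass that actually moved load. A worked example, assuming HZ == 100: starting from MAX_BALANCED_IRQ_INTERVAL (5*HZ = 500 ticks), each rotation pass subtracts BALANCED_IRQ_LESS_DELTA (HZ = 100) until it clamps at MIN_BALANCED_IRQ_INTERVAL (HZ/2 = 50): 500 -> 400 -> 300 -> 200 -> 100 -> 50. BALANCED_IRQ_MORE_DELTA (HZ/10) presumably backs the interval off again when a pass finds little to do; that path falls outside this excerpt.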
+
+static void do_irq_balance(void)
+{
+       int i, j;
