[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86: All vectored interrupts go through do_IRQ().



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1333093833 -3600
# Node ID adf1112309cf63a5897716face0a6441eff7b610
# Parent  58b5b500ba40d1563c804c9e00246c904d56991b
x86: All vectored interrupts go through do_IRQ().

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---


diff -r 58b5b500ba40 -r adf1112309cf xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c       Thu Mar 29 16:59:33 2012 +0100
+++ b/xen/arch/x86/apic.c       Fri Mar 30 08:50:33 2012 +0100
@@ -34,7 +34,6 @@
 #include <asm/hardirq.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
-#include <asm/asm_defns.h> /* for BUILD_SMP_INTERRUPT */
 #include <mach_apic.h>
 #include <io_ports.h>
 #include <xen/kexec.h>
@@ -84,33 +83,6 @@ static enum apic_mode apic_boot_mode = A
 bool_t __read_mostly x2apic_enabled = 0;
 bool_t __read_mostly directed_eoi_enabled = 0;
 
-/*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
- */
-__asm__(".section .text");
-BUILD_SMP_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
-BUILD_SMP_INTERRUPT(event_check_interrupt,EVENT_CHECK_VECTOR)
-BUILD_SMP_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
-BUILD_SMP_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
-
-/*
- * Every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-BUILD_SMP_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
-BUILD_SMP_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_SMP_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-BUILD_SMP_INTERRUPT(pmu_apic_interrupt,PMU_APIC_VECTOR)
-BUILD_SMP_INTERRUPT(cmci_interrupt, CMCI_APIC_VECTOR)
-#ifdef CONFIG_X86_MCE_THERMAL
-BUILD_SMP_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
-#endif
-
 static int modern_apic(void)
 {
     unsigned int lvr, version;
@@ -148,21 +120,21 @@ void __init apic_intr_init(void)
     smp_intr_init();
 
     /* self generated IPI for local APIC timer */
-    set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+    set_direct_apic_vector(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
     /* IPI vectors for APIC spurious and error interrupts */
-    set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-    set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+    set_direct_apic_vector(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+    set_direct_apic_vector(ERROR_APIC_VECTOR, error_interrupt);
 
     /* Performance Counters Interrupt */
-    set_intr_gate(PMU_APIC_VECTOR, pmu_apic_interrupt);
+    set_direct_apic_vector(PMU_APIC_VECTOR, pmu_apic_interrupt);
 
     /* CMCI Correctable Machine Check Interrupt */
-    set_intr_gate(CMCI_APIC_VECTOR, cmci_interrupt);
+    set_direct_apic_vector(CMCI_APIC_VECTOR, cmci_interrupt);
 
     /* thermal monitor LVT interrupt, for P4 and latest Intel CPU*/
 #ifdef CONFIG_X86_MCE_THERMAL
-    set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+    set_direct_apic_vector(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 }
 
@@ -1332,14 +1304,11 @@ int reprogram_timer(s_time_t timeout)
     return apic_tmict || !timeout;
 }
 
-fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
+void apic_timer_interrupt(struct cpu_user_regs * regs)
 {
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
     ack_APIC_irq();
     perfc_incr(apic_timer);
-    this_cpu(irq_count)++;
     raise_softirq(TIMER_SOFTIRQ);
-    set_irq_regs(old_regs);
 }
 
 static DEFINE_PER_CPU(bool_t, state_dump_pending);
@@ -1354,13 +1323,9 @@ void smp_send_state_dump(unsigned int cp
 /*
  * Spurious interrupts should _never_ happen with our APIC/SMP architecture.
  */
-fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs)
+void spurious_interrupt(struct cpu_user_regs *regs)
 {
     unsigned long v;
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
-
-    this_cpu(irq_count)++;
-    irq_enter();
 
     /*
      * Check if this is a vectored interrupt (most likely, as this is probably
@@ -1381,22 +1346,17 @@ fastcall void smp_spurious_interrupt(str
     printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should "
            "never happen.\n", smp_processor_id());
 
- out:
-    irq_exit();
-    set_irq_regs(old_regs);
+out: ;
 }
 
 /*
  * This interrupt should never happen with our APIC/SMP architecture
  */
 
-fastcall void smp_error_interrupt(struct cpu_user_regs *regs)
+void error_interrupt(struct cpu_user_regs *regs)
 {
     unsigned long v, v1;
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
-    this_cpu(irq_count)++;
-    irq_enter();
     /* First tickle the hardware, only then report what went on. -- REW */
     v = apic_read(APIC_ESR);
     apic_write(APIC_ESR, 0);
@@ -1415,21 +1375,16 @@ fastcall void smp_error_interrupt(struct
     */
     printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
             smp_processor_id(), v , v1);
-    irq_exit();
-    set_irq_regs(old_regs);
 }
 
 /*
  * This interrupt handles performance counters interrupt
  */
 
-fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs)
+void pmu_apic_interrupt(struct cpu_user_regs *regs)
 {
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
     ack_APIC_irq();
-    this_cpu(irq_count)++;
     hvm_do_pmu_interrupt(regs);
-    set_irq_regs(old_regs);
 }
 
 /*
diff -r 58b5b500ba40 -r adf1112309cf xen/arch/x86/cpu/mcheck/k7.c
--- a/xen/arch/x86/cpu/mcheck/k7.c      Thu Mar 29 16:59:33 2012 +0100
+++ b/xen/arch/x86/cpu/mcheck/k7.c      Fri Mar 30 08:50:33 2012 +0100
@@ -17,7 +17,7 @@
 #include "x86_mca.h"
 
 /* Machine Check Handler For AMD Athlon/Duron */
-static fastcall void k7_machine_check(struct cpu_user_regs * regs, long error_code)
+static void k7_machine_check(struct cpu_user_regs * regs, long error_code)
 {
        int recover = 1;
        uint64_t msr_content, mcgst;
diff -r 58b5b500ba40 -r adf1112309cf xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Thu Mar 29 16:59:33 2012 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Fri Mar 30 08:50:33 2012 +0100
@@ -62,7 +62,6 @@ static void intel_thermal_interrupt(stru
     unsigned int cpu = smp_processor_id();
     static DEFINE_PER_CPU(s_time_t, next);
 
-    ack_APIC_irq();
     if (NOW() < per_cpu(next, cpu))
         return;
 
@@ -79,17 +78,13 @@ static void intel_thermal_interrupt(stru
 }
 
 /* Thermal interrupt handler for this CPU setup */
-static void (*__read_mostly vendor_thermal_interrupt)(struct cpu_user_regs *regs)
-        = unexpected_thermal_interrupt;
+static void (*__read_mostly vendor_thermal_interrupt)(
+    struct cpu_user_regs *regs) = unexpected_thermal_interrupt;
 
-fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs)
+void thermal_interrupt(struct cpu_user_regs *regs)
 {
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
-    this_cpu(irq_count)++;
-    irq_enter();
+    ack_APIC_irq();
     vendor_thermal_interrupt(regs);
-    irq_exit();
-    set_irq_regs(old_regs);
 }
 
 /* Thermal monitoring depends on APIC, ACPI and clock modulation */
@@ -1188,15 +1183,12 @@ static void intel_init_cmci(struct cpuin
     mce_set_owner();
 }
 
-fastcall void smp_cmci_interrupt(struct cpu_user_regs *regs)
+void cmci_interrupt(struct cpu_user_regs *regs)
 {
     mctelem_cookie_t mctc;
     struct mca_summary bs;
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
     ack_APIC_irq();
-    this_cpu(irq_count)++;
-    irq_enter();
 
     mctc = mcheck_mca_logout(
         MCA_CMCI_HANDLER, __get_cpu_var(mce_banks_owned), &bs, NULL);
@@ -1212,9 +1204,6 @@ fastcall void smp_cmci_interrupt(struct 
        }
     } else if (mctc != NULL)
         mctelem_dismiss(mctc);
-
-    irq_exit();
-    set_irq_regs(old_regs);
 }
 
 /* MCA */
diff -r 58b5b500ba40 -r adf1112309cf xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Mar 29 16:59:33 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Mar 30 08:50:33 2012 +0100
@@ -2042,45 +2042,8 @@ static void vmx_do_extint(struct cpu_use
     vector &= INTR_INFO_VECTOR_MASK;
     HVMTRACE_1D(INTR, vector);
 
-    switch ( vector )
-    {
-    case IRQ_MOVE_CLEANUP_VECTOR:
-        smp_irq_move_cleanup_interrupt(regs);
-        break;
-    case LOCAL_TIMER_VECTOR:
-        smp_apic_timer_interrupt(regs);
-        break;
-    case EVENT_CHECK_VECTOR:
-        smp_event_check_interrupt(regs);
-        break;
-    case INVALIDATE_TLB_VECTOR:
-        smp_invalidate_interrupt();
-        break;
-    case CALL_FUNCTION_VECTOR:
-        smp_call_function_interrupt(regs);
-        break;
-    case SPURIOUS_APIC_VECTOR:
-        smp_spurious_interrupt(regs);
-        break;
-    case ERROR_APIC_VECTOR:
-        smp_error_interrupt(regs);
-        break;
-    case CMCI_APIC_VECTOR:
-        smp_cmci_interrupt(regs);
-        break;
-    case PMU_APIC_VECTOR:
-        smp_pmu_apic_interrupt(regs);
-        break;
-#ifdef CONFIG_X86_MCE_THERMAL
-    case THERMAL_APIC_VECTOR:
-        smp_thermal_interrupt(regs);
-        break;
-#endif
-    default:
-        regs->entry_vector = vector;
-        do_IRQ(regs);
-        break;
-    }
+    regs->entry_vector = vector;
+    do_IRQ(regs);
 }
 
 static void wbinvd_ipi(void *info)
diff -r 58b5b500ba40 -r adf1112309cf xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Thu Mar 29 16:59:33 2012 +0100
+++ b/xen/arch/x86/irq.c        Fri Mar 30 08:50:33 2012 +0100
@@ -610,14 +610,11 @@ void move_native_irq(struct irq_desc *de
 
 static void dump_irqs(unsigned char key);
 
-fastcall void smp_irq_move_cleanup_interrupt(struct cpu_user_regs *regs)
+void irq_move_cleanup_interrupt(struct cpu_user_regs *regs)
 {
     unsigned vector, me;
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
     ack_APIC_irq();
-    this_cpu(irq_count)++;
-    irq_enter();
 
     me = smp_processor_id();
     for (vector = FIRST_DYNAMIC_VECTOR; vector < NR_VECTORS; vector++) {
@@ -687,9 +684,6 @@ fastcall void smp_irq_move_cleanup_inter
 unlock:
         spin_unlock(&desc->lock);
     }
-
-    irq_exit();
-    set_irq_regs(old_regs);
 }
 
 static void send_cleanup_vector(struct irq_desc *desc)
@@ -770,6 +764,14 @@ void pirq_set_affinity(struct domain *d,
 
 DEFINE_PER_CPU(unsigned int, irq_count);
 
+static void (*direct_apic_vector[NR_VECTORS])(struct cpu_user_regs *);
+void set_direct_apic_vector(
+    uint8_t vector, void (*handler)(struct cpu_user_regs *))
+{
+    BUG_ON(direct_apic_vector[vector] != NULL);
+    direct_apic_vector[vector] = handler;
+}
+
 void do_IRQ(struct cpu_user_regs *regs)
 {
     struct irqaction *action;
@@ -780,20 +782,21 @@ void do_IRQ(struct cpu_user_regs *regs)
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
     
     perfc_incr(irqs);
-
     this_cpu(irq_count)++;
+    irq_enter();
 
     if (irq < 0) {
-        ack_APIC_irq();
-        printk("%s: %d.%d No irq handler for vector (irq %d)\n",
-                __func__, smp_processor_id(), vector, irq);
-        set_irq_regs(old_regs);
-        TRACE_1D(TRC_HW_IRQ_UNMAPPED_VECTOR, vector);
-        return;
+        if (direct_apic_vector[vector] != NULL) {
+            (*direct_apic_vector[vector])(regs);
+        } else {
+            ack_APIC_irq();
+            printk("%s: %d.%d No irq handler for vector (irq %d)\n",
+                   __func__, smp_processor_id(), vector, irq);
+            TRACE_1D(TRC_HW_IRQ_UNMAPPED_VECTOR, vector);
+        }
+        goto out_no_unlock;
     }
 
-    irq_enter();
-
     desc = irq_to_desc(irq);
 
     spin_lock(&desc->lock);
@@ -863,6 +866,7 @@ void do_IRQ(struct cpu_user_regs *regs)
         desc->handler->end(desc, regs->entry_vector);
  out_no_end:
     spin_unlock(&desc->lock);
+ out_no_unlock:
     irq_exit();
     set_irq_regs(old_regs);
 }
diff -r 58b5b500ba40 -r adf1112309cf xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        Thu Mar 29 16:59:33 2012 +0100
+++ b/xen/arch/x86/smp.c        Fri Mar 30 08:50:33 2012 +0100
@@ -218,17 +218,14 @@ static cpumask_t flush_cpumask;
 static const void *flush_va;
 static unsigned int flush_flags;
 
-fastcall void smp_invalidate_interrupt(void)
+void invalidate_interrupt(struct cpu_user_regs *regs)
 {
     ack_APIC_irq();
     perfc_incr(ipis);
-    this_cpu(irq_count)++;
-    irq_enter();
     if ( !__sync_local_execstate() ||
          (flush_flags & (FLUSH_TLB_GLOBAL | FLUSH_CACHE)) )
         flush_area_local(flush_va, flush_flags);
     cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
-    irq_exit();
 }
 
 void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
@@ -386,13 +383,11 @@ void smp_send_nmi_allbutself(void)
     send_IPI_mask(&cpu_online_map, APIC_DM_NMI);
 }
 
-fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
+void event_check_interrupt(struct cpu_user_regs *regs)
 {
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
     ack_APIC_irq();
     perfc_incr(ipis);
     this_cpu(irq_count)++;
-    set_irq_regs(old_regs);
 }
 
 static void __smp_call_function_interrupt(void)
@@ -422,13 +417,9 @@ static void __smp_call_function_interrup
     irq_exit();
 }
 
-fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
+void call_function_interrupt(struct cpu_user_regs *regs)
 {
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
-
     ack_APIC_irq();
     perfc_incr(ipis);
-    this_cpu(irq_count)++;
     __smp_call_function_interrupt();
-    set_irq_regs(old_regs);
 }
diff -r 58b5b500ba40 -r adf1112309cf xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Thu Mar 29 16:59:33 2012 +0100
+++ b/xen/arch/x86/smpboot.c    Fri Mar 30 08:50:33 2012 +0100
@@ -1026,15 +1026,9 @@ void __init smp_intr_init(void)
         cpumask_copy(irq_to_desc(irq)->arch.cpu_mask, &cpu_online_map);
     }
 
-    /* IPI for cleanuping vectors after irq move */
-    set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
-
-    /* IPI for event checking. */
-    set_intr_gate(EVENT_CHECK_VECTOR, event_check_interrupt);
-
-    /* IPI for invalidation */
-    set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
-
-    /* IPI for generic function call */
-    set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+    /* Direct IPI vectors. */
+    set_direct_apic_vector(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
+    set_direct_apic_vector(EVENT_CHECK_VECTOR, event_check_interrupt);
+    set_direct_apic_vector(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+    set_direct_apic_vector(CALL_FUNCTION_VECTOR, call_function_interrupt);
 }
diff -r 58b5b500ba40 -r adf1112309cf xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Thu Mar 29 16:59:33 2012 +0100
+++ b/xen/include/asm-x86/irq.h Fri Mar 30 08:50:33 2012 +0100
@@ -79,27 +79,19 @@ static inline struct cpu_user_regs *set_
 
 #define platform_legacy_irq(irq)       ((irq) < 16)
 
-fastcall void event_check_interrupt(void);
-fastcall void invalidate_interrupt(void);
-fastcall void call_function_interrupt(void);
-fastcall void apic_timer_interrupt(void);
-fastcall void error_interrupt(void);
-fastcall void pmu_apic_interrupt(void);
-fastcall void spurious_interrupt(void);
-fastcall void thermal_interrupt(void);
-fastcall void cmci_interrupt(void);
-fastcall void irq_move_cleanup_interrupt(void);
+void event_check_interrupt(struct cpu_user_regs *regs);
+void invalidate_interrupt(struct cpu_user_regs *regs);
+void call_function_interrupt(struct cpu_user_regs *regs);
+void apic_timer_interrupt(struct cpu_user_regs *regs);
+void error_interrupt(struct cpu_user_regs *regs);
+void pmu_apic_interrupt(struct cpu_user_regs *regs);
+void spurious_interrupt(struct cpu_user_regs *regs);
+void thermal_interrupt(struct cpu_user_regs *regs);
+void cmci_interrupt(struct cpu_user_regs *regs);
+void irq_move_cleanup_interrupt(struct cpu_user_regs *regs);
 
-fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_invalidate_interrupt(void);
-fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_apic_timer_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_error_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_cmci_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_irq_move_cleanup_interrupt(struct cpu_user_regs *regs);
+void set_direct_apic_vector(
+    uint8_t vector, void (*handler)(struct cpu_user_regs *));
 
 void do_IRQ(struct cpu_user_regs *regs);
 
diff -r 58b5b500ba40 -r adf1112309cf xen/include/asm-x86/x86_32/asm_defns.h
--- a/xen/include/asm-x86/x86_32/asm_defns.h    Thu Mar 29 16:59:33 2012 +0100
+++ b/xen/include/asm-x86/x86_32/asm_defns.h    Fri Mar 30 08:50:33 2012 +0100
@@ -107,21 +107,6 @@ 1:      addl  $4,%esp;
 #define FIXUP_RING0_GUEST_STACK
 #endif
 
-#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
-#define XBUILD_SMP_INTERRUPT(x,v)               \
-__asm__(                                        \
-    "\n"__ALIGN_STR"\n"                         \
-    ".globl " STR(x) "\n\t"                     \
-    STR(x) ":\n\t"                              \
-    "pushl $"#v"<<16\n\t"                       \
-    STR(FIXUP_RING0_GUEST_STACK)                \
-    STR(SAVE_ALL(1f,1f)) "\n\t"                 \
-    "1:movl %esp,%eax\n\t"                      \
-    "pushl %eax\n\t"                            \
-    "call "STR(smp_##x)"\n\t"                   \
-    "addl $4,%esp\n\t"                          \
-    "jmp ret_from_intr\n");
-
 #define BUILD_COMMON_IRQ()                      \
 __asm__(                                        \
     "\n" __ALIGN_STR"\n"                        \
diff -r 58b5b500ba40 -r adf1112309cf xen/include/asm-x86/x86_64/asm_defns.h
--- a/xen/include/asm-x86/x86_64/asm_defns.h    Thu Mar 29 16:59:33 2012 +0100
+++ b/xen/include/asm-x86/x86_64/asm_defns.h    Fri Mar 30 08:50:33 2012 +0100
@@ -90,19 +90,6 @@ 1:      addq  $8,%rsp;
 #define REX64_PREFIX "rex64/"
 #endif
 
-#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
-#define XBUILD_SMP_INTERRUPT(x,v)               \
-__asm__(                                        \
-    "\n"__ALIGN_STR"\n"                         \
-    ".globl " STR(x) "\n\t"                     \
-    STR(x) ":\n\t"                              \
-    "pushq $0\n\t"                              \
-    "movl $"#v",4(%rsp)\n\t"                    \
-    STR(SAVE_ALL)                               \
-    "movq %rsp,%rdi\n\t"                        \
-    "callq "STR(smp_##x)"\n\t"                  \
-    "jmp ret_from_intr\n");
-
 #define BUILD_COMMON_IRQ()                      \
 __asm__(                                        \
     "\n" __ALIGN_STR"\n"                        \

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.