
[Xen-changelog] [xen-unstable] Merge in MCA tree



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 05d227d819355f1f3501ac791f0ece9b6194010d
# Parent  f78e499dd669c5736abb8c3b8c6bf16cf3ec7f3f
# Parent  344fc55eb52fec67bd8cb54cb629fce1ca23485a
Merge in MCA tree
---
 linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c |   61 +
 linux-2.6-xen-sparse/include/asm-ia64/irq.h      |    4 
 linux-2.6-xen-sparse/include/asm-ia64/sal.h      |  904 +++++++++++++++++++++++
 xen/arch/ia64/asm-offsets.c                      |   34 
 xen/arch/ia64/linux-xen/mca.c                    |  358 ++++++++-
 xen/arch/ia64/linux-xen/mca_asm.S                |  152 +++
 xen/arch/ia64/linux-xen/smpboot.c                |    2 
 xen/arch/ia64/xen/dom_fw.c                       |   23 
 xen/arch/ia64/xen/fw_emul.c                      |  241 +++++-
 xen/arch/ia64/xen/mm_init.c                      |    9 
 xen/arch/ia64/xen/vcpu.c                         |    2 
 xen/include/asm-ia64/event.h                     |    2 
 xen/include/asm-ia64/linux-xen/asm/mca_asm.h     |    4 
 xen/include/asm-ia64/linux-xen/linux/interrupt.h |    6 
 xen/include/asm-ia64/vcpu.h                      |    2 
 xen/include/asm-ia64/xenmca.h                    |   34 
 xen/include/public/arch-ia64.h                   |    2 
 xen/include/xen/softirq.h                        |    6 
 18 files changed, 1785 insertions(+), 61 deletions(-)

diff -r f78e499dd669 -r 05d227d81935 linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c  Tue Nov 14 14:59:37 2006 -0700
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c  Wed Nov 15 12:02:09 2006 -0700
@@ -241,9 +241,15 @@ static DEFINE_PER_CPU(int, timer_irq) = 
 static DEFINE_PER_CPU(int, timer_irq) = -1;
 static DEFINE_PER_CPU(int, ipi_irq) = -1;
 static DEFINE_PER_CPU(int, resched_irq) = -1;
+static DEFINE_PER_CPU(int, cmc_irq) = -1;
+static DEFINE_PER_CPU(int, cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, cpep_irq) = -1;
 static char timer_name[NR_CPUS][15];
 static char ipi_name[NR_CPUS][15];
 static char resched_name[NR_CPUS][15];
+static char cmc_name[NR_CPUS][15];
+static char cmcp_name[NR_CPUS][15];
+static char cpep_name[NR_CPUS][15];
 
 struct saved_irq {
        unsigned int irq;
@@ -323,6 +329,43 @@ xen_register_percpu_irq (unsigned int ir
                        break;
                case IA64_SPURIOUS_INT_VECTOR:
                        break;
+               case IA64_CMC_VECTOR:
+                       sprintf(cmc_name[cpu], "%s%d", action->name, cpu);
+                       ret = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
+                                                     action->handler,
+                                                     action->flags,
+                                                     cmc_name[cpu],
+                                                     action->dev_id);
+                       per_cpu(cmc_irq,cpu) = ret;
+                       printk(KERN_INFO "register VIRQ_MCA_CMC (%s) to xen "
+                              "irq (%d)\n", cmc_name[cpu], ret);
+                       break;
+               case IA64_CMCP_VECTOR:
+                       sprintf(cmcp_name[cpu], "%s%d", action->name, cpu);
+                       ret = bind_ipi_to_irqhandler(CMCP_VECTOR, cpu,
+                                                    action->handler,
+                                                    action->flags,
+                                                    cmcp_name[cpu],
+                                                    action->dev_id);
+                       per_cpu(cmcp_irq,cpu) = ret;
+                       printk(KERN_INFO "register CMCP_VECTOR (%s) to xen "
+                              "irq (%d)\n", cmcp_name[cpu], ret);
+                       break;
+               case IA64_CPEP_VECTOR:
+                       sprintf(cpep_name[cpu], "%s%d", action->name, cpu);
+                       ret = bind_ipi_to_irqhandler(CPEP_VECTOR, cpu,
+                                                    action->handler,
+                                                    action->flags,
+                                                    cpep_name[cpu],
+                                                    action->dev_id);
+                       per_cpu(cpep_irq,cpu) = ret;
+                       printk(KERN_INFO "register CPEP_VECTOR (%s) to xen "
+                              "irq (%d)\n", cpep_name[cpu], ret);
+                       break;
+               case IA64_CPE_VECTOR:
+                       printk(KERN_WARNING "register IA64_CPE_VECTOR "
+                              "IGNORED\n");
+                       break;
                default:
                        printk(KERN_WARNING "Percpu irq %d is unsupported by xen!\n", irq);
                        break;
@@ -373,6 +416,18 @@ unbind_evtchn_callback(struct notifier_b
 
        if (action == CPU_DEAD) {
                /* Unregister evtchn.  */
+               if (per_cpu(cpep_irq,cpu) >= 0) {
+                       unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
+                       per_cpu(cpep_irq, cpu) = -1;
+               }
+               if (per_cpu(cmcp_irq,cpu) >= 0) {
+                       unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
+                       per_cpu(cmcp_irq, cpu) = -1;
+               }
+               if (per_cpu(cmc_irq,cpu) >= 0) {
+                       unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
+                       per_cpu(cmc_irq, cpu) = -1;
+               }
                if (per_cpu(ipi_irq,cpu) >= 0) {
                        unbind_from_irqhandler (per_cpu(ipi_irq, cpu), NULL);
                        per_cpu(ipi_irq, cpu) = -1;
@@ -503,6 +558,12 @@ ia64_send_ipi (int cpu, int vector, int 
                case IA64_IPI_RESCHEDULE:
                        irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
                        break;
+               case IA64_CMCP_VECTOR:
+                       irq = per_cpu(ipi_to_irq, cpu)[CMCP_VECTOR];
+                       break;
+               case IA64_CPEP_VECTOR:
+                       irq = per_cpu(ipi_to_irq, cpu)[CPEP_VECTOR];
+                       break;
                default:
                        printk(KERN_WARNING"Unsupported IPI type 0x%x\n", vector);
                        irq = 0;
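
A note on the hunks above: under Xen, dom0 does not program the CMC/CMCP/CPEP interrupt vectors in hardware. Instead the CMC vector is bound to the new VIRQ_MCA_CMC virtual IRQ and the two polling-cascade vectors are bound as per-CPU IPIs, with ia64_send_ipi() translating the vector number back into the per-CPU event-channel irq. The following user-space sketch only mirrors that table-driven translation; every name in it (mock_bind_ipi, mock_send_ipi, NR_MOCK_CPUS and the slot numbers) is illustrative, not Xen code.

/*
 * Minimal user-space sketch of the per-CPU "vector slot -> event-channel
 * irq" table used above.  All names are mock names.
 */
#include <stdio.h>

#define NR_MOCK_CPUS 4
enum { RESCHEDULE_SLOT, IPI_SLOT, CMCP_SLOT, CPEP_SLOT, NR_SLOTS };

static int ipi_to_irq[NR_MOCK_CPUS][NR_SLOTS];

/* Stand-in for bind_ipi_to_irqhandler(): allocate an irq and record it. */
static int mock_bind_ipi(int slot, int cpu)
{
        static int next_irq = 16;               /* fake event-channel irq numbers */
        ipi_to_irq[cpu][slot] = next_irq;
        return next_irq++;
}

/* Stand-in for the ia64_send_ipi() switch: vector slot -> per-CPU irq. */
static int mock_send_ipi(int cpu, int slot)
{
        int irq = ipi_to_irq[cpu][slot];
        printf("cpu%d: raise irq %d for slot %d\n", cpu, irq, slot);
        return irq;
}

int main(void)
{
        for (int cpu = 0; cpu < NR_MOCK_CPUS; cpu++) {
                mock_bind_ipi(CMCP_SLOT, cpu);  /* CMC polling cascade */
                mock_bind_ipi(CPEP_SLOT, cpu);  /* CPE polling cascade */
        }
        mock_send_ipi(1, CMCP_SLOT);            /* what the CMC poll timer triggers */
        return 0;
}
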
diff -r f78e499dd669 -r 05d227d81935 linux-2.6-xen-sparse/include/asm-ia64/irq.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/irq.h       Tue Nov 14 14:59:37 2006 -0700
+++ b/linux-2.6-xen-sparse/include/asm-ia64/irq.h       Wed Nov 15 12:02:09 2006 -0700
@@ -42,7 +42,9 @@
 
 #define RESCHEDULE_VECTOR      0
 #define IPI_VECTOR             1
-#define NR_IPIS                        2
+#define CMCP_VECTOR            2
+#define CPEP_VECTOR            3
+#define NR_IPIS                        4
 #endif /* CONFIG_XEN */
 
 /*
diff -r f78e499dd669 -r 05d227d81935 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c       Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/arch/ia64/asm-offsets.c       Wed Nov 15 12:02:09 2006 -0700
@@ -12,6 +12,7 @@
 #include <public/xen.h>
 #include <asm/tlb.h>
 #include <asm/regs.h>
+#include <asm/xenmca.h>
 
 #define task_struct vcpu
 
@@ -221,4 +222,37 @@ void foo(void)
        DEFINE(FAST_HYPERPRIVOP_PERFC_OFS, offsetof (struct perfcounter, fast_hyperprivop));
        DEFINE(FAST_REFLECT_PERFC_OFS, offsetof (struct perfcounter, fast_reflect));
 #endif
+
+       BLANK();
+       DEFINE(IA64_CPUINFO_PTCE_BASE_OFFSET,
+              offsetof(struct cpuinfo_ia64, ptce_base));
+       DEFINE(IA64_CPUINFO_PTCE_COUNT_OFFSET,
+              offsetof(struct cpuinfo_ia64, ptce_count));
+       DEFINE(IA64_CPUINFO_PTCE_STRIDE_OFFSET,
+              offsetof(struct cpuinfo_ia64, ptce_stride));
+
+       BLANK();
+       DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
+              offsetof(struct ia64_mca_cpu, proc_state_dump));
+       DEFINE(IA64_MCA_CPU_STACK_OFFSET,
+              offsetof(struct ia64_mca_cpu, stack));
+       DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
+              offsetof(struct ia64_mca_cpu, stackframe));
+       DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
+              offsetof(struct ia64_mca_cpu, rbstore));
+
+       DEFINE(IA64_DOMAIN_SHARED_INFO_OFFSET,
+              offsetof(struct domain, shared_info));
+       DEFINE(IA64_DOMAIN_SHARED_INFO_VA_OFFSET,
+              offsetof(struct domain, arch.shared_info_va));
+       DEFINE(IA64_DOMAIN_FLAGS_OFFSET,
+              offsetof(struct domain, arch.flags));
+
+       DEFINE(IA64_VCPU_VHPT_MADDR_OFFSET,
+              offsetof(struct vcpu, arch.vhpt_maddr));
+
+       BLANK();
+       DEFINE(IA64_MCA_TLB_INFO_SIZE, sizeof(struct ia64_mca_tlb_info));
+       DEFINE(IA64_MCA_PERCPU_OFFSET,
+              offsetof(struct ia64_mca_tlb_info, percpu_paddr));
 }
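
The DEFINE() additions above exist so that mca_asm.S can address fields of C structures (struct ia64_mca_cpu, struct domain, struct ia64_mca_tlb_info) by numeric offset. The build normally turns these DEFINE() lines into assembler constants that are post-processed into a generated header; the small standalone program below only illustrates the underlying idea with offsetof() and a placeholder struct mock_mca_cpu, and is not the real generator.

/*
 * Standalone illustration of exporting structure offsets as assembler
 * constants.  struct mock_mca_cpu is a placeholder, not the real
 * struct ia64_mca_cpu.
 */
#include <stdio.h>
#include <stddef.h>

struct mock_mca_cpu {
        unsigned long proc_state_dump[128];
        unsigned char stack[8192];
        unsigned long stackframe[32];
        unsigned long rbstore[256];
};

int main(void)
{
        printf("#define IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET %zu\n",
               offsetof(struct mock_mca_cpu, proc_state_dump));
        printf("#define IA64_MCA_CPU_STACK_OFFSET %zu\n",
               offsetof(struct mock_mca_cpu, stack));
        printf("#define IA64_MCA_CPU_STACKFRAME_OFFSET %zu\n",
               offsetof(struct mock_mca_cpu, stackframe));
        printf("#define IA64_MCA_CPU_RBSTORE_OFFSET %zu\n",
               offsetof(struct mock_mca_cpu, rbstore));
        return 0;
}
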
diff -r f78e499dd669 -r 05d227d81935 xen/arch/ia64/linux-xen/mca.c
--- a/xen/arch/ia64/linux-xen/mca.c     Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/arch/ia64/linux-xen/mca.c     Wed Nov 15 12:02:09 2006 -0700
@@ -81,6 +81,9 @@
 #include <xen/symbols.h>
 #include <xen/mm.h>
 #include <xen/console.h>
+#include <xen/event.h>
+#include <xen/softirq.h>
+#include <asm/xenmca.h>
 #endif
 
 #if defined(IA64_MCA_DEBUG_INFO)
@@ -108,18 +111,33 @@ unsigned long __per_cpu_mca[NR_CPUS];
 /* In mca_asm.S */
 extern void                    ia64_monarch_init_handler (void);
 extern void                    ia64_slave_init_handler (void);
+#ifdef XEN
+extern void setup_vector (unsigned int vec, struct irqaction *action);
+#define setup_irq(irq, action) setup_vector(irq, action)
+#endif
 
 static ia64_mc_info_t          ia64_mc_info;
 
-#ifndef XEN
+#ifdef XEN
+#define jiffies                        NOW()
+#undef HZ
+#define HZ                     1000000000UL
+#endif
+
 #define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
 #define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
 #define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
 #define CPE_HISTORY_LENGTH    5
 #define CMC_HISTORY_LENGTH    5
 
+#ifndef XEN 
 static struct timer_list cpe_poll_timer;
 static struct timer_list cmc_poll_timer;
+#else
+#define mod_timer(timer, expires)      set_timer(timer, expires)
+static struct timer cpe_poll_timer;
+static struct timer cmc_poll_timer;
+#endif
 /*
  * This variable tells whether we are currently in polling mode.
  * Start with this in the wrong state so we won't play w/ timers
@@ -136,11 +154,9 @@ static int cpe_poll_enabled = 1;
 static int cpe_poll_enabled = 1;
 
 extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
-#endif /* !XEN */
 
 static int mca_init;
 
-#ifndef XEN
 /*
  * IA64_MCA log support
  */
@@ -157,11 +173,24 @@ typedef struct ia64_state_log_s
 
 static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 
+#ifndef XEN
 #define IA64_LOG_ALLOCATE(it, size) \
        {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
                (ia64_err_rec_t *)alloc_bootmem(size); \
        ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
                (ia64_err_rec_t *)alloc_bootmem(size);}
+#else
+#define IA64_LOG_ALLOCATE(it, size) \
+       do { \
+               unsigned int pageorder; \
+               pageorder  = get_order_from_bytes(sizeof(struct ia64_mca_cpu)); \
+               ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
+                 (ia64_err_rec_t *)alloc_xenheap_pages(pageorder); \
+               ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
+                 (ia64_err_rec_t *)alloc_xenheap_pages(pageorder); \
+       } while(0)
+#endif
+
 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
 #define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
 #define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -176,6 +205,12 @@ static ia64_state_log_t ia64_state_log[I
 #define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
 #define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
 
+#ifdef XEN
+struct list_head sal_queue[IA64_MAX_LOG_TYPES];
+sal_log_record_header_t *sal_record = NULL;
+DEFINE_SPINLOCK(sal_queue_lock);
+#endif
+
 /*
  * ia64_log_init
  *     Reset the OS ia64 log buffer
@@ -200,8 +235,19 @@ ia64_log_init(int sal_info_type)
        IA64_LOG_ALLOCATE(sal_info_type, max_size);
        memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
        memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
-}
-
+
+#ifdef XEN
+       if (sal_record == NULL) {
+               unsigned int pageorder;
+               pageorder  = get_order_from_bytes(max_size);
+               sal_record = (sal_log_record_header_t *)
+                            alloc_xenheap_pages(pageorder);
+               BUG_ON(sal_record == NULL);
+       }
+#endif
+}
+
+#ifndef XEN
 /*
  * ia64_log_get
  *
@@ -277,15 +323,159 @@ ia64_mca_log_sal_error_record(int sal_in
        if (rh->severity == sal_log_severity_corrected)
                ia64_sal_clear_state_info(sal_info_type);
 }
+#else /* !XEN */
+/*
+ * ia64_log_queue
+ *
+ *     Get the current MCA log from SAL and copy it into the OS log buffer.
+ *
+ *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
+ *  Outputs :   size        (total record length)
+ *              *buffer     (ptr to error record)
+ *
+ */
+static u64
+ia64_log_queue(int sal_info_type, int virq)
+{
+       sal_log_record_header_t     *log_buffer;
+       u64                         total_len = 0;
+       int                         s;
+       sal_queue_entry_t           *e;
+       unsigned long               flags;
+
+       IA64_LOG_LOCK(sal_info_type);
+
+       /* Get the process state information */
+       log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
+
+       total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
+
+       if (total_len) {
+               int queue_type;
+
+               spin_lock_irqsave(&sal_queue_lock, flags);
+
+               if (sal_info_type == SAL_INFO_TYPE_MCA && virq == VIRQ_MCA_CMC)
+                       queue_type = SAL_INFO_TYPE_CMC;
+               else
+                       queue_type = sal_info_type;
+
+               e = xmalloc(sal_queue_entry_t);
+               BUG_ON(e == NULL);
+               e->cpuid = smp_processor_id();
+               e->sal_info_type = sal_info_type;
+               e->vector = IA64_CMC_VECTOR;
+               e->virq = virq;
+               e->length = total_len;
+
+               list_add_tail(&e->list, &sal_queue[queue_type]);
+               spin_unlock_irqrestore(&sal_queue_lock, flags);
+
+               IA64_LOG_INDEX_INC(sal_info_type);
+               IA64_LOG_UNLOCK(sal_info_type);
+               if (sal_info_type != SAL_INFO_TYPE_MCA &&
+                   sal_info_type != SAL_INFO_TYPE_INIT) {
+                       IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
+                                      "Record length = %ld\n", __FUNCTION__,
+                                      sal_info_type, total_len);
+               }
+               return total_len;
+       } else {
+               IA64_LOG_UNLOCK(sal_info_type);
+               return 0;
+       }
+}
+#endif /* !XEN */
 
 /*
  * platform dependent error handling
  */
-#endif /* !XEN */
 #ifndef PLATFORM_MCA_HANDLERS
-#ifndef XEN
 
 #ifdef CONFIG_ACPI
+
+#ifdef XEN
+/**
+ *     Copy from linux/include/asm-generic/bug.h
+ */
+#define WARN_ON(condition) do { \
+       if (unlikely((condition)!=0)) { \
+               printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+               dump_stack(); \
+       } \
+} while (0)
+
+/**
+ *     Copy from linux/kernel/irq/manage.c
+ *
+ *     disable_irq_nosync - disable an irq without waiting
+ *     @irq: Interrupt to disable
+ *
+ *     Disable the selected interrupt line.  Disables and Enables are
+ *     nested.
+ *     Unlike disable_irq(), this function does not ensure existing
+ *     instances of the IRQ handler have completed before returning.
+ *
+ *     This function may be called from IRQ context.
+ */
+void disable_irq_nosync(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       unsigned long flags;
+
+       if (irq >= NR_IRQS)
+               return;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       if (!desc->depth++) {
+               desc->status |= IRQ_DISABLED;
+               desc->handler->disable(irq);
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ *     Copy from linux/kernel/irq/manage.c
+ *
+ *     enable_irq - enable handling of an irq
+ *     @irq: Interrupt to enable
+ *
+ *     Undoes the effect of one call to disable_irq().  If this
+ *     matches the last disable, processing of interrupts on this
+ *     IRQ line is re-enabled.
+ *
+ *     This function may be called from IRQ context.
+ */
+void enable_irq(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       unsigned long flags;
+
+       if (irq >= NR_IRQS)
+               return;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       switch (desc->depth) {
+       case 0:
+               WARN_ON(1);
+               break;
+       case 1: {
+               unsigned int status = desc->status & ~IRQ_DISABLED;
+
+               desc->status = status;
+               if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+                       desc->status = status | IRQ_REPLAY;
+                       hw_resend_irq(desc->handler,irq);
+               }
+               desc->handler->enable(irq);
+               /* fall-through */
+       }
+       default:
+               desc->depth--;
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
+#endif /* XEN */
 
 int cpe_vector = -1;
 
@@ -302,8 +492,15 @@ ia64_mca_cpe_int_handler (int cpe_irq, v
        /* SAL spec states this should run w/ interrupts enabled */
        local_irq_enable();
 
+#ifndef XEN
        /* Get the CPE error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
+#else
+       /* CPE error does not inform to dom0 but the following codes are 
+          reserved for future implementation */
+/*     ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE); */
+/*     send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CPE); */
+#endif
 
        spin_lock(&cpe_history_lock);
        if (!cpe_poll_enabled && cpe_vector >= 0) {
@@ -345,7 +542,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, v
 }
 
 #endif /* CONFIG_ACPI */
-#endif /* !XEN */
 
 static void
 show_min_state (pal_min_state_area_t *minstate)
@@ -593,7 +789,6 @@ init_handler_platform (pal_min_state_are
        while (1);                      /* hang city if no debugger */
 }
 
-#ifndef XEN
 #ifdef CONFIG_ACPI
 /*
  * ia64_mca_register_cpev
@@ -624,9 +819,7 @@ ia64_mca_register_cpev (int cpev)
 }
 #endif /* CONFIG_ACPI */
 
-#endif /* !XEN */
 #endif /* PLATFORM_MCA_HANDLERS */
-#ifndef XEN
 
 /*
  * ia64_mca_cmc_vector_setup
@@ -713,6 +906,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
                       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
+#ifndef XEN
 /*
  * ia64_mca_cmc_vector_disable_keventd
  *
@@ -736,6 +930,7 @@ ia64_mca_cmc_vector_enable_keventd(void 
 {
        on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
+#endif /* !XEN */
 
 /*
  * ia64_mca_wakeup_ipi_wait
@@ -887,15 +1082,26 @@ static void
 static void
 ia64_return_to_sal_check(int recover)
 {
+#ifdef XEN
+       int cpu = smp_processor_id();
+#endif
 
        /* Copy over some relevant stuff from the sal_to_os_mca_handoff
         * so that it can be used at the time of os_mca_to_sal_handoff
         */
+#ifdef XEN
+       ia64_os_to_sal_handoff_state.imots_sal_gp =
+               ia64_sal_to_os_handoff_state[cpu].imsto_sal_gp;
+
+       ia64_os_to_sal_handoff_state.imots_sal_check_ra =
+               ia64_sal_to_os_handoff_state[cpu].imsto_sal_check_ra;
+#else
        ia64_os_to_sal_handoff_state.imots_sal_gp =
                ia64_sal_to_os_handoff_state.imsto_sal_gp;
 
        ia64_os_to_sal_handoff_state.imots_sal_check_ra =
                ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
+#endif
 
        if (recover)
                ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
@@ -905,8 +1111,13 @@ ia64_return_to_sal_check(int recover)
        /* Default = tell SAL to return to same context */
        ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
 
+#ifdef XEN
+       ia64_os_to_sal_handoff_state.imots_new_min_state =
+               (u64 *)ia64_sal_to_os_handoff_state[cpu].pal_min_state;
+#else
        ia64_os_to_sal_handoff_state.imots_new_min_state =
                (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
+#endif
 
 }
 
@@ -954,27 +1165,44 @@ void
 void
 ia64_mca_ucmc_handler(void)
 {
+#ifdef XEN
+       int cpu = smp_processor_id();
+       pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
+               &ia64_sal_to_os_handoff_state[cpu].proc_state_param;
+#else
        pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
                &ia64_sal_to_os_handoff_state.proc_state_param;
+#endif
        int recover; 
 
+#ifndef XEN
        /* Get the MCA error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
+#else
+       ia64_log_queue(SAL_INFO_TYPE_MCA, VIRQ_MCA_CMC);
+       send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
+#endif
 
        /* TLB error is only exist in this SAL error record */
        recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
        /* other error recovery */
+#ifndef XEN
           || (ia64_mca_ucmc_extension 
                && ia64_mca_ucmc_extension(
                        IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
                        &ia64_sal_to_os_handoff_state,
                        &ia64_os_to_sal_handoff_state)); 
-
+#else
+       ;
+#endif
+
+#ifndef XEN
        if (recover) {
                sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
                rh->severity = sal_log_severity_corrected;
                ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
        }
+#endif
        /*
         *  Wakeup all the processors which are spinning in the rendezvous
         *  loop.
@@ -985,8 +1213,10 @@ ia64_mca_ucmc_handler(void)
        ia64_return_to_sal_check(recover);
 }
 
+#ifndef XEN
 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
 static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+#endif
 
 /*
  * ia64_mca_cmc_int_handler
@@ -1016,8 +1246,13 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
        /* SAL spec states this should run w/ interrupts enabled */
        local_irq_enable();
 
+#ifndef XEN    
        /* Get the CMC error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
+#else
+       ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
+       send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
+#endif
 
        spin_lock(&cmc_history_lock);
        if (!cmc_polling_enabled) {
@@ -1034,7 +1269,12 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
 
                        cmc_polling_enabled = 1;
                        spin_unlock(&cmc_history_lock);
+#ifndef XEN    /* XXX FIXME */
                        schedule_work(&cmc_disable_work);
+#else
+                       cpumask_raise_softirq(cpu_online_map,
+                                             CMC_DISABLE_SOFTIRQ);
+#endif
 
                        /*
                         * Corrected errors will still be corrected, but
@@ -1083,7 +1323,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
        if (start_count == -1)
                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
 
+#ifndef XEN
        ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
+#endif
 
        for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
 
@@ -1094,7 +1336,12 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
                if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
 
                        printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
+#ifndef XEN    /* XXX FIXME */
                        schedule_work(&cmc_enable_work);
+#else
+                       cpumask_raise_softirq(cpu_online_map,
+                                             CMC_ENABLE_SOFTIRQ);
+#endif
                        cmc_polling_enabled = 0;
 
                } else {
@@ -1104,7 +1351,6 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
 
                start_count = -1;
        }
-
        return IRQ_HANDLED;
 }
 
@@ -1118,7 +1364,11 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
  *
  */
 static void
+#ifndef XEN
 ia64_mca_cmc_poll (unsigned long dummy)
+#else
+ia64_mca_cmc_poll (void *dummy)
+#endif
 {
        /* Trigger a CMC interrupt cascade  */
        platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
@@ -1144,7 +1394,11 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
 ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
 {
        static int start_count = -1;
+#ifdef XEN
+       static unsigned long poll_time = MIN_CPE_POLL_INTERVAL;
+#else
        static int poll_time = MIN_CPE_POLL_INTERVAL;
+#endif
        unsigned int cpuid;
 
        cpuid = smp_processor_id();
@@ -1153,7 +1407,9 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
        if (start_count == -1)
                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
 
+#ifndef XEN
        ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
+#endif
 
        for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
 
@@ -1180,7 +1436,6 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
                        mod_timer(&cpe_poll_timer, jiffies + poll_time);
                start_count = -1;
        }
-
        return IRQ_HANDLED;
 }
 
@@ -1195,14 +1450,17 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
  *
  */
 static void
+#ifndef XEN
 ia64_mca_cpe_poll (unsigned long dummy)
+#else
+ia64_mca_cpe_poll (void *dummy)
+#endif
 {
        /* Trigger a CPE interrupt cascade  */
        platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
 #endif /* CONFIG_ACPI */
-#endif /* !XEN */
 
 /*
  * C portion of the OS INIT handler
@@ -1248,7 +1506,6 @@ ia64_init_handler (struct pt_regs *pt, s
        init_handler_platform(ms, pt, sw);      /* call platform specific routines */
 }
 
-#ifndef XEN
 static int __init
 ia64_mca_disable_cpe_polling(char *str)
 {
@@ -1260,42 +1517,53 @@ __setup("disable_cpe_poll", ia64_mca_dis
 
 static struct irqaction cmci_irqaction = {
        .handler =      ia64_mca_cmc_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "cmc_hndlr"
 };
 
 static struct irqaction cmcp_irqaction = {
        .handler =      ia64_mca_cmc_int_caller,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "cmc_poll"
 };
 
 static struct irqaction mca_rdzv_irqaction = {
        .handler =      ia64_mca_rendez_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "mca_rdzv"
 };
 
 static struct irqaction mca_wkup_irqaction = {
        .handler =      ia64_mca_wakeup_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "mca_wkup"
 };
 
 #ifdef CONFIG_ACPI
 static struct irqaction mca_cpe_irqaction = {
        .handler =      ia64_mca_cpe_int_handler,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "cpe_hndlr"
 };
 
 static struct irqaction mca_cpep_irqaction = {
        .handler =      ia64_mca_cpe_int_caller,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "cpe_poll"
 };
 #endif /* CONFIG_ACPI */
-#endif /* !XEN */
 
 /* Do per-CPU MCA-related initialization.  */
 
@@ -1329,6 +1597,13 @@ ia64_mca_cpu_init(void *cpu_data)
 #endif
                }
        }
+#ifdef XEN
+       else {
+               int i;
+               for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
+                       ia64_log_queue(i, 0);
+       }
+#endif
 
         /*
          * The MCA info structure was allocated earlier and its
@@ -1395,17 +1670,14 @@ ia64_mca_init(void)
        ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
        ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
        ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
-#ifdef XEN
-       s64 rc;
-
-       slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
-
-       IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
-#else
        int i;
        s64 rc;
        struct ia64_sal_retval isrv;
        u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;  /* platform specific */
+
+#ifdef XEN
+       slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
+#endif
 
        IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
 
@@ -1451,7 +1723,6 @@ ia64_mca_init(void)
        }
 
        IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
-#endif /* !XEN */
 
        ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
        /*
@@ -1503,7 +1774,6 @@ ia64_mca_init(void)
 
        IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
 
-#ifndef XEN
        /*
         *  Configure the CMCI/P vector and handler. Interrupts for CMC are
         *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -1531,13 +1801,26 @@ ia64_mca_init(void)
        ia64_log_init(SAL_INFO_TYPE_INIT);
        ia64_log_init(SAL_INFO_TYPE_CMC);
        ia64_log_init(SAL_INFO_TYPE_CPE);
-#endif /* !XEN */
+
+#ifdef XEN
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_MCA]);
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_INIT]);
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_CMC]);
+       INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_CPE]);
+
+       open_softirq(CMC_DISABLE_SOFTIRQ,
+                    (softirq_handler)ia64_mca_cmc_vector_disable);
+       open_softirq(CMC_ENABLE_SOFTIRQ,
+                    (softirq_handler)ia64_mca_cmc_vector_enable);
+
+       for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
+               ia64_log_queue(i, 0);
+#endif
 
        mca_init = 1;
        printk(KERN_INFO "MCA related initialization done\n");
 }
 
-#ifndef XEN
 /*
  * ia64_mca_late_init
  *
@@ -1555,20 +1838,34 @@ ia64_mca_late_init(void)
                return 0;
 
        /* Setup the CMCI/P vector and handler */
+#ifndef XEN
        init_timer(&cmc_poll_timer);
        cmc_poll_timer.function = ia64_mca_cmc_poll;
+#else
+       init_timer(&cmc_poll_timer, ia64_mca_cmc_poll, NULL, smp_processor_id());
+       printk("INIT_TIMER(cmc_poll_timer): on cpu%d\n", smp_processor_id());
+#endif
 
        /* Unmask/enable the vector */
        cmc_polling_enabled = 0;
+#ifndef XEN    /* XXX FIXME */
        schedule_work(&cmc_enable_work);
+#else
+       cpumask_raise_softirq(cpu_online_map, CMC_ENABLE_SOFTIRQ);
+#endif
 
        IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
 
 #ifdef CONFIG_ACPI
        /* Setup the CPEI/P vector and handler */
        cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+#ifndef        XEN
        init_timer(&cpe_poll_timer);
        cpe_poll_timer.function = ia64_mca_cpe_poll;
+#else
+       init_timer(&cpe_poll_timer, ia64_mca_cpe_poll, NULL, smp_processor_id());
+       printk("INIT_TIMER(cpe_poll_timer): on cpu%d\n", smp_processor_id());
+#endif
 
        {
                irq_desc_t *desc;
@@ -1598,5 +1895,8 @@ ia64_mca_late_init(void)
        return 0;
 }
 
+#ifndef XEN
 device_initcall(ia64_mca_late_init);
-#endif /* !XEN */
+#else
+__initcall(ia64_mca_late_init);
+#endif
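
Summary of the mca.c rework above, with a sketch: instead of handing corrected-error records to the Linux salinfo code, the Xen build copies each record into a hypervisor buffer, appends a sal_queue_entry_t describing it to sal_queue[], and notifies dom0 with send_guest_vcpu_virq(); dom0 later drains the queue through the SAL_GET_STATE_INFO emulation added to fw_emul.c in this same patch. The user-space mock below shows only that producer/consumer shape; the names (mock_log_queue, mock_get_state_info) are invented, and locking, xmalloc() and the record payload are omitted.

/*
 * Mock of the queue: interrupt side records "a SAL record of type T was
 * captured on CPU N"; dom0's later fetch pops the oldest entry, or sees
 * -5 (no information available) when the queue is empty.
 */
#include <stdio.h>
#include <stdlib.h>

struct queue_entry {
        int cpu;
        int sal_info_type;
        unsigned long length;
        struct queue_entry *next;
};

static struct queue_entry *q_head, *q_tail;

/* Producer: roughly what ia64_log_queue() does after copying the record. */
static void mock_log_queue(int cpu, int type, unsigned long len)
{
        struct queue_entry *e = malloc(sizeof(*e));
        e->cpu = cpu;
        e->sal_info_type = type;
        e->length = len;
        e->next = NULL;
        if (q_tail)
                q_tail->next = e;
        else
                q_head = e;
        q_tail = e;
        printf("queued type %d from cpu%d, notify dom0 via VIRQ\n", type, cpu);
}

/* Consumer: roughly what the SAL_GET_STATE_INFO emulation does for dom0. */
static long mock_get_state_info(void)
{
        struct queue_entry *e = q_head;
        long len;

        if (!e)
                return -5;      /* IA64_SAL_NO_INFORMATION_AVAILABLE */
        q_head = e->next;
        if (!q_head)
                q_tail = NULL;
        len = e->length;
        printf("dom0 fetched type %d record (%ld bytes) logged on cpu%d\n",
               e->sal_info_type, len, e->cpu);
        free(e);
        return len;
}

int main(void)
{
        mock_log_queue(2, 2, 512);      /* e.g. a CMC record logged on cpu 2 */
        mock_get_state_info();
        mock_get_state_info();          /* queue empty -> -5 */
        return 0;
}
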
diff -r f78e499dd669 -r 05d227d81935 xen/arch/ia64/linux-xen/mca_asm.S
--- a/xen/arch/ia64/linux-xen/mca_asm.S Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/arch/ia64/linux-xen/mca_asm.S Wed Nov 15 12:02:09 2006 -0700
@@ -24,6 +24,9 @@
 #include <asm/processor.h>
 #include <asm/mca_asm.h>
 #include <asm/mca.h>
+#ifdef XEN
+#include <asm/vhpt.h>
+#endif
 
 /*
  * When we get a machine check, the kernel stack pointer is no longer
@@ -50,8 +53,7 @@
  */
 #ifdef XEN
 #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)         \
-       movl    _tmp=THIS_CPU(ia64_sal_to_os_handoff_state_addr);;      \
-       tpa     _tmp=_tmp;;                             \
+       GET_THIS_PADDR(_tmp, ia64_sal_to_os_handoff_state_addr);;       \
        ld8     _tmp=[_tmp];;                           \
        st8     [_tmp]=r1,0x08;;                        \
        st8     [_tmp]=r8,0x08;;                        \
@@ -72,6 +74,7 @@
        st8     [_tmp]=r12,0x08;;                       \
        st8     [_tmp]=r17,0x08;;                       \
        st8     [_tmp]=r18,0x08
+#endif /* XEN */
 
 /*
  * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
@@ -101,6 +104,24 @@
  *     imots_sal_check_ra=Return address to location within SAL_CHECK
  *
  */
+#ifdef XEN
+#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
+       movl    tmp=IA64_MCA_COLD_BOOT;                                 \
+       GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;          \
+       ld8     sal_to_os_handoff=[sal_to_os_handoff];;                 \
+       movl    os_to_sal_handoff=ia64_os_to_sal_handoff_state;;        \
+       dep     os_to_sal_handoff = 0, os_to_sal_handoff, 60, 4;;       \
+       /*DATA_VA_TO_PA(os_to_sal_handoff);;*/                          \
+       st8     [os_to_sal_handoff]=tmp,8;;                             \
+       ld8     tmp=[sal_to_os_handoff],48;;                            \
+       st8     [os_to_sal_handoff]=tmp,8;;                             \
+       movl    tmp=IA64_MCA_SAME_CONTEXT;;                             \
+       st8     [os_to_sal_handoff]=tmp,8;;                             \
+       ld8     tmp=[sal_to_os_handoff],-8;;                            \
+       st8     [os_to_sal_handoff]=tmp,8;;                             \
+       ld8     tmp=[sal_to_os_handoff];;                               \
+       st8     [os_to_sal_handoff]=tmp;;
+#else  /* XEN */
 #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
        movl    tmp=IA64_MCA_COLD_BOOT;                                 \
        movl    sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);   \
@@ -114,13 +135,13 @@
        st8     [os_to_sal_handoff]=tmp,8;;                             \
        ld8     tmp=[sal_to_os_handoff];;                               \
        st8     [os_to_sal_handoff]=tmp;;
+#endif /* XEN */
 
 #define GET_IA64_MCA_DATA(reg)                                         \
        GET_THIS_PADDR(reg, ia64_mca_data)                              \
        ;;                                                              \
        ld8 reg=[reg]
 
-#endif /* XEN */
        .global ia64_os_mca_dispatch
        .global ia64_os_mca_dispatch_end
 #ifndef XEN
@@ -132,7 +153,40 @@
        .text
        .align 16
 
-#ifndef XEN
+#ifdef XEN
+/*
+ * void set_per_cpu_data(void)
+ * {
+ *   int i;
+ *   for (i = 0; i < 64; i++) {
+ *     if (ia64_mca_tlb_list[i].cr_lid == ia64_getreg(_IA64_REG_CR_LID)) {
+ *       ia64_set_kr(IA64_KR_PER_CPU_DATA, ia64_mca_tlb_list[i].percpu_paddr);
+ *       return;
+ *     }
+ *   }
+ *   while(1); // Endless loop on error
+ * }
+ */
+#define SET_PER_CPU_DATA()                                     \
+       LOAD_PHYSICAL(p0,r2,ia64_mca_tlb_list);;                \
+       mov r7 = r0;                                            \
+       mov r6 = r0;;                                           \
+       adds r3 = IA64_MCA_PERCPU_OFFSET, r2;                   \
+1:     add r4 = r6, r2;                                        \
+       mov r5=cr.lid;;                                         \
+       adds r7 = 1, r7;                                        \
+       ld8 r4 = [r4];;                                         \
+       cmp.ne p6, p7 = r5, r4;                                 \
+       cmp4.lt p8, p9 = NR_CPUS-1, r7;                         \
+(p7)   br.cond.dpnt 3f;                                        \
+       adds r6 = 16, r6;                                       \
+(p9)   br.cond.sptk 1b;                                        \
+2:     br 2b;;                 /* Endless loop on error */     \
+3:     add r4 = r6, r3;;                                       \
+       ld8 r4 = [r4];;                                         \
+       mov ar.k3=r4
+#endif /* XEN */
+
 /*
  * Just the TLB purge part is moved to a separate function
  * so we can re-use the code for cpu hotplug code as well
@@ -221,6 +275,44 @@ 4:
        ;;
        srlz.i
        ;;
+#ifdef XEN
+       // 5. VHPT
+#if VHPT_ENABLED
+       // r25 = __va_ul(vcpu_vhpt_maddr(v));
+       GET_THIS_PADDR(r2,cpu_kr);;
+       add r2=IA64_KR_CURRENT_OFFSET,r2;;
+       ld8 r2=[r2];;
+#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
+#define HAS_PERVCPU_VHPT_MASK  0x2
+       dep r3=0,r2,60,4;;                      // virtual to physical
+       add r3=IA64_VCPU_DOMAIN_OFFSET,r3;;
+       ld8 r3=[r3];; 
+       dep r3=0,r3,60,4;;                      // virtual to physical
+       add r3=IA64_DOMAIN_FLAGS_OFFSET,r3;;
+       ld8 r3=[r3];; 
+       and r3=HAS_PERVCPU_VHPT_MASK,r3;;
+       cmp.eq p6,p0=r3,r0;;
+(p6)   br.cond.sptk    .not_pervcpu_vhpt
+       add r2=IA64_VCPU_VHPT_MADDR_OFFSET,r2;;
+       dep r2=0,r2,60,4;;                      // virtual to physical
+       ld8 r2=[r2];; 
+       dep r25=-1,r2,60,4;;                    // physical to virtual
+       br.sptk         .percpu_vhpt_done
+#endif
+.not_pervcpu_vhpt:
+       GET_THIS_PADDR(r2, vhpt_paddr);; 
+       ld8 r2=[r2];; 
+       dep r25=-1,r2,60,4;;                    // physical to virtual
+.percpu_vhpt_done:
+       dep r20=0,r25,0,IA64_GRANULE_SHIFT
+       mov r24=IA64_GRANULE_SHIFT<<2
+       ;;
+       ptr.d r20,r24
+       ;;
+       srlz.d
+       ;;
+#endif
+#endif
        // Now branch away to caller.
        br.sptk.many b1
        ;;
@@ -235,6 +327,9 @@ ia64_os_mca_spin:
        cmp.ne  p6,p0=r4,r0
 (p6)   br ia64_os_mca_spin
 
+#ifdef XEN
+       SET_PER_CPU_DATA();;
+#endif
        // Save the SAL to OS MCA handoff state as defined
        // by SAL SPEC 3.0
        // NOTE : The order in which the state gets saved
@@ -250,7 +345,19 @@ begin_os_mca_dump:
 
 ia64_os_mca_done_dump:
 
+#ifdef XEN
+       // Set current to ar.k6
+       GET_THIS_PADDR(r2,cpu_kr);;
+       add r2=IA64_KR_CURRENT_OFFSET,r2;;
+       ld8 r2=[r2];;
+       mov ar.k6=r2;;
+
+       GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;
+       ld8 r2=[r2];;
+       adds r16=56,r2
+#else
        LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
+#endif
        ;;
        ld8 r18=[r16]           // Get processor state parameter on existing PALE_CHECK.
        ;;
@@ -342,6 +449,28 @@ ia64_reload_tr:
        ;;
        srlz.d
        ;;
+#ifdef XEN
+       // 5. VHPT
+#if VHPT_ENABLED
+       // r25 = __va_ul(vcpu_vhpt_maddr(v));
+       dep r20=0,r25,0,IA64_GRANULE_SHIFT
+       movl r26=PAGE_KERNEL
+       ;;
+       mov r21=IA64_TR_VHPT
+       dep r22=0,r20,60,4              // physical address of
+                                       // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
+       mov r24=IA64_GRANULE_SHIFT<<2
+       ;;
+       or r23=r22,r26                  // construct PA | page properties
+       mov cr.itir=r24
+       mov cr.ifa=r20
+       ;;
+       itr.d dtr[r21]=r23              // wire in new mapping...
+       ;;
+       srlz.d
+       ;;
+#endif
+#endif
        br.sptk.many done_tlb_purge_and_reload
 err:
        COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
@@ -874,12 +1003,6 @@ end_os_mca_restore:
        br      ia64_os_mca_done_restore;;
 
 //EndStub//////////////////////////////////////////////////////////////////////
-#else
-ia64_os_mca_dispatch:
-1:
-       br.sptk 1b
-ia64_os_mca_dispatch_end:
-#endif /* !XEN */
 
 
 // ok, the issue here is that we need to save state information so
@@ -911,6 +1034,15 @@ ia64_os_mca_dispatch_end:
 
 GLOBAL_ENTRY(ia64_monarch_init_handler)
        .prologue
+#ifdef XEN     /* Need in ia64_monarch_init_handler? */
+       SET_PER_CPU_DATA();;
+
+       // Set current to ar.k6
+       GET_THIS_PADDR(r2,cpu_kr);;
+       add r2=IA64_KR_CURRENT_OFFSET,r2;;
+       ld8 r2=[r2];;
+       mov ar.k6=r2;;
+#endif
        // stash the information the SAL passed to os
        SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
        ;;
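
The SET_PER_CPU_DATA() macro added to mca_asm.S follows the pseudo-code comment in the patch: the MCA entry point runs in physical mode before any per-CPU state is reachable, so it scans ia64_mca_tlb_list[] for the entry whose cr_lid matches this CPU's cr.lid and loads the recorded per-CPU physical address into ar.k3. A C rendering of that search, as a user-space mock with invented values (mock_getreg_lid() and the table contents are placeholders), looks like this:

#include <stdio.h>

#define NR_MOCK_CPUS 4

struct mock_mca_tlb_info {
        unsigned long cr_lid;
        unsigned long percpu_paddr;
};

/* Stand-in for ia64_mca_tlb_list[], filled at boot by each CPU. */
static struct mock_mca_tlb_info mca_tlb_list[NR_MOCK_CPUS] = {
        { 0x0000, 0x4000000 }, { 0x0100, 0x4010000 },
        { 0x0200, 0x4020000 }, { 0x0300, 0x4030000 },
};

static unsigned long mock_getreg_lid(void) { return 0x0200; } /* pretend cr.lid of CPU 2 */

static unsigned long find_percpu_paddr(void)
{
        unsigned long lid = mock_getreg_lid();

        for (int i = 0; i < NR_MOCK_CPUS; i++)
                if (mca_tlb_list[i].cr_lid == lid)
                        return mca_tlb_list[i].percpu_paddr;
        for (;;) ;      /* the assembly spins forever if the lookup fails */
}

int main(void)
{
        printf("per-CPU data paddr: 0x%lx\n", find_percpu_paddr());
        return 0;
}
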
diff -r f78e499dd669 -r 05d227d81935 xen/arch/ia64/linux-xen/smpboot.c
--- a/xen/arch/ia64/linux-xen/smpboot.c Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/arch/ia64/linux-xen/smpboot.c Wed Nov 15 12:02:09 2006 -0700
@@ -365,9 +365,7 @@ smp_callin (void)
 
        smp_setup_percpu_timer();
 
-#ifndef XEN
        ia64_mca_cmc_vector_setup();    /* Setup vector on AP */
-#endif
 
 #ifdef CONFIG_PERFMON
        pfm_init_percpu();
diff -r f78e499dd669 -r 05d227d81935 xen/arch/ia64/xen/dom_fw.c
--- a/xen/arch/ia64/xen/dom_fw.c        Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/arch/ia64/xen/dom_fw.c        Wed Nov 15 12:02:09 2006 -0700
@@ -240,6 +240,23 @@ acpi_update_lsapic (acpi_table_entry_hea
        return 0;
 }
 
+static int __init
+acpi_patch_plat_int_src (
+       acpi_table_entry_header *header, const unsigned long end)
+{
+       struct acpi_table_plat_int_src *plintsrc;
+
+       plintsrc = (struct acpi_table_plat_int_src *)header;
+       if (!plintsrc)
+               return -EINVAL;
+
+       if (plintsrc->type == ACPI_INTERRUPT_CPEI) {
+               printk("ACPI_INTERRUPT_CPEI disabled for Domain0\n");
+               plintsrc->type = -1;
+       }
+       return 0;
+}
+
 static u8
 generate_acpi_checksum(void *tbl, unsigned long len)
 {
@@ -271,7 +288,11 @@ static void touch_acpi_table(void)
 {
        lsapic_nbr = 0;
        if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_update_lsapic, 0) < 0)
-               printk("Error parsing MADT - no LAPIC entires\n");
+               printk("Error parsing MADT - no LAPIC entries\n");
+       if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC,
+                                 acpi_patch_plat_int_src, 0) < 0)
+               printk("Error parsing MADT - no PLAT_INT_SRC entries\n");
+
        acpi_table_parse(ACPI_APIC, acpi_update_madt_checksum);
 
        return;
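
The dom_fw.c change above hides the CPEI platform interrupt source from dom0 by rewriting the entry's type field, and touch_acpi_table() then refreshes the MADT checksum. ACPI tables stay consistent by choosing the checksum byte so that all bytes of the table sum to zero modulo 256, which is why any in-place edit has to be followed by a recompute. The standalone sketch below demonstrates only that rule on a fake 16-byte table, not the real MADT layout:

#include <stdio.h>

/* Sum of all bytes; a consistent ACPI table sums to 0 modulo 256. */
static unsigned char checksum(const unsigned char *tbl, unsigned long len)
{
        unsigned char sum = 0;
        while (len--)
                sum += *tbl++;
        return sum;
}

int main(void)
{
        unsigned char table[16] = { 0 };

        table[9] = 0x05;                /* pretend: the CPEI entry's type byte */
        table[8] = (unsigned char)-checksum(table, sizeof(table)); /* checksum byte */

        table[9] = 0xff;                /* "disable" the entry, as the patch does */
        printf("sum after edit:  %u\n", checksum(table, sizeof(table)));

        table[8] = 0;                   /* recompute the checksum byte */
        table[8] = (unsigned char)-checksum(table, sizeof(table));
        printf("sum after fixup: %u\n", checksum(table, sizeof(table)));
        return 0;
}
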
diff -r f78e499dd669 -r 05d227d81935 xen/arch/ia64/xen/fw_emul.c
--- a/xen/arch/ia64/xen/fw_emul.c       Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/arch/ia64/xen/fw_emul.c       Wed Nov 15 12:02:09 2006 -0700
@@ -22,6 +22,7 @@
 #include <linux/efi.h>
 #include <asm/pal.h>
 #include <asm/sal.h>
+#include <asm/xenmca.h>
 
 #include <public/sched.h>
 #include "hpsim_ssc.h"
@@ -36,6 +37,93 @@ static DEFINE_SPINLOCK(efi_time_services
 
 extern unsigned long running_on_sim;
 
+struct sal_mc_params {
+       u64 param_type;
+       u64 i_or_m;
+       u64 i_or_m_val;
+       u64 timeout;
+       u64 rz_always;
+} sal_mc_params[SAL_MC_PARAM_CPE_INT + 1];
+
+struct sal_vectors {
+       u64 vector_type;
+       u64 handler_addr1;
+       u64 gp1;
+       u64 handler_len1;
+       u64 handler_addr2;
+       u64 gp2;
+       u64 handler_len2;
+} sal_vectors[SAL_VECTOR_OS_BOOT_RENDEZ + 1];
+
+struct smp_call_args_t {
+       u64 type;
+       u64 ret;
+       u64 target;
+       struct domain *domain;
+       int corrected;
+       int status;
+       void *data;
+}; 
+
+extern sal_log_record_header_t *sal_record;
+DEFINE_SPINLOCK(sal_record_lock);
+
+extern spinlock_t sal_queue_lock;
+
+#define IA64_SAL_NO_INFORMATION_AVAILABLE      -5
+
+#if defined(IA64_SAL_DEBUG_INFO)
+static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
+
+# define IA64_SAL_DEBUG(fmt...)        printk("sal_emulator: " fmt)
+#else
+# define IA64_SAL_DEBUG(fmt...)
+#endif
+
+void get_state_info_on(void *data) {
+       struct smp_call_args_t *arg = data;
+       int flags;
+
+       spin_lock_irqsave(&sal_record_lock, flags);
+       memset(sal_record, 0, ia64_sal_get_state_info_size(arg->type));
+       arg->ret = ia64_sal_get_state_info(arg->type, (u64 *)sal_record);
+       IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) on CPU#%d returns %ld.\n",
+                      rec_name[arg->type], smp_processor_id(), arg->ret);
+       if (arg->corrected) {
+               sal_record->severity = sal_log_severity_corrected;
+               IA64_SAL_DEBUG("%s: IA64_SAL_CLEAR_STATE_INFO(SAL_INFO_TYPE_MCA)"
+                              " force\n", __FUNCTION__);
+       }
+       if (arg->ret > 0) {
+               /*
+                * Save current->domain and set to local(caller) domain for
+                * xencomm_paddr_to_maddr() which calculates maddr from
+                * paddr using mpa value of current->domain.
+                */
+               struct domain *save;
+               save = current->domain;
+               current->domain = arg->domain;
+               if (xencomm_copy_to_guest((void*)arg->target,
+                                         sal_record, arg->ret, 0)) {
+                       printk("SAL_GET_STATE_INFO can't copy to user!!!!\n");
+                       arg->status = IA64_SAL_NO_INFORMATION_AVAILABLE;
+                       arg->ret = 0;
+               }
+               /* Restore current->domain to saved value. */
+               current->domain = save;
+       }
+       spin_unlock_irqrestore(&sal_record_lock, flags);
+}
+
+void clear_state_info_on(void *data) {
+       struct smp_call_args_t *arg = data;
+
+       arg->ret = ia64_sal_clear_state_info(arg->type);
+       IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) on CPU#%d returns %ld.\n",
+                      rec_name[arg->type], smp_processor_id(), arg->ret);
+
+}
+  
 struct sal_ret_values
 sal_emulator (long index, unsigned long in1, unsigned long in2,
              unsigned long in3, unsigned long in4, unsigned long in5,
@@ -106,27 +194,160 @@ sal_emulator (long index, unsigned long 
                        }
                }
                else
-                       printk("*** CALLED SAL_SET_VECTORS %lu.  IGNORED...\n",
-                              in1);
+               {
+                       if (in1 > sizeof(sal_vectors)/sizeof(sal_vectors[0])-1)
+                               BUG();
+                       sal_vectors[in1].vector_type    = in1;
+                       sal_vectors[in1].handler_addr1  = in2;
+                       sal_vectors[in1].gp1            = in3;
+                       sal_vectors[in1].handler_len1   = in4;
+                       sal_vectors[in1].handler_addr2  = in5;
+                       sal_vectors[in1].gp2            = in6;
+                       sal_vectors[in1].handler_len2   = in7;
+               }
                break;
            case SAL_GET_STATE_INFO:
-               /* No more info.  */
-               status = -5;
-               r9 = 0;
+               if (current->domain == dom0) {
+                       sal_queue_entry_t *e;
+                       unsigned long flags;
+                       struct smp_call_args_t arg;
+
+                       spin_lock_irqsave(&sal_queue_lock, flags);
+                       if (list_empty(&sal_queue[in1])) {
+                               sal_log_record_header_t header;
+                               XEN_GUEST_HANDLE(void) handle =
+                                       *(XEN_GUEST_HANDLE(void)*)&in3;
+
+                               IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
+                                              "no sal_queue entry found.\n",
+                                              rec_name[in1]);
+                               memset(&header, 0, sizeof(header));
+
+                               if (copy_to_guest(handle, &header, 1)) {
+                                       printk("sal_emulator: "
+                                              "SAL_GET_STATE_INFO can't copy "
+                                              "empty header to user: 0x%lx\n",
+                                              in3);
+                               }
+                               status = IA64_SAL_NO_INFORMATION_AVAILABLE;
+                               r9 = 0;
+                               spin_unlock_irqrestore(&sal_queue_lock, flags);
+                               break;
+                       }
+                       e = list_entry(sal_queue[in1].next,
+                                      sal_queue_entry_t, list);
+                       spin_unlock_irqrestore(&sal_queue_lock, flags);
+
+                       IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
+                                      "on CPU#%d.\n",
+                                      rec_name[e->sal_info_type],
+                                      rec_name[in1], e->cpuid);
+
+                       arg.type = e->sal_info_type;
+                       arg.target = in3;
+                       arg.corrected = !!((in1 != e->sal_info_type) && 
+                                       (e->sal_info_type == SAL_INFO_TYPE_MCA));
+                       arg.domain = current->domain;
+                       arg.status = 0;
+
+                       if (e->cpuid == smp_processor_id()) {
+                               IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
+                               get_state_info_on(&arg);
+                       } else {
+                               int ret;
+                               IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
+                               ret = smp_call_function_single(e->cpuid,
+                                                              
get_state_info_on,
+                                                              &arg, 0, 1);
+                               if (ret < 0) {
+                                       printk("SAL_GET_STATE_INFO "
+                                              "smp_call_function_single error:"
+                                              " %d\n", ret);
+                                       arg.ret = 0;
+                                       arg.status =
+                                            IA64_SAL_NO_INFORMATION_AVAILABLE;
+                               }
+                       }
+                       r9 = arg.ret;
+                       status = arg.status;
+                       if (r9 == 0) {
+                               spin_lock_irqsave(&sal_queue_lock, flags);
+                               list_del(&e->list);
+                               spin_unlock_irqrestore(&sal_queue_lock, flags);
+                               xfree(e);
+                       }
+               } else {
+                       status = IA64_SAL_NO_INFORMATION_AVAILABLE;
+                       r9 = 0;
+               }
                break;
            case SAL_GET_STATE_INFO_SIZE:
-               /* Return a dummy size.  */
-               status = 0;
-               r9 = 128;
+               r9 = ia64_sal_get_state_info_size(in1);
                break;
            case SAL_CLEAR_STATE_INFO:
-               /* Noop.  */
+               if (current->domain == dom0) {
+                       sal_queue_entry_t *e;
+                       unsigned long flags;
+                       struct smp_call_args_t arg;
+
+                       spin_lock_irqsave(&sal_queue_lock, flags);
+                       if (list_empty(&sal_queue[in1])) {
+                               IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) "
+                                              "no sal_queue entry found.\n",
+                                              rec_name[in1]);
+                               status = IA64_SAL_NO_INFORMATION_AVAILABLE;
+                               r9 = 0;
+                               spin_unlock_irqrestore(&sal_queue_lock, flags);
+                               break;
+                       }
+                       e = list_entry(sal_queue[in1].next,
+                                      sal_queue_entry_t, list);
+
+                       list_del(&e->list);
+                       spin_unlock_irqrestore(&sal_queue_lock, flags);
+
+                       IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s <= %s) "
+                                      "on CPU#%d.\n",
+                                      rec_name[e->sal_info_type],
+                                      rec_name[in1], e->cpuid);
+                       
+
+                       arg.type = e->sal_info_type;
+                       arg.status = 0;
+                       if (e->cpuid == smp_processor_id()) {
+                               IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: local\n");
+                               clear_state_info_on(&arg);
+                       } else {
+                               int ret;
+                               IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
+                               ret = smp_call_function_single(e->cpuid,
+                                       clear_state_info_on, &arg, 0, 1);
+                               if (ret < 0) {
+                                       printk("sal_emulator: "
+                                              "SAL_CLEAR_STATE_INFO "
+                                              "smp_call_function_single error:"
+                                              " %d\n", ret);
+                                       arg.ret = 0;
+                                       arg.status =
+                                            IA64_SAL_NO_INFORMATION_AVAILABLE;
+                               }
+                       }
+                       r9 = arg.ret;
+                       status = arg.status;
+                       xfree(e);
+               }
                break;
            case SAL_MC_RENDEZ:
                printk("*** CALLED SAL_MC_RENDEZ.  IGNORED...\n");
                break;
            case SAL_MC_SET_PARAMS:
-               printk("*** CALLED SAL_MC_SET_PARAMS.  IGNORED...\n");
+               if (in1 > sizeof(sal_mc_params)/sizeof(sal_mc_params[0]))
+                       BUG();
+               sal_mc_params[in1].param_type   = in1;
+               sal_mc_params[in1].i_or_m       = in2;
+               sal_mc_params[in1].i_or_m_val   = in3;
+               sal_mc_params[in1].timeout      = in4;
+               sal_mc_params[in1].rz_always    = in5;
                break;
            case SAL_CACHE_FLUSH:
                if (1) {
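
One more note on the SAL_GET_STATE_INFO / SAL_CLEAR_STATE_INFO emulation above: SAL error state is per processor, so the fetch or clear has to run on the CPU recorded in the queue entry, either directly when the caller already runs there or via smp_call_function_single() otherwise. The user-space sketch below (all names invented, the cross-CPU call reduced to a plain function call) shows just that dispatch decision:

#include <stdio.h>

struct call_args {
        int type;
        long ret;
};

static int current_cpu = 0;     /* stand-in for smp_processor_id() */

/* Stand-in for get_state_info_on(): fetch the SAL record on "this" CPU. */
static void fetch_record(void *data)
{
        struct call_args *arg = data;
        arg->ret = 256;         /* pretend: record length returned by SAL */
        printf("cpu%d: get state info, type %d\n", current_cpu, arg->type);
}

/* Stand-in for smp_call_function_single(): here it just "runs on" that CPU. */
static int run_on_cpu(int cpu, void (*fn)(void *), void *data)
{
        int saved = current_cpu;
        current_cpu = cpu;
        fn(data);
        current_cpu = saved;
        return 0;
}

static long get_state_info(int owning_cpu, int type)
{
        struct call_args arg = { .type = type, .ret = 0 };

        if (owning_cpu == current_cpu)
                fetch_record(&arg);                          /* local case */
        else
                run_on_cpu(owning_cpu, fetch_record, &arg);  /* remote case */
        return arg.ret;
}

int main(void)
{
        printf("got %ld bytes\n", get_state_info(0, 0));     /* local */
        printf("got %ld bytes\n", get_state_info(2, 0));     /* "remote" */
        return 0;
}
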
diff -r f78e499dd669 -r 05d227d81935 xen/arch/ia64/xen/mm_init.c
--- a/xen/arch/ia64/xen/mm_init.c       Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/arch/ia64/xen/mm_init.c       Wed Nov 15 12:02:09 2006 -0700
@@ -10,6 +10,11 @@
 
 #include <xen/sched.h>
 #include <asm/vhpt.h>
+#include <asm/xenmca.h>
+#include <asm/meminit.h>
+#include <asm/page.h>
+
+struct ia64_mca_tlb_info ia64_mca_tlb_list[NR_CPUS];
 
 extern void ia64_tlb_init (void);
 
@@ -93,11 +98,13 @@ ia64_mmu_init (void *my_cpu_data)
 
        cpu = smp_processor_id();
 
-#ifndef XEN
        /* mca handler uses cr.lid as key to pick the right entry */
        ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);
 
        /* insert this percpu data information into our list for MCA recovery purposes */
+#ifdef XEN
+       ia64_mca_tlb_list[cpu].percpu_paddr = __pa(my_cpu_data);
+#else
        ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
        /* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
        ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
diff -r f78e499dd669 -r 05d227d81935 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/arch/ia64/xen/vcpu.c  Wed Nov 15 12:02:09 2006 -0700
@@ -46,8 +46,6 @@ typedef union {
 // this def for vcpu_regs won't work if kernel stack is present
 //#define       vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
 
-#define        TRUE                    1
-#define        FALSE                   0
 #define        IA64_PTA_SZ_BIT         2
 #define        IA64_PTA_VF_BIT         8
 #define        IA64_PTA_BASE_BIT       15
diff -r f78e499dd669 -r 05d227d81935 xen/include/asm-ia64/event.h
--- a/xen/include/asm-ia64/event.h      Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/include/asm-ia64/event.h      Wed Nov 15 12:02:09 2006 -0700
@@ -70,6 +70,8 @@ static inline int arch_virq_is_global(in
     switch ( virq )
     {
     case VIRQ_ITC:
+    case VIRQ_MCA_CMC:
+    case VIRQ_MCA_CPE:
         rc = 0;
         break;
     default:
diff -r f78e499dd669 -r 05d227d81935 xen/include/asm-ia64/linux-xen/asm/mca_asm.h
--- a/xen/include/asm-ia64/linux-xen/asm/mca_asm.h      Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/include/asm-ia64/linux-xen/asm/mca_asm.h      Wed Nov 15 12:02:09 2006 -0700
@@ -59,8 +59,8 @@
 
 #ifdef XEN
 #define GET_THIS_PADDR(reg, var)               \
-       movl    reg = THIS_CPU(var)             \
-       tpa     reg = reg
+       mov     reg = IA64_KR(PER_CPU_DATA);;   \
+       addl    reg = THIS_CPU(var) - PERCPU_ADDR, reg
 #else
 #define GET_THIS_PADDR(reg, var)               \
        mov     reg = IA64_KR(PER_CPU_DATA);;   \
diff -r f78e499dd669 -r 05d227d81935 xen/include/asm-ia64/linux-xen/linux/interrupt.h
--- a/xen/include/asm-ia64/linux-xen/linux/interrupt.h  Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/include/asm-ia64/linux-xen/linux/interrupt.h  Wed Nov 15 12:02:09 2006 -0700
@@ -29,13 +29,15 @@
  */
 #ifdef XEN
 typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
 #else
 typedef int irqreturn_t;
-#endif
-
 #define IRQ_NONE       (0)
 #define IRQ_HANDLED    (1)
 #define IRQ_RETVAL(x)  ((x) != 0)
+#endif
 
 #ifndef XEN
 struct irqaction {
diff -r f78e499dd669 -r 05d227d81935 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h       Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/include/asm-ia64/vcpu.h       Wed Nov 15 12:02:09 2006 -0700
@@ -10,7 +10,7 @@
 #include <asm/ia64_int.h>
 #include <xen/types.h>
 #include <public/xen.h>
-typedef int BOOLEAN;
+#include <linux/acpi.h>
 struct vcpu;
 typedef struct vcpu VCPU;
 typedef cpu_user_regs_t REGS;
diff -r f78e499dd669 -r 05d227d81935 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/include/public/arch-ia64.h    Wed Nov 15 12:02:09 2006 -0700
@@ -59,6 +59,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 
 /* Arch specific VIRQs definition */
 #define VIRQ_ITC        VIRQ_ARCH_0 /* V. Virtual itc timer */
+#define VIRQ_MCA_CMC    VIRQ_ARCH_1 /* MCA cmc interrupt */
+#define VIRQ_MCA_CPE    VIRQ_ARCH_2 /* MCA cpe interrupt */
 
 /* Maximum number of virtual CPUs in multi-processor guests. */
 /* WARNING: before changing this, check that shared_info fits on a page */
diff -r f78e499dd669 -r 05d227d81935 xen/include/xen/softirq.h
--- a/xen/include/xen/softirq.h Tue Nov 14 14:59:37 2006 -0700
+++ b/xen/include/xen/softirq.h Wed Nov 15 12:02:09 2006 -0700
@@ -9,7 +9,13 @@
 #define NMI_SOFTIRQ                       4
 #define PAGE_SCRUB_SOFTIRQ                5
 #define TRACE_SOFTIRQ                     6
+#ifdef __ia64__
+#define CMC_DISABLE_SOFTIRQ               7
+#define CMC_ENABLE_SOFTIRQ                8
+#define NR_SOFTIRQS                       9
+#else  /* __ia64__ */
 #define NR_SOFTIRQS                       7
+#endif /* __ia64__ */
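
The two new softirqs let the ia64 MCA code defer masking and re-enabling of the corrected-error (CMC) vector out of interrupt context. A minimal sketch of how handlers for them would be registered with Xen's open_softirq(); the handler names here are illustrative, and the real registration happens in xen/arch/ia64/linux-xen/mca.c:

    /* Illustrative only: handler names are invented, bodies are placeholders. */
    static void cmc_disable_softirq_handler(void)
    {
            /* mask the CMC vector on this CPU after an interrupt flood */
    }

    static void cmc_enable_softirq_handler(void)
    {
            /* switch back from polling to interrupt-driven CMC handling */
    }

    static void __init cmc_softirq_init(void)
    {
            open_softirq(CMC_DISABLE_SOFTIRQ, cmc_disable_softirq_handler);
            open_softirq(CMC_ENABLE_SOFTIRQ, cmc_enable_softirq_handler);
    }
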
 
 #ifndef __ASSEMBLY__
 
diff -r f78e499dd669 -r 05d227d81935 linux-2.6-xen-sparse/include/asm-ia64/sal.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/include/asm-ia64/sal.h       Wed Nov 15 12:02:09 2006 -0700
@@ -0,0 +1,904 @@
+#ifndef _ASM_IA64_SAL_H
+#define _ASM_IA64_SAL_H
+
+/*
+ * System Abstraction Layer definitions.
+ *
+ * This is based on version 2.5 of the manual "IA-64 System
+ * Abstraction Layer".
+ *
+ * Copyright (C) 2001 Intel
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@xxxxxxxxx>
+ * Copyright (C) 2001 Fred Lewis <frederick.v.lewis@xxxxxxxxx>
+ * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@xxxxxxxxxxxxxxxxxxxx>
+ *
+ * 02/01/04 J. Hall Updated Error Record Structures to conform to July 2001
+ *                 revision of the SAL spec.
+ * 01/01/03 fvlewis Updated Error Record Structures to conform with Nov. 2000
+ *                  revision of the SAL spec.
+ * 99/09/29 davidm     Updated for SAL 2.6.
+ * 00/03/29 cfleck      Updated SAL Error Logging info for processor (SAL 2.6)
+ *                      (plus examples of platform error info structures from smariset @ Intel)
+ */
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT         0
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT   1
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT   2
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT                3
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK       (1<<IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT      (1<<IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bcd.h>
+#include <linux/spinlock.h>
+#include <linux/efi.h>
+
+#include <asm/pal.h>
+#include <asm/system.h>
+#include <asm/fpu.h>
+#ifdef CONFIG_XEN
+#include <asm/xen/xencomm.h>
+#endif
+
+extern spinlock_t sal_lock;
+
+/* SAL spec _requires_ eight args for each call. */
+#define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7)     \
+       result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)
+
+# define SAL_CALL(result,args...) do {                         \
+       unsigned long __ia64_sc_flags;                          \
+       struct ia64_fpreg __ia64_sc_fr[6];                      \
+       ia64_save_scratch_fpregs(__ia64_sc_fr);                 \
+       spin_lock_irqsave(&sal_lock, __ia64_sc_flags);          \
+       __SAL_CALL(result, args);                               \
+       spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags);     \
+       ia64_load_scratch_fpregs(__ia64_sc_fr);                 \
+} while (0)
+
+# define SAL_CALL_NOLOCK(result,args...) do {          \
+       unsigned long __ia64_scn_flags;                 \
+       struct ia64_fpreg __ia64_scn_fr[6];             \
+       ia64_save_scratch_fpregs(__ia64_scn_fr);        \
+       local_irq_save(__ia64_scn_flags);               \
+       __SAL_CALL(result, args);                       \
+       local_irq_restore(__ia64_scn_flags);            \
+       ia64_load_scratch_fpregs(__ia64_scn_fr);        \
+} while (0)
+
+# define SAL_CALL_REENTRANT(result,args...) do {       \
+       struct ia64_fpreg __ia64_scs_fr[6];             \
+       ia64_save_scratch_fpregs(__ia64_scs_fr);        \
+       preempt_disable();                              \
+       __SAL_CALL(result, args);                       \
+       preempt_enable();                               \
+       ia64_load_scratch_fpregs(__ia64_scs_fr);        \
+} while (0)
+
+#define SAL_SET_VECTORS                        0x01000000
+#define SAL_GET_STATE_INFO             0x01000001
+#define SAL_GET_STATE_INFO_SIZE                0x01000002
+#define SAL_CLEAR_STATE_INFO           0x01000003
+#define SAL_MC_RENDEZ                  0x01000004
+#define SAL_MC_SET_PARAMS              0x01000005
+#define SAL_REGISTER_PHYSICAL_ADDR     0x01000006
+
+#define SAL_CACHE_FLUSH                        0x01000008
+#define SAL_CACHE_INIT                 0x01000009
+#define SAL_PCI_CONFIG_READ            0x01000010
+#define SAL_PCI_CONFIG_WRITE           0x01000011
+#define SAL_FREQ_BASE                  0x01000012
+#define SAL_PHYSICAL_ID_INFO           0x01000013
+
+#define SAL_UPDATE_PAL                 0x01000020
+
+struct ia64_sal_retval {
+       /*
+        * A zero status value indicates call completed without error.
+        * A negative status value indicates reason of call failure.
+        * A positive status value indicates success but an
+        * informational value should be printed (e.g., "reboot for
+        * change to take effect").
+        */
+       s64 status;
+       u64 v0;
+       u64 v1;
+       u64 v2;
+};
+
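
Every SAL procedure reports its result through this structure, so callers follow the same pattern; a minimal sketch, using SAL_FREQ_BASE purely as an example (the inline wrappers later in this header are the real interface):

    static void example_sal_call(void)
    {
            struct ia64_sal_retval isrv;

            /* status < 0: error, 0: success, > 0: success with informational value */
            SAL_CALL(isrv, SAL_FREQ_BASE, SAL_FREQ_BASE_PLATFORM, 0, 0, 0, 0, 0, 0);
            if (isrv.status < 0)
                    printk("SAL_FREQ_BASE failed: %s\n", ia64_sal_strerror(isrv.status));
            else
                    printk("platform base frequency: %lu ticks/s\n", isrv.v0);
    }
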
+typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
+
+enum {
+       SAL_FREQ_BASE_PLATFORM = 0,
+       SAL_FREQ_BASE_INTERVAL_TIMER = 1,
+       SAL_FREQ_BASE_REALTIME_CLOCK = 2
+};
+
+/*
+ * The SAL system table is followed by a variable number of variable
+ * length descriptors.  The structure of these descriptors follows
+ * below.
+ * The definition follows SAL specs from July 2000
+ */
+struct ia64_sal_systab {
+       u8 signature[4];        /* should be "SST_" */
+       u32 size;               /* size of this table in bytes */
+       u8 sal_rev_minor;
+       u8 sal_rev_major;
+       u16 entry_count;        /* # of entries in variable portion */
+       u8 checksum;
+       u8 reserved1[7];
+       u8 sal_a_rev_minor;
+       u8 sal_a_rev_major;
+       u8 sal_b_rev_minor;
+       u8 sal_b_rev_major;
+       /* oem_id & product_id: terminating NUL is missing if string is exactly 32 bytes long. */
+       u8 oem_id[32];
+       u8 product_id[32];      /* ASCII product id  */
+       u8 reserved2[8];
+};
+
+enum sal_systab_entry_type {
+       SAL_DESC_ENTRY_POINT = 0,
+       SAL_DESC_MEMORY = 1,
+       SAL_DESC_PLATFORM_FEATURE = 2,
+       SAL_DESC_TR = 3,
+       SAL_DESC_PTC = 4,
+       SAL_DESC_AP_WAKEUP = 5
+};
+
+/*
+ * Entry type: Size:
+ *     0       48
+ *     1       32
+ *     2       16
+ *     3       32
+ *     4       16
+ *     5       16
+ */
+#define SAL_DESC_SIZE(type)    "\060\040\020\040\020\020"[(unsigned) type]
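
SAL_DESC_SIZE() gives the length of each variable-length descriptor that follows the system table, so the table is walked as in the sketch below; the real walk is done by ia64_sal_init(), and the descriptor types referenced are the ones defined just after this point:

    /* Sketch of the descriptor walk; signature and checksum checks are omitted. */
    static void walk_sal_systab(struct ia64_sal_systab *systab)
    {
            char *p = (char *) (systab + 1);
            int i;

            for (i = 0; i < systab->entry_count; i++) {
                    switch (*p) {
                    case SAL_DESC_ENTRY_POINT:
                            /* ((ia64_sal_desc_entry_point_t *) p)->sal_proc, ->gp, ... */
                            break;
                    case SAL_DESC_PLATFORM_FEATURE:
                            /* ((ia64_sal_desc_platform_feature_t *) p)->feature_mask */
                            break;
                    default:
                            break;
                    }
                    p += SAL_DESC_SIZE(*p);
            }
    }
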
+
+typedef struct ia64_sal_desc_entry_point {
+       u8 type;
+       u8 reserved1[7];
+       u64 pal_proc;
+       u64 sal_proc;
+       u64 gp;
+       u8 reserved2[16];
+}ia64_sal_desc_entry_point_t;
+
+typedef struct ia64_sal_desc_memory {
+       u8 type;
+       u8 used_by_sal; /* needs to be mapped for SAL? */
+       u8 mem_attr;            /* current memory attribute setting */
+       u8 access_rights;       /* access rights set up by SAL */
+       u8 mem_attr_mask;       /* mask of supported memory attributes */
+       u8 reserved1;
+       u8 mem_type;            /* memory type */
+       u8 mem_usage;           /* memory usage */
+       u64 addr;               /* physical address of memory */
+       u32 length;     /* length (multiple of 4KB pages) */
+       u32 reserved2;
+       u8 oem_reserved[8];
+} ia64_sal_desc_memory_t;
+
+typedef struct ia64_sal_desc_platform_feature {
+       u8 type;
+       u8 feature_mask;
+       u8 reserved1[14];
+} ia64_sal_desc_platform_feature_t;
+
+typedef struct ia64_sal_desc_tr {
+       u8 type;
+       u8 tr_type;             /* 0 == instruction, 1 == data */
+       u8 regnum;              /* translation register number */
+       u8 reserved1[5];
+       u64 addr;               /* virtual address of area covered */
+       u64 page_size;          /* encoded page size */
+       u8 reserved2[8];
+} ia64_sal_desc_tr_t;
+
+typedef struct ia64_sal_desc_ptc {
+       u8 type;
+       u8 reserved1[3];
+       u32 num_domains;        /* # of coherence domains */
+       u64 domain_info;        /* physical address of domain info table */
+} ia64_sal_desc_ptc_t;
+
+typedef struct ia64_sal_ptc_domain_info {
+       u64 proc_count;         /* number of processors in domain */
+       u64 proc_list;          /* physical address of LID array */
+} ia64_sal_ptc_domain_info_t;
+
+typedef struct ia64_sal_ptc_domain_proc_entry {
+       u64 id  : 8;            /* id of processor */
+       u64 eid : 8;            /* eid of processor */
+} ia64_sal_ptc_domain_proc_entry_t;
+
+
+#define IA64_SAL_AP_EXTERNAL_INT 0
+
+typedef struct ia64_sal_desc_ap_wakeup {
+       u8 type;
+       u8 mechanism;           /* 0 == external interrupt */
+       u8 reserved1[6];
+       u64 vector;             /* interrupt vector in range 0x10-0xff */
+} ia64_sal_desc_ap_wakeup_t ;
+
+extern ia64_sal_handler ia64_sal;
+extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
+
+extern unsigned short sal_revision;    /* supported SAL spec revision */
+extern unsigned short sal_version;     /* SAL version; OEM dependent */
+#define SAL_VERSION_CODE(major, minor) ((BIN2BCD(major) << 8) | BIN2BCD(minor))
+
+extern const char *ia64_sal_strerror (long status);
+extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
+
+/* SAL information type encodings */
+enum {
+       SAL_INFO_TYPE_MCA  = 0,         /* Machine check abort information */
+        SAL_INFO_TYPE_INIT = 1,                /* Init information */
+        SAL_INFO_TYPE_INIT = 1,                /* Init information */
+        SAL_INFO_TYPE_CMC  = 2,                /* Corrected machine check information */
+        SAL_INFO_TYPE_CPE  = 3         /* Corrected platform error information */
+};
+
+/* Encodings for machine check parameter types */
+enum {
+       SAL_MC_PARAM_RENDEZ_INT    = 1, /* Rendezvous interrupt */
+       SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */
+       SAL_MC_PARAM_CPE_INT       = 3  /* Corrected Platform Error Int */
+};
+
+/* Encodings for rendezvous mechanisms */
+enum {
+       SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */
+       SAL_MC_PARAM_MECHANISM_MEM = 2  /* Use memory synchronization variable*/
+};
+
+/* Encodings for vectors which can be registered by the OS with SAL */
+enum {
+       SAL_VECTOR_OS_MCA         = 0,
+       SAL_VECTOR_OS_INIT        = 1,
+       SAL_VECTOR_OS_BOOT_RENDEZ = 2
+};
+
+/* Encodings for mca_opt parameter sent to SAL_MC_SET_PARAMS */
+#define        SAL_MC_PARAM_RZ_ALWAYS          0x1
+#define        SAL_MC_PARAM_BINIT_ESCALATE     0x10
+
+/*
+ * Definition of the SAL Error Log from the SAL spec
+ */
+
+/* SAL Error Record Section GUID Definitions */
+#define SAL_PROC_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_MEM_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SEL_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf3, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_PCI_BUS_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_PCI_COMP_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SPECIFIC_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_HOST_CTLR_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_BUS_ERR_SECT_GUID  \
+    EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+
+#define MAX_CACHE_ERRORS       6
+#define MAX_TLB_ERRORS         6
+#define MAX_BUS_ERRORS         1
+
+/* Definition of version according to SAL spec for logging purposes */
+typedef struct sal_log_revision {
+       u8 minor;               /* BCD (0..99) */
+       u8 major;               /* BCD (0..99) */
+} sal_log_revision_t;
+
+/* Definition of timestamp according to SAL spec for logging purposes */
+typedef struct sal_log_timestamp {
+       u8 slh_second;          /* Second (0..59) */
+       u8 slh_minute;          /* Minute (0..59) */
+       u8 slh_hour;            /* Hour (0..23) */
+       u8 slh_reserved;
+       u8 slh_day;             /* Day (1..31) */
+       u8 slh_month;           /* Month (1..12) */
+       u8 slh_year;            /* Year (00..99) */
+       u8 slh_century;         /* Century (19, 20, 21, ...) */
+} sal_log_timestamp_t;
+
+/* Definition of log record  header structures */
+typedef struct sal_log_record_header {
+       u64 id;                         /* Unique monotonically increasing ID */
+       sal_log_revision_t revision;    /* Major and Minor revision of header */
+       u8 severity;                    /* Error Severity */
+       u8 validation_bits;             /* 0: platform_guid, 1: !timestamp */
+       u32 len;                        /* Length of this error log in bytes */
+       sal_log_timestamp_t timestamp;  /* Timestamp */
+       efi_guid_t platform_guid;       /* Unique OEM Platform ID */
+} sal_log_record_header_t;
+
+#define sal_log_severity_recoverable   0
+#define sal_log_severity_fatal         1
+#define sal_log_severity_corrected     2
+
+/* Definition of log section header structures */
+typedef struct sal_log_sec_header {
+    efi_guid_t guid;                   /* Unique Section ID */
+    sal_log_revision_t revision;       /* Major and Minor revision of Section */
+    u16 reserved;
+    u32 len;                           /* Section length */
+} sal_log_section_hdr_t;
+
+typedef struct sal_log_mod_error_info {
+       struct {
+               u64 check_info              : 1,
+                   requestor_identifier    : 1,
+                   responder_identifier    : 1,
+                   target_identifier       : 1,
+                   precise_ip              : 1,
+                   reserved                : 59;
+       } valid;
+       u64 check_info;
+       u64 requestor_identifier;
+       u64 responder_identifier;
+       u64 target_identifier;
+       u64 precise_ip;
+} sal_log_mod_error_info_t;
+
+typedef struct sal_processor_static_info {
+       struct {
+               u64 minstate        : 1,
+                   br              : 1,
+                   cr              : 1,
+                   ar              : 1,
+                   rr              : 1,
+                   fr              : 1,
+                   reserved        : 58;
+       } valid;
+       pal_min_state_area_t min_state_area;
+       u64 br[8];
+       u64 cr[128];
+       u64 ar[128];
+       u64 rr[8];
+       struct ia64_fpreg __attribute__ ((packed)) fr[128];
+} sal_processor_static_info_t;
+
+struct sal_cpuid_info {
+       u64 regs[5];
+       u64 reserved;
+};
+
+typedef struct sal_log_processor_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 proc_error_map      : 1,
+                   proc_state_param    : 1,
+                   proc_cr_lid         : 1,
+                   psi_static_struct   : 1,
+                   num_cache_check     : 4,
+                   num_tlb_check       : 4,
+                   num_bus_check       : 4,
+                   num_reg_file_check  : 4,
+                   num_ms_check        : 4,
+                   cpuid_info          : 1,
+                   reserved1           : 39;
+       } valid;
+       u64 proc_error_map;
+       u64 proc_state_parameter;
+       u64 proc_cr_lid;
+       /*
+        * The rest of this structure consists of variable-length arrays, which can't be
+        * expressed in C.
+        */
+       sal_log_mod_error_info_t info[0];
+       /*
+        * This is what the rest looked like if C supported variable-length arrays:
+        *
+        * sal_log_mod_error_info_t cache_check_info[.valid.num_cache_check];
+        * sal_log_mod_error_info_t tlb_check_info[.valid.num_tlb_check];
+        * sal_log_mod_error_info_t bus_check_info[.valid.num_bus_check];
+        * sal_log_mod_error_info_t reg_file_check_info[.valid.num_reg_file_check];
+        * sal_log_mod_error_info_t ms_check_info[.valid.num_ms_check];
+        * struct sal_cpuid_info cpuid_info;
+        * sal_processor_static_info_t processor_static_info;
+        */
+} sal_log_processor_info_t;
+
+/* Given a sal_log_processor_info_t pointer, return a pointer to the processor_static_info: */
+#define SAL_LPI_PSI_INFO(l)                                                                    \
+({     sal_log_processor_info_t *_l = (l);                                                     \
+       ((sal_processor_static_info_t *)                                                        \
+        ((char *) _l->info + ((_l->valid.num_cache_check + _l->valid.num_tlb_check             \
+                               + _l->valid.num_bus_check + _l->valid.num_reg_file_check        \
+                               + _l->valid.num_ms_check) * sizeof(sal_log_mod_error_info_t)    \
+                              + sizeof(struct sal_cpuid_info))));                              \
+})
+
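
Because the per-check arrays are laid out back to back, they have to be walked by hand; a small sketch, assuming a pointer to a valid processor error section:

    /* Dump the cache-check entries that start at lpi->info. */
    static void dump_cache_checks(sal_log_processor_info_t *lpi)
    {
            sal_log_mod_error_info_t *info = lpi->info;
            unsigned int i;

            for (i = 0; i < lpi->valid.num_cache_check; i++, info++)
                    if (info->valid.check_info)
                            printk("cache check %u: check_info=0x%lx\n",
                                   i, info->check_info);
    }
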
+/* platform error log structures */
+
+typedef struct sal_log_mem_dev_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 error_status    : 1,
+                   physical_addr   : 1,
+                   addr_mask       : 1,
+                   node            : 1,
+                   card            : 1,
+                   module          : 1,
+                   bank            : 1,
+                   device          : 1,
+                   row             : 1,
+                   column          : 1,
+                   bit_position    : 1,
+                   requestor_id    : 1,
+                   responder_id    : 1,
+                   target_id       : 1,
+                   bus_spec_data   : 1,
+                   oem_id          : 1,
+                   oem_data        : 1,
+                   reserved        : 47;
+       } valid;
+       u64 error_status;
+       u64 physical_addr;
+       u64 addr_mask;
+       u16 node;
+       u16 card;
+       u16 module;
+       u16 bank;
+       u16 device;
+       u16 row;
+       u16 column;
+       u16 bit_position;
+       u64 requestor_id;
+       u64 responder_id;
+       u64 target_id;
+       u64 bus_spec_data;
+       u8 oem_id[16];
+       u8 oem_data[1];                 /* Variable length data */
+} sal_log_mem_dev_err_info_t;
+
+typedef struct sal_log_sel_dev_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 record_id       : 1,
+                   record_type     : 1,
+                   generator_id    : 1,
+                   evm_rev         : 1,
+                   sensor_type     : 1,
+                   sensor_num      : 1,
+                   event_dir       : 1,
+                   event_data1     : 1,
+                   event_data2     : 1,
+                   event_data3     : 1,
+                   reserved        : 54;
+       } valid;
+       u16 record_id;
+       u8 record_type;
+       u8 timestamp[4];
+       u16 generator_id;
+       u8 evm_rev;
+       u8 sensor_type;
+       u8 sensor_num;
+       u8 event_dir;
+       u8 event_data1;
+       u8 event_data2;
+       u8 event_data3;
+} sal_log_sel_dev_err_info_t;
+
+typedef struct sal_log_pci_bus_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 err_status      : 1,
+                   err_type        : 1,
+                   bus_id          : 1,
+                   bus_address     : 1,
+                   bus_data        : 1,
+                   bus_cmd         : 1,
+                   requestor_id    : 1,
+                   responder_id    : 1,
+                   target_id       : 1,
+                   oem_data        : 1,
+                   reserved        : 54;
+       } valid;
+       u64 err_status;
+       u16 err_type;
+       u16 bus_id;
+       u32 reserved;
+       u64 bus_address;
+       u64 bus_data;
+       u64 bus_cmd;
+       u64 requestor_id;
+       u64 responder_id;
+       u64 target_id;
+       u8 oem_data[1];                 /* Variable length data */
+} sal_log_pci_bus_err_info_t;
+
+typedef struct sal_log_smbios_dev_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 event_type      : 1,
+                   length          : 1,
+                   time_stamp      : 1,
+                   data            : 1,
+                   reserved1       : 60;
+       } valid;
+       u8 event_type;
+       u8 length;
+       u8 time_stamp[6];
+       u8 data[1];                     /* data of variable length, length == slsmb_length */
+} sal_log_smbios_dev_err_info_t;
+
+typedef struct sal_log_pci_comp_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 err_status      : 1,
+                   comp_info       : 1,
+                   num_mem_regs    : 1,
+                   num_io_regs     : 1,
+                   reg_data_pairs  : 1,
+                   oem_data        : 1,
+                   reserved        : 58;
+       } valid;
+       u64 err_status;
+       struct {
+               u16 vendor_id;
+               u16 device_id;
+               u8 class_code[3];
+               u8 func_num;
+               u8 dev_num;
+               u8 bus_num;
+               u8 seg_num;
+               u8 reserved[5];
+       } comp_info;
+       u32 num_mem_regs;
+       u32 num_io_regs;
+       u64 reg_data_pairs[1];
+       /*
+        * array of address/data register pairs is num_mem_regs + num_io_regs elements
+        * long.  Each array element consists of a u64 address followed by a u64 data
+        * value.  The oem_data array immediately follows the reg_data_pairs array
+        */
+       u8 oem_data[1];                 /* Variable length data */
+} sal_log_pci_comp_err_info_t;
+
+typedef struct sal_log_plat_specific_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 err_status      : 1,
+                   guid            : 1,
+                   oem_data        : 1,
+                   reserved        : 61;
+       } valid;
+       u64 err_status;
+       efi_guid_t guid;
+       u8 oem_data[1];                 /* platform specific variable length data */
+} sal_log_plat_specific_err_info_t;
+
+typedef struct sal_log_host_ctlr_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 err_status      : 1,
+                   requestor_id    : 1,
+                   responder_id    : 1,
+                   target_id       : 1,
+                   bus_spec_data   : 1,
+                   oem_data        : 1,
+                   reserved        : 58;
+       } valid;
+       u64 err_status;
+       u64 requestor_id;
+       u64 responder_id;
+       u64 target_id;
+       u64 bus_spec_data;
+       u8 oem_data[1];                 /* Variable length OEM data */
+} sal_log_host_ctlr_err_info_t;
+
+typedef struct sal_log_plat_bus_err_info {
+       sal_log_section_hdr_t header;
+       struct {
+               u64 err_status      : 1,
+                   requestor_id    : 1,
+                   responder_id    : 1,
+                   target_id       : 1,
+                   bus_spec_data   : 1,
+                   oem_data        : 1,
+                   reserved        : 58;
+       } valid;
+       u64 err_status;
+       u64 requestor_id;
+       u64 responder_id;
+       u64 target_id;
+       u64 bus_spec_data;
+       u8 oem_data[1];                 /* Variable length OEM data */
+} sal_log_plat_bus_err_info_t;
+
+/* Overall platform error section structure */
+typedef union sal_log_platform_err_info {
+       sal_log_mem_dev_err_info_t mem_dev_err;
+       sal_log_sel_dev_err_info_t sel_dev_err;
+       sal_log_pci_bus_err_info_t pci_bus_err;
+       sal_log_smbios_dev_err_info_t smbios_dev_err;
+       sal_log_pci_comp_err_info_t pci_comp_err;
+       sal_log_plat_specific_err_info_t plat_specific_err;
+       sal_log_host_ctlr_err_info_t host_ctlr_err;
+       sal_log_plat_bus_err_info_t plat_bus_err;
+} sal_log_platform_err_info_t;
+
+/* SAL log over-all, multi-section error record structure (processor+platform) */
+typedef struct err_rec {
+       sal_log_record_header_t sal_elog_header;
+       sal_log_processor_info_t proc_err;
+       sal_log_platform_err_info_t plat_err;
+       u8 oem_data_pad[1024];
+} ia64_err_rec_t;
+
+/*
+ * Now define a couple of inline functions for improved type checking
+ * and convenience.
+ */
+static inline long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+                   unsigned long *drift_info)
+{
+       struct ia64_sal_retval isrv;
+
+       SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
+       *ticks_per_second = isrv.v0;
+       *drift_info = isrv.v1;
+       return isrv.status;
+}
+
+extern s64 ia64_sal_cache_flush (u64 cache_type);
+
+/* Initialize all the processor and platform level instruction and data caches */
+static inline s64
+ia64_sal_cache_init (void)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_CACHE_INIT, 0, 0, 0, 0, 0, 0, 0);
+       return isrv.status;
+}
+
+/*
+ * Clear the processor and platform information logged by SAL with respect to the machine
+ * state at the time of MCA's, INITs, CMCs, or CPEs.
+ */
+static inline s64
+ia64_sal_clear_state_info (u64 sal_info_type)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL_REENTRANT(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0,
+                     0, 0, 0, 0, 0);
+       return isrv.status;
+}
+
+
+/* Get the processor and platform information logged by SAL with respect to the machine
+ * state at the time of the MCAs, INITs, CMCs, or CPEs.
+ */
+#ifdef CONFIG_XEN
+static inline u64 ia64_sal_get_state_info_size (u64 sal_info_type);
+#endif
+
+static inline u64
+ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
+{
+       struct ia64_sal_retval isrv;
+#ifdef CONFIG_XEN
+       if (is_running_on_xen()) {
+               struct xencomm_handle *desc;
+
+               if (xencomm_create(sal_info,
+                                  ia64_sal_get_state_info_size(sal_info_type),
+                                  &desc, GFP_KERNEL))
+                       return 0;
+
+               SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
+                                  desc, 0, 0, 0, 0);
+               xencomm_free(desc);
+       } else
+#endif
+       SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
+                     sal_info, 0, 0, 0, 0);
+       if (isrv.status)
+               return 0;
+
+       return isrv.v0;
+}
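
Together with ia64_sal_clear_state_info() above, the usual pattern for draining a corrected-error log is roughly the following; the function name and fixed-size buffer are simplifications for illustration:

    static void poll_cmc_log(void)
    {
            /* real code sizes this buffer with ia64_sal_get_state_info_size() */
            static u64 buf[1024];
            u64 size;

            size = ia64_sal_get_state_info(SAL_INFO_TYPE_CMC, buf);
            if (size) {
                    /* hand the record off to salinfo / log decoders here */
                    ia64_sal_clear_state_info(SAL_INFO_TYPE_CMC);
            }
    }
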
+
+/*
+ * Get the maximum size of the information logged by SAL with respect to the machine state
+ * at the time of MCAs, INITs, CMCs, or CPEs.
+ */
+static inline u64
+ia64_sal_get_state_info_size (u64 sal_info_type)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0,
+                     0, 0, 0, 0, 0);
+       if (isrv.status)
+               return 0;
+       return isrv.v0;
+}
+
+/*
+ * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
+ * the monarch processor.  Must not lock, because it will not return on any cpu until the
+ * monarch processor sends a wake up.
+ */
+static inline s64
+ia64_sal_mc_rendez (void)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
+       return isrv.status;
+}
+
+/*
+ * Allow the OS to specify the interrupt number to be used by SAL to interrupt OS during
+ * the machine check rendezvous sequence as well as the mechanism to wake up the
+ * non-monarch processor at the end of machine check processing.
+ * Returns the complete ia64_sal_retval because some calls return more than just a status
+ * value.
+ */
+static inline struct ia64_sal_retval
+ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout, u64 rz_always)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val,
+                timeout, rz_always, 0, 0);
+       return isrv;
+}
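
When a domain makes this call, its arguments are what end up in the sal_mc_params[] table handled by the fw_emul.c hunk earlier in this changeset. A sketch of the registration an OS performs at MCA init time, loosely modelled on ia64_mca_init(); the vector name and timeout value are assumptions:

    static void example_register_rendezvous(void)
    {
            struct ia64_sal_retval isrv;

            /* IA64_MCA_RENDEZ_VECTOR and the 20s timeout are illustrative values */
            isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
                                          SAL_MC_PARAM_MECHANISM_INT,
                                          IA64_MCA_RENDEZ_VECTOR,
                                          20 * 1000,
                                          SAL_MC_PARAM_RZ_ALWAYS);
            if (isrv.status)
                    printk("SAL_MC_SET_PARAMS(RENDEZ_INT) failed: %ld\n", isrv.status);
    }
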
+
+/* Read from PCI configuration space */
+static inline s64
+ia64_sal_pci_config_read (u64 pci_config_addr, int type, u64 size, u64 *value)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size, type, 0, 0, 0, 0);
+       if (value)
+               *value = isrv.v0;
+       return isrv.status;
+}
+
+/* Write to PCI configuration space */
+static inline s64
+ia64_sal_pci_config_write (u64 pci_config_addr, int type, u64 size, u64 value)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value,
+                type, 0, 0, 0);
+       return isrv.status;
+}
+
+/*
+ * Register physical addresses of locations needed by SAL when SAL procedures are invoked
+ * in virtual mode.
+ */
+static inline s64
+ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr,
+                0, 0, 0, 0, 0);
+       return isrv.status;
+}
+
+/*
+ * Register software dependent code locations within SAL. These locations are handlers or
+ * entry points where SAL will pass control for the specified event. These event handlers
+ * are for the boot rendezvous, MCAs and INIT scenarios.
+ */
+static inline s64
+ia64_sal_set_vectors (u64 vector_type,
+                     u64 handler_addr1, u64 gp1, u64 handler_len1,
+                     u64 handler_addr2, u64 gp2, u64 handler_len2)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_SET_VECTORS, vector_type,
+                       handler_addr1, gp1, handler_len1,
+                       handler_addr2, gp2, handler_len2);
+
+       return isrv.status;
+}
+
+/* Update the contents of PAL block in the non-volatile storage device */
+static inline s64
+ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
+                    u64 *error_code, u64 *scratch_buf_size_needed)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size,
+                0, 0, 0, 0);
+       if (error_code)
+               *error_code = isrv.v0;
+       if (scratch_buf_size_needed)
+               *scratch_buf_size_needed = isrv.v1;
+       return isrv.status;
+}
+
+/* Get physical processor die mapping in the platform. */
+static inline s64
+ia64_sal_physical_id_info(u16 *splid)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
+       if (splid)
+               *splid = isrv.v0;
+       return isrv.status;
+}
+
+extern unsigned long sal_platform_features;
+
+extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
+
+struct sal_ret_values {
+       long r8; long r9; long r10; long r11;
+};
+
+#define IA64_SAL_OEMFUNC_MIN           0x02000000
+#define IA64_SAL_OEMFUNC_MAX           0x03ffffff
+
+extern int ia64_sal_oemcall(struct ia64_sal_retval *, u64, u64, u64, u64, u64,
+                           u64, u64, u64);
+extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
+                                  u64, u64, u64, u64, u64);
+extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
+                                     u64, u64, u64, u64, u64);
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * System Abstraction Layer Specification
+ * Section 3.2.5.1: OS_BOOT_RENDEZ to SAL return State.
+ * Note: region regs are stored first in head.S _start. Hence they must
+ * stay up front.
+ */
+struct sal_to_os_boot {
+       u64 rr[8];              /* Region Registers */
+       u64     br[6];          /* br0: return addr into SAL boot rendez routine */
+       u64 gr1;                /* SAL:GP */
+       u64 gr12;               /* SAL:SP */
+       u64 gr13;               /* SAL: Task Pointer */
+       u64 fpsr;
+       u64     pfs;
+       u64 rnat;
+       u64 unat;
+       u64 bspstore;
+       u64 dcr;                /* Default Control Register */
+       u64 iva;
+       u64 pta;
+       u64 itv;
+       u64 pmv;
+       u64 cmcv;
+       u64 lrr[2];
+       u64 gr[4];
+       u64 pr;                 /* Predicate registers */
+       u64 lc;                 /* Loop Count */
+       struct ia64_fpreg fp[20];
+};
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+extern void ia64_jump_to_sal(struct sal_to_os_boot *);
+#endif
+
+extern void ia64_sal_handler_init(void *entry_point, void *gpval);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_SAL_H */
diff -r f78e499dd669 -r 05d227d81935 xen/include/asm-ia64/xenmca.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/xenmca.h     Wed Nov 15 12:02:09 2006 -0700
@@ -0,0 +1,34 @@
+/*
+ * File:       xenmca.h
+ * Purpose:    Machine check handling specific defines for Xen
+ *
+ * Copyright (C) 2006 FUJITSU LTD. (kaz@xxxxxxxxxxxxxx)
+ */
+
+#ifndef _ASM_IA64_XENMCA_H
+#define _ASM_IA64_XENMCA_H
+
+#ifndef __ASSEMBLER__
+#include <linux/list.h>
+#include <asm/sal.h>
+
+typedef struct sal_queue_entry_t {
+       int cpuid;
+       int sal_info_type;
+       unsigned int vector;
+       unsigned int virq;
+       unsigned int length;
+       struct list_head list;
+} sal_queue_entry_t;
+
+extern struct list_head sal_queue[];
+
+struct ia64_mca_tlb_info {
+       u64 cr_lid;
+       u64 percpu_paddr;
+};
+
+extern struct ia64_mca_tlb_info ia64_mca_tlb_list[];
+#endif /* __ASSEMBLER__ */
+
+#endif /* _ASM_IA64_XENMCA_H */
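
A rough sketch of how an entry of this type might be queued and signalled to the guest. Locking is omitted, the helper name is invented, and the use of send_guest_vcpu_virq() as the delivery call is an assumption; the real producer/consumer code is in fw_emul.c and mca.c in this changeset:

    static void queue_sal_event(struct vcpu *v, int cpu, int sal_info_type,
                                unsigned int vector, unsigned int virq,
                                unsigned int length)
    {
            sal_queue_entry_t *e = xmalloc(sal_queue_entry_t);

            if (e == NULL)
                    return;

            e->cpuid = cpu;
            e->sal_info_type = sal_info_type;
            e->vector = vector;
            e->virq = virq;
            e->length = length;

            /* sal_queue[] is indexed by SAL_INFO_TYPE_*; delivery call is assumed */
            list_add_tail(&e->list, &sal_queue[sal_info_type]);
            send_guest_vcpu_virq(v, virq);
    }
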

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

