
[Xen-changelog] Enable CONFIG_SMP compile and link



# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID f5c4042212b013d7750e5270d98c047c8419e5a7
# Parent  2b95125015a59eeb086b3748f9415ac440133b4d
Enable CONFIG_SMP compile and link

diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/hyperprivop.S
--- a/xen/arch/ia64/hyperprivop.S       Fri Aug 26 13:06:49 2005
+++ b/xen/arch/ia64/hyperprivop.S       Tue Aug 30 18:41:54 2005
@@ -25,6 +25,11 @@
 #define FAST_SSM_I
 #define FAST_PTC_GA
 #undef RFI_TO_INTERRUPT // not working yet
+#endif
+
+#ifdef CONFIG_SMP
+#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
+#undef FAST_PTC_GA
 #endif
 
 // FIXME: turn off for now... but NaTs may crash Xen so re-enable soon!
@@ -1506,9 +1511,6 @@
 END(hyper_thash)
 
 ENTRY(hyper_ptc_ga)
-#ifdef CONFIG_SMP
-FIXME: ptc.ga instruction requires spinlock for SMP
-#endif
 #ifndef FAST_PTC_GA
        br.spnt.few dispatch_break_fault ;;
 #endif
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/irq.c
--- a/xen/arch/ia64/irq.c       Fri Aug 26 13:06:49 2005
+++ b/xen/arch/ia64/irq.c       Tue Aug 30 18:41:54 2005
@@ -266,8 +266,12 @@
 #ifdef CONFIG_SMP
 inline void synchronize_irq(unsigned int irq)
 {
-       while (irq_descp(irq)->status & IRQ_INPROGRESS)
+#ifndef XEN
+       struct irq_desc *desc = irq_desc + irq;
+
+       while (desc->status & IRQ_INPROGRESS)
                cpu_relax();
+#endif
 }
 EXPORT_SYMBOL(synchronize_irq);
 #endif
@@ -1012,6 +1016,8 @@
        return 0;
 }
 
+#ifndef XEN
+
 static struct proc_dir_entry * root_irq_dir;
 static struct proc_dir_entry * irq_dir [NR_IRQS];
 
@@ -1121,6 +1127,7 @@
 
 
 #endif /* CONFIG_SMP */
+#endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 unsigned int vectors_in_migration[NR_IRQS];
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/linux-xen/irq_ia64.c
--- a/xen/arch/ia64/linux-xen/irq_ia64.c        Fri Aug 26 13:06:49 2005
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c        Tue Aug 30 18:41:54 2005
@@ -323,7 +323,9 @@
 
 static struct irqaction ipi_irqaction = {
        .handler =      handle_IPI,
+#ifndef XEN
        .flags =        SA_INTERRUPT,
+#endif
        .name =         "IPI"
 };
 #endif
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/linux-xen/mm_contig.c
--- a/xen/arch/ia64/linux-xen/mm_contig.c       Fri Aug 26 13:06:49 2005
+++ b/xen/arch/ia64/linux-xen/mm_contig.c       Tue Aug 30 18:41:54 2005
@@ -191,8 +191,13 @@
         * get_zeroed_page().
         */
        if (smp_processor_id() == 0) {
+#ifdef XEN
+               cpu_data = alloc_xenheap_pages(PERCPU_PAGE_SIZE -
+                       PAGE_SIZE + get_order(NR_CPUS));
+#else
                cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+#endif
                for (cpu = 0; cpu < NR_CPUS; cpu++) {
 			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
 			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
@@ -204,6 +209,7 @@
 }
 #endif /* CONFIG_SMP */
 
+#ifndef XEN
 static int
 count_pages (u64 start, u64 end, void *arg)
 {
@@ -229,7 +235,6 @@
  * Set up the page tables.
  */
 
-#ifndef XEN
 void
 paging_init (void)
 {
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/process.c
--- a/xen/arch/ia64/process.c   Fri Aug 26 13:06:49 2005
+++ b/xen/arch/ia64/process.c   Tue Aug 30 18:41:54 2005
@@ -224,7 +224,7 @@
        regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
 #ifdef CONFIG_SMP
-#error "sharedinfo doesn't handle smp yet"
+#warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
 #endif
        regs->r31 = &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
 
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/xensetup.c
--- a/xen/arch/ia64/xensetup.c  Fri Aug 26 13:06:49 2005
+++ b/xen/arch/ia64/xensetup.c  Tue Aug 30 18:41:54 2005
@@ -26,6 +26,8 @@
 char saved_command_line[COMMAND_LINE_SIZE];
 
 struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };
+
+cpumask_t cpu_present_map;
 
 #ifdef CLONE_DOMAIN0
 struct domain *clones[CLONE_DOMAIN0];
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/xentime.c
--- a/xen/arch/ia64/xentime.c   Fri Aug 26 13:06:49 2005
+++ b/xen/arch/ia64/xentime.c   Tue Aug 30 18:41:54 2005
@@ -31,6 +31,10 @@
 #include <linux/jiffies.h>     // not included by xen/sched.h
 #endif
 #include <xen/softirq.h>
+
+#ifdef XEN
+seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
+#endif
 
 #define TIME_KEEPER_ID  0
 extern unsigned long wall_jiffies;
diff -r 2b95125015a5 -r f5c4042212b0 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Fri Aug 26 13:06:49 2005
+++ b/xen/include/asm-ia64/config.h     Tue Aug 30 18:41:54 2005
@@ -20,6 +20,22 @@
 
 #define CONFIG_EFI_PCDP
 #define CONFIG_SERIAL_SGI_L1_CONSOLE
+
+#undef CONFIG_XEN_SMP
+
+#ifdef CONFIG_XEN_SMP
+#define CONFIG_SMP 1
+#define NR_CPUS 2
+#define CONFIG_NR_CPUS 2
+#else
+#undef CONFIG_SMP
+#define NR_CPUS 1
+#define CONFIG_NR_CPUS 1
+#endif
+//#define NR_CPUS 16
+//#define CONFIG_NR_CPUS 16
+//leave SMP for a later time
+//#undef CONFIG_SMP
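+
+/*
+ * Note (editorial sketch of the toggle above, not part of the original
+ * changeset): to experiment with SMP, change the "#undef CONFIG_XEN_SMP"
+ * above to "#define CONFIG_XEN_SMP", which turns CONFIG_SMP on and
+ * switches NR_CPUS/CONFIG_NR_CPUS to 2.
+ */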
 
 #ifndef __ASSEMBLY__
 
@@ -75,13 +91,16 @@
 //#define __cond_lock(x) (x)
 #define __must_check
 #define __deprecated
+#ifndef RELOC_HIDE
+# define RELOC_HIDE(ptr, off)                                  \
+  ({ unsigned long __ptr;                                      \
+     __ptr = (unsigned long) (ptr);                            \
+    (typeof(ptr)) (__ptr + (off)); })
+#endif
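+
+/*
+ * Illustrative note (an assumption based on the standard Linux definition,
+ * not introduced by this changeset): RELOC_HIDE hides the pointer
+ * arithmetic from the compiler, so a per-CPU access such as
+ *
+ *	RELOC_HIDE(&per_cpu__foo, __per_cpu_offset[cpu])
+ *
+ * cannot be folded or bounds-checked against the original object.
+ */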
 
 // xen/include/asm/config.h
 #define HZ 100
-// leave SMP for a later time
-#define NR_CPUS 1
-//#define NR_CPUS 16
-//#define CONFIG_NR_CPUS 16
+// FIXME SMP: leave SMP for a later time
 #define barrier() __asm__ __volatile__("": : :"memory")
 
 ///////////////////////////////////////////////////////////////
@@ -99,13 +118,18 @@
 
 // from include/asm-ia64/smp.h
 #ifdef CONFIG_SMP
-#error "Lots of things to fix to enable CONFIG_SMP!"
-#endif
+#warning "Lots of things to fix to enable CONFIG_SMP!"
+#endif
+// FIXME SMP
 #define        get_cpu()       0
 #define put_cpu()      do {} while(0)
 
 // needed for common/dom0_ops.c until hyperthreading is supported
+#ifdef CONFIG_SMP
+extern int smp_num_siblings;
+#else
 #define smp_num_siblings 1
+#endif
 
 // from linux/include/linux/mm.h
 struct page;
@@ -253,10 +277,6 @@
 
 #define CONFIG_MCKINLEY
 
-//#define CONFIG_SMP 1
-//#define CONFIG_NR_CPUS 2
-//leave SMP for a later time
-#undef CONFIG_SMP
 #undef CONFIG_X86_LOCAL_APIC
 #undef CONFIG_X86_IO_APIC
 #undef CONFIG_X86_L1_CACHE_SHIFT
diff -r 2b95125015a5 -r f5c4042212b0 xen/include/asm-ia64/linux-xen/asm/pal.h
--- a/xen/include/asm-ia64/linux-xen/asm/pal.h  Fri Aug 26 13:06:49 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/pal.h  Tue Aug 30 18:41:54 2005
@@ -67,6 +67,7 @@
 #define PAL_REGISTER_INFO      39      /* return AR and CR register information*/
 #define PAL_SHUTDOWN           40      /* enter processor shutdown state */
 #define PAL_PREFETCH_VISIBILITY        41      /* Make Processor Prefetches Visible */
+#define PAL_LOGICAL_TO_PHYSICAL 42     /* returns information on logical to physical processor mapping */
 
 #define PAL_COPY_PAL           256     /* relocate PAL procedures and PAL PMI */
 #define PAL_HALT_INFO          257     /* return the low power capabilities of processor */
@@ -1559,7 +1560,76 @@
        return iprv.status;
 }
 
+/* data structure for getting information on logical to physical mappings */
+typedef union pal_log_overview_u {
+       struct {
+               u64     num_log         :16,    /* Total number of logical
+                                                * processors on this die
+                                                */
+                       tpc             :8,     /* Threads per core */
+                       reserved3       :8,     /* Reserved */
+                       cpp             :8,     /* Cores per processor */
+                       reserved2       :8,     /* Reserved */
+                       ppid            :8,     /* Physical processor ID */
+                       reserved1       :8;     /* Reserved */
+       } overview_bits;
+       u64 overview_data;
+} pal_log_overview_t;
+
+typedef union pal_proc_n_log_info1_u{
+       struct {
+               u64     tid             :16,    /* Thread id */
+                       reserved2       :16,    /* Reserved */
+                       cid             :16,    /* Core id */
+                       reserved1       :16;    /* Reserved */
+       } ppli1_bits;
+       u64     ppli1_data;
+} pal_proc_n_log_info1_t;
+
+typedef union pal_proc_n_log_info2_u {
+       struct {
+               u64     la              :16,    /* Logical address */
+                       reserved        :48;    /* Reserved */
+       } ppli2_bits;
+       u64     ppli2_data;
+} pal_proc_n_log_info2_t;
+
+typedef struct pal_logical_to_physical_s
+{
+       pal_log_overview_t overview;
+       pal_proc_n_log_info1_t ppli1;
+       pal_proc_n_log_info2_t ppli2;
+} pal_logical_to_physical_t;
+
+#define overview_num_log       overview.overview_bits.num_log
+#define overview_tpc           overview.overview_bits.tpc
+#define overview_cpp           overview.overview_bits.cpp
+#define overview_ppid          overview.overview_bits.ppid
+#define log1_tid               ppli1.ppli1_bits.tid
+#define log1_cid               ppli1.ppli1_bits.cid
+#define log2_la                        ppli2.ppli2_bits.la
+
+/* Get information on logical to physical processor mappings. */
+static inline s64
+ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
+{
+       struct ia64_pal_retval iprv;
+
+       PAL_CALL(iprv, PAL_LOGICAL_TO_PHYSICAL, proc_number, 0, 0);
+
+       if (iprv.status == PAL_STATUS_SUCCESS)
+       {
+               if (proc_number == 0)
+                       mapping->overview.overview_data = iprv.v0;
+               mapping->ppli1.ppli1_data = iprv.v1;
+               mapping->ppli2.ppli2_data = iprv.v2;
+       }
+
+       return iprv.status;
+}
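+
+/*
+ * Hedged usage sketch (illustrative only, not part of this changeset):
+ * a boot-time caller might combine the call with the accessor macros
+ * defined above, e.g.
+ *
+ *	pal_logical_to_physical_t info;
+ *	if (ia64_pal_logical_to_phys(0, &info) == PAL_STATUS_SUCCESS)
+ *		printk(KERN_INFO "cores/socket=%d threads/core=%d\n",
+ *		       (int) info.overview_cpp, (int) info.overview_tpc);
+ */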
+#ifdef XEN
 #include <asm/vmx_pal.h>
+#endif
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_IA64_PAL_H */
diff -r 2b95125015a5 -r f5c4042212b0 xen/include/asm-ia64/linux-xen/asm/processor.h
--- a/xen/include/asm-ia64/linux-xen/asm/processor.h    Fri Aug 26 13:06:49 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/processor.h    Tue Aug 30 18:41:54 2005
@@ -164,6 +164,13 @@
 #ifdef CONFIG_SMP
        __u64 loops_per_jiffy;
        int cpu;
+       __u32 socket_id;        /* physical processor socket id */
+       __u16 core_id;          /* core id */
+       __u16 thread_id;        /* thread id */
+       __u16 num_log;          /* Total number of logical processors on
+                                * this socket that were successfully booted */
+       __u8  cores_per_socket; /* Cores per processor socket */
+       __u8  threads_per_core; /* Threads per core */
 #endif
 
        /* CPUID-derived information: */
diff -r 2b95125015a5 -r f5c4042212b0 xen/include/asm-ia64/linux-xen/asm/system.h
--- a/xen/include/asm-ia64/linux-xen/asm/system.h       Fri Aug 26 13:06:49 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/system.h       Tue Aug 30 18:41:54 2005
@@ -247,9 +247,9 @@
  */
 # define switch_to(prev,next,last) do {					\
 	if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {	\
-		ia64_psr(ia64_task_regs(prev))->mfh = 0;			\
-		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
-		__ia64_save_fpu((prev)->thread.fph);				\
+		/* ia64_psr(ia64_task_regs(prev))->mfh = 0; */			\
+		/* (prev)->thread.flags |= IA64_THREAD_FPH_VALID; */		\
+		/* __ia64_save_fpu((prev)->thread.fph); */			\
 	}									\
 	__switch_to(prev, next, last);						\
 } while (0)
diff -r 2b95125015a5 -r f5c4042212b0 xen/include/asm-ia64/linux/asm/sal.h
--- a/xen/include/asm-ia64/linux/asm/sal.h      Fri Aug 26 13:06:49 2005
+++ b/xen/include/asm-ia64/linux/asm/sal.h      Tue Aug 30 18:41:54 2005
@@ -91,6 +91,7 @@
 #define SAL_PCI_CONFIG_READ            0x01000010
 #define SAL_PCI_CONFIG_WRITE           0x01000011
 #define SAL_FREQ_BASE                  0x01000012
+#define SAL_PHYSICAL_ID_INFO           0x01000013
 
 #define SAL_UPDATE_PAL                 0x01000020
 
@@ -815,6 +816,17 @@
        return isrv.status;
 }
 
+/* Get physical processor die mapping in the platform. */
+static inline s64
+ia64_sal_physical_id_info(u16 *splid)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
+       if (splid)
+               *splid = isrv.v0;
+       return isrv.status;
+}
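+
+/*
+ * Hedged usage sketch (illustrative only, not part of this changeset):
+ * a caller would typically treat a negative status as "not implemented"
+ * on firmware that predates this call.
+ *
+ *	u16 pltid;
+ *	if (ia64_sal_physical_id_info(&pltid) == 0)
+ *		printk(KERN_INFO "SAL: physical die id 0x%x\n", pltid);
+ */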
+
 extern unsigned long sal_platform_features;
 
 extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
@@ -832,6 +844,44 @@
                                   u64, u64, u64, u64, u64);
 extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
                                      u64, u64, u64, u64, u64);
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * System Abstraction Layer Specification
+ * Section 3.2.5.1: OS_BOOT_RENDEZ to SAL return State.
+ * Note: region regs are stored first in head.S _start. Hence they must
+ * stay up front.
+ */
+struct sal_to_os_boot {
+       u64 rr[8];              /* Region Registers */
+       u64     br[6];          /* br0: return addr into SAL boot rendez routine */
+       u64 gr1;                /* SAL:GP */
+       u64 gr12;               /* SAL:SP */
+       u64 gr13;               /* SAL: Task Pointer */
+       u64 fpsr;
+       u64     pfs;
+       u64 rnat;
+       u64 unat;
+       u64 bspstore;
+       u64 dcr;                /* Default Control Register */
+       u64 iva;
+       u64 pta;
+       u64 itv;
+       u64 pmv;
+       u64 cmcv;
+       u64 lrr[2];
+       u64 gr[4];
+       u64 pr;                 /* Predicate registers */
+       u64 lc;                 /* Loop Count */
+       struct ia64_fpreg fp[20];
+};
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+extern void ia64_jump_to_sal(struct sal_to_os_boot *);
+#endif
 
 extern void ia64_sal_handler_init(void *entry_point, void *gpval);
 
diff -r 2b95125015a5 -r f5c4042212b0 xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h       Fri Aug 26 13:06:49 2005
+++ b/xen/include/asm-ia64/vhpt.h       Tue Aug 30 18:41:54 2005
@@ -129,7 +129,7 @@
 #define VHPT_CCHAIN_LOOKUP(Name, i_or_d)
 #else
 #ifdef CONFIG_SMP
-#error "VHPT_CCHAIN_LOOKUP needs a semaphore on the VHPT!"
+#warning "FIXME SMP: VHPT_CCHAIN_LOOKUP needs a semaphore on the VHPT!"
 #endif
 
 // VHPT_CCHAIN_LOOKUP is intended to run with psr.i+ic off
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/linux-xen/sal.c
--- /dev/null   Fri Aug 26 13:06:49 2005
+++ b/xen/arch/ia64/linux-xen/sal.c     Tue Aug 30 18:41:54 2005
@@ -0,0 +1,305 @@
+/*
+ * System Abstraction Layer (SAL) interface routines.
+ *
+ * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ */
+#include <linux/config.h>
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+#include <asm/sal.h>
+#include <asm/pal.h>
+#ifdef XEN
+#include <linux/smp.h>
+#endif
+
+ __cacheline_aligned DEFINE_SPINLOCK(sal_lock);
+unsigned long sal_platform_features;
+
+unsigned short sal_revision;
+unsigned short sal_version;
+
+#define SAL_MAJOR(x) ((x) >> 8)
+#define SAL_MINOR(x) ((x) & 0xff)
+
+static struct {
+       void *addr;     /* function entry point */
+       void *gpval;    /* gp value to use */
+} pdesc;
+
+static long
+default_handler (void)
+{
+       return -1;
+}
+
+ia64_sal_handler ia64_sal = (ia64_sal_handler) default_handler;
+ia64_sal_desc_ptc_t *ia64_ptc_domain_info;
+
+const char *
+ia64_sal_strerror (long status)
+{
+       const char *str;
+       switch (status) {
+             case 0: str = "Call completed without error"; break;
+             case 1: str = "Effect a warm boot of the system to complete "
+                             "the update"; break;
+             case -1: str = "Not implemented"; break;
+             case -2: str = "Invalid argument"; break;
+             case -3: str = "Call completed with error"; break;
+             case -4: str = "Virtual address not registered"; break;
+             case -5: str = "No information available"; break;
+             case -6: str = "Insufficient space to add the entry"; break;
+             case -7: str = "Invalid entry_addr value"; break;
+             case -8: str = "Invalid interrupt vector"; break;
+             case -9: str = "Requested memory not available"; break;
+             case -10: str = "Unable to write to the NVM device"; break;
+             case -11: str = "Invalid partition type specified"; break;
+             case -12: str = "Invalid NVM_Object id specified"; break;
+             case -13: str = "NVM_Object already has the maximum number "
+                               "of partitions"; break;
+             case -14: str = "Insufficient space in partition for the "
+                               "requested write sub-function"; break;
+             case -15: str = "Insufficient data buffer space for the "
+                               "requested read record sub-function"; break;
+             case -16: str = "Scratch buffer required for the write/delete "
+                               "sub-function"; break;
+             case -17: str = "Insufficient space in the NVM_Object for the "
+                               "requested create sub-function"; break;
+             case -18: str = "Invalid value specified in the partition_rec "
+                               "argument"; break;
+             case -19: str = "Record oriented I/O not supported for this "
+                               "partition"; break;
+             case -20: str = "Bad format of record to be written or "
+                               "required keyword variable not "
+                               "specified"; break;
+             default: str = "Unknown SAL status code"; break;
+       }
+       return str;
+}
+
+void __init
+ia64_sal_handler_init (void *entry_point, void *gpval)
+{
+       /* fill in the SAL procedure descriptor and point ia64_sal to it: */
+       pdesc.addr = entry_point;
+       pdesc.gpval = gpval;
+       ia64_sal = (ia64_sal_handler) &pdesc;
+}
+
+static void __init
+check_versions (struct ia64_sal_systab *systab)
+{
+       sal_revision = (systab->sal_rev_major << 8) | systab->sal_rev_minor;
+       sal_version = (systab->sal_b_rev_major << 8) | systab->sal_b_rev_minor;
+
+       /* Check for broken firmware */
+       if ((sal_revision == SAL_VERSION_CODE(49, 29))
+           && (sal_version == SAL_VERSION_CODE(49, 29)))
+       {
+               /*
+                * Old firmware for zx2000 prototypes has this weird version number,
+                * reset it to something sane.
+                */
+               sal_revision = SAL_VERSION_CODE(2, 8);
+               sal_version = SAL_VERSION_CODE(0, 0);
+       }
+}
+
+static void __init
+sal_desc_entry_point (void *p)
+{
+       struct ia64_sal_desc_entry_point *ep = p;
+       ia64_pal_handler_init(__va(ep->pal_proc));
+       ia64_sal_handler_init(__va(ep->sal_proc), __va(ep->gp));
+}
+
+#ifdef CONFIG_SMP
+static void __init
+set_smp_redirect (int flag)
+{
+#ifndef CONFIG_HOTPLUG_CPU
+       if (no_int_routing)
+               smp_int_redirect &= ~flag;
+       else
+               smp_int_redirect |= flag;
+#else
+       /*
+        * For CPU hotplug we don't want to do any chipset-supported
+        * interrupt redirection, because that would require stopping
+        * all interrupts and hard-binding the irq to a cpu.  Later,
+        * when the interrupt fires, we would need to set the redirect
+        * hint again in the vector.  This is cumbersome for something
+        * that the user-mode irq balancer solves anyway.
+        */
+       no_int_routing=1;
+       smp_int_redirect &= ~flag;
+#endif
+}
+#else
+#define set_smp_redirect(flag) do { } while (0)
+#endif
+
+static void __init
+sal_desc_platform_feature (void *p)
+{
+       struct ia64_sal_desc_platform_feature *pf = p;
+       sal_platform_features = pf->feature_mask;
+
+       printk(KERN_INFO "SAL Platform features:");
+       if (!sal_platform_features) {
+               printk(" None\n");
+               return;
+       }
+
+       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_BUS_LOCK)
+               printk(" BusLock");
+       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT) {
+               printk(" IRQ_Redirection");
+               set_smp_redirect(SMP_IRQ_REDIRECTION);
+       }
+       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT) {
+               printk(" IPI_Redirection");
+               set_smp_redirect(SMP_IPI_REDIRECTION);
+       }
+       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)
+               printk(" ITC_Drift");
+       printk("\n");
+}
+
+#ifdef CONFIG_SMP
+static void __init
+sal_desc_ap_wakeup (void *p)
+{
+       struct ia64_sal_desc_ap_wakeup *ap = p;
+
+       switch (ap->mechanism) {
+       case IA64_SAL_AP_EXTERNAL_INT:
+               ap_wakeup_vector = ap->vector;
+               printk(KERN_INFO "SAL: AP wakeup using external interrupt "
+                               "vector 0x%lx\n", ap_wakeup_vector);
+               break;
+       default:
+               printk(KERN_ERR "SAL: AP wakeup mechanism unsupported!\n");
+               break;
+       }
+}
+
+static void __init
+chk_nointroute_opt(void)
+{
+       char *cp;
+       extern char saved_command_line[];
+
+       for (cp = saved_command_line; *cp; ) {
+               if (memcmp(cp, "nointroute", 10) == 0) {
+                       no_int_routing = 1;
+                       printk ("no_int_routing on\n");
+                       break;
+               } else {
+                       while (*cp != ' ' && *cp)
+                               ++cp;
+                       while (*cp == ' ')
+                               ++cp;
+               }
+       }
+}
+
+#else
+static void __init sal_desc_ap_wakeup(void *p) { }
+#endif
+
+void __init
+ia64_sal_init (struct ia64_sal_systab *systab)
+{
+       char *p;
+       int i;
+
+       if (!systab) {
+               printk(KERN_WARNING "Hmm, no SAL System Table.\n");
+               return;
+       }
+
+       if (strncmp(systab->signature, "SST_", 4) != 0)
+               printk(KERN_ERR "bad signature in system table!");
+
+       check_versions(systab);
+#ifdef CONFIG_SMP
+       chk_nointroute_opt();
+#endif
+
+       /* revisions are coded in BCD, so %x does the job for us */
+       printk(KERN_INFO "SAL %x.%x: %.32s %.32s%sversion %x.%x\n",
+                       SAL_MAJOR(sal_revision), SAL_MINOR(sal_revision),
+                       systab->oem_id, systab->product_id,
+                       systab->product_id[0] ? " " : "",
+                       SAL_MAJOR(sal_version), SAL_MINOR(sal_version));
+
+       p = (char *) (systab + 1);
+       for (i = 0; i < systab->entry_count; i++) {
+               /*
+                * The first byte of each entry type contains the type
+                * descriptor.
+                */
+               switch (*p) {
+               case SAL_DESC_ENTRY_POINT:
+                       sal_desc_entry_point(p);
+                       break;
+               case SAL_DESC_PLATFORM_FEATURE:
+                       sal_desc_platform_feature(p);
+                       break;
+               case SAL_DESC_PTC:
+                       ia64_ptc_domain_info = (ia64_sal_desc_ptc_t *)p;
+                       break;
+               case SAL_DESC_AP_WAKEUP:
+                       sal_desc_ap_wakeup(p);
+                       break;
+               }
+               p += SAL_DESC_SIZE(*p);
+       }
+}
+
+int
+ia64_sal_oemcall(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
+                u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7)
+{
+       if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
+               return -1;
+       SAL_CALL(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+       return 0;
+}
+EXPORT_SYMBOL(ia64_sal_oemcall);
+
+int
+ia64_sal_oemcall_nolock(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
+                       u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
+                       u64 arg7)
+{
+       if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
+               return -1;
+       SAL_CALL_NOLOCK(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
+                       arg7);
+       return 0;
+}
+EXPORT_SYMBOL(ia64_sal_oemcall_nolock);
+
+int
+ia64_sal_oemcall_reentrant(struct ia64_sal_retval *isrvp, u64 oemfunc,
+                          u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+                          u64 arg6, u64 arg7)
+{
+       if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
+               return -1;
+       SAL_CALL_REENTRANT(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
+                          arg7);
+       return 0;
+}
+EXPORT_SYMBOL(ia64_sal_oemcall_reentrant);
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/linux-xen/smp.c
--- /dev/null   Fri Aug 26 13:06:49 2005
+++ b/xen/arch/ia64/linux-xen/smp.c     Tue Aug 30 18:41:54 2005
@@ -0,0 +1,427 @@
+/*
+ * SMP Support
+ *
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * Lots of stuff stolen from arch/alpha/kernel/smp.c
+ *
+ * 01/05/16 Rohit Seth <rohit.seth@xxxxxxxxx>  IA64-SMP functions. Reorganized
+ * the existing code (on the lines of x86 port).
+ * 00/09/11 David Mosberger <davidm@xxxxxxxxxx> Do loops_per_jiffy
+ * calibration on each CPU.
+ * 00/08/23 Asit Mallick <asit.k.mallick@xxxxxxxxx> fixed logical processor id
+ * 00/03/31 Rohit Seth <rohit.seth@xxxxxxxxx>  Fixes for Bootstrap Processor
+ * & cpu_online_map now gets done here (instead of setup.c)
+ * 99/10/05 davidm     Update to bring it in sync with new command-line processing
+ *  scheme.
+ * 10/13/00 Goutham Rao <goutham.rao@xxxxxxxxx> Updated smp_call_function and
+ *             smp_call_function_single to resend IPI on timeouts
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/cache.h>
+#include <linux/delay.h>
+#include <linux/efi.h>
+#include <linux/bitops.h>
+
+#include <asm/atomic.h>
+#include <asm/current.h>
+#include <asm/delay.h>
+#include <asm/machvec.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/sal.h>
+#include <asm/system.h>
+#include <asm/tlbflush.h>
+#include <asm/unistd.h>
+#include <asm/mca.h>
+#ifdef XEN
+#include <asm/hw_irq.h>
+#endif
+
+#ifdef XEN
+// FIXME: MOVE ELSEWHERE
+//Huh? This seems to be used on ia64 even if !CONFIG_SMP
+void flush_tlb_mask(cpumask_t mask)
+{
+       dummy();
+}
+//#if CONFIG_SMP || IA64
+#if CONFIG_SMP
+//Huh? This seems to be used on ia64 even if !CONFIG_SMP
+void smp_send_event_check_mask(cpumask_t mask)
+{
+       dummy();
+       //send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
+}
+
+
+//Huh? This seems to be used on ia64 even if !CONFIG_SMP
+int try_flush_tlb_mask(cpumask_t mask)
+{
+       dummy();
+       return 1;
+}
+#endif
+#endif
+
+#ifdef CONFIG_SMP      /* ifdef XEN */
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise static memory
+ * requirements. It also looks cleaner.
+ */
+static  __cacheline_aligned DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+       void (*func) (void *info);
+       void *info;
+       long wait;
+       atomic_t started;
+       atomic_t finished;
+};
+
+static volatile struct call_data_struct *call_data;
+
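+/*
+ * Protocol sketch (a summary of the code below, not a new mechanism):
+ * the sender publishes a stack-allocated call_data_struct through
+ * call_data while holding call_lock, raises IPI_CALL_FUNC, then spins
+ * on data.started (and on data.finished when wait is set).  handle_IPI()
+ * bumps started before calling func and finished after it returns, so
+ * the sender knows when the callee is done with the sender's stack frame.
+ */
+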
+#define IPI_CALL_FUNC          0
+#define IPI_CPU_STOP           1
+
+/* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
+static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+
+extern void cpu_halt (void);
+
+void
+lock_ipi_calllock(void)
+{
+       spin_lock_irq(&call_lock);
+}
+
+void
+unlock_ipi_calllock(void)
+{
+       spin_unlock_irq(&call_lock);
+}
+
+static void
+stop_this_cpu (void)
+{
+       /*
+        * Remove this CPU:
+        */
+       cpu_clear(smp_processor_id(), cpu_online_map);
+       max_xtp();
+       local_irq_disable();
+#ifndef XEN
+       cpu_halt();
+#endif
+}
+
+void
+cpu_die(void)
+{
+       max_xtp();
+       local_irq_disable();
+#ifndef XEN
+       cpu_halt();
+#endif
+       /* Should never be here */
+       BUG();
+       for (;;);
+}
+
+irqreturn_t
+handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
+{
+       int this_cpu = get_cpu();
+       unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
+       unsigned long ops;
+
+       mb();   /* Order interrupt and bit testing. */
+       while ((ops = xchg(pending_ipis, 0)) != 0) {
+               mb();   /* Order bit clearing and data access. */
+               do {
+                       unsigned long which;
+
+                       which = ffz(~ops);
+                       ops &= ~(1 << which);
+
+                       switch (which) {
+                             case IPI_CALL_FUNC:
+                             {
+                                     struct call_data_struct *data;
+                                     void (*func)(void *info);
+                                     void *info;
+                                     int wait;
+
+                                     /* release the 'pointer lock' */
+                                     data = (struct call_data_struct *) call_data;
+                                     func = data->func;
+                                     info = data->info;
+                                     wait = data->wait;
+
+                                     mb();
+                                     atomic_inc(&data->started);
+                                     /*
+                                      * At this point the structure may be gone unless
+                                      * wait is true.
+                                      */
+                                     (*func)(info);
+
+                                     /* Notify the sending CPU that the task is done.  */
+                                     mb();
+                                     if (wait)
+                                             atomic_inc(&data->finished);
+                             }
+                             break;
+
+                             case IPI_CPU_STOP:
+                               stop_this_cpu();
+                               break;
+
+                             default:
+                               printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
+                               break;
+                       }
+               } while (ops);
+               mb();   /* Order data access and bit testing. */
+       }
+       put_cpu();
+       return IRQ_HANDLED;
+}
+
+/*
+ * Called with preemption disabled.
+ */
+static inline void
+send_IPI_single (int dest_cpu, int op)
+{
+       set_bit(op, &per_cpu(ipi_operation, dest_cpu));
+       platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
+}
+
+/*
+ * Called with preemption disabled.
+ */
+static inline void
+send_IPI_allbutself (int op)
+{
+       unsigned int i;
+
+       for (i = 0; i < NR_CPUS; i++) {
+               if (cpu_online(i) && i != smp_processor_id())
+                       send_IPI_single(i, op);
+       }
+}
+
+/*
+ * Called with preemption disabled.
+ */
+static inline void
+send_IPI_all (int op)
+{
+       int i;
+
+       for (i = 0; i < NR_CPUS; i++)
+               if (cpu_online(i))
+                       send_IPI_single(i, op);
+}
+
+/*
+ * Called with preemption disabled.
+ */
+static inline void
+send_IPI_self (int op)
+{
+       send_IPI_single(smp_processor_id(), op);
+}
+
+/*
+ * Called with preemption disabled.
+ */
+void
+smp_send_reschedule (int cpu)
+{
+       platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_flush_tlb_all (void)
+{
+       on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+}
+
+void
+smp_flush_tlb_mm (struct mm_struct *mm)
+{
+       preempt_disable();
+       /* this happens for the common case of a single-threaded fork():  */
+       if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
+       {
+               local_finish_flush_tlb_mm(mm);
+               preempt_enable();
+               return;
+       }
+
+       preempt_enable();
+       /*
+        * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
+        * have been running in the address space.  It's not clear that this is worth the
+        * trouble though: to avoid races, we have to raise the IPI on the target CPU
+        * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
+        * rather trivial.
+        */
+       on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+}
+
+/*
+ * Run a function on another CPU
+ *  <func>     The function to run. This must be fast and non-blocking.
+ *  <info>     An arbitrary pointer to pass to the function.
+ *  <nonatomic>        Currently unused.
+ *  <wait>     If true, wait until function has completed on other CPUs.
+ *  [RETURNS]   0 on success, else a negative status code.
+ *
+ * Does not return until the remote CPU is nearly ready to execute <func>,
+ * or is executing or has executed it.
+ */
+
+int
+smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
+                         int wait)
+{
+       struct call_data_struct data;
+       int cpus = 1;
+       int me = get_cpu(); /* prevent preemption and reschedule on another processor */
+
+       if (cpuid == me) {
+               printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
+               put_cpu();
+               return -EBUSY;
+       }
+
+       data.func = func;
+       data.info = info;
+       atomic_set(&data.started, 0);
+       data.wait = wait;
+       if (wait)
+               atomic_set(&data.finished, 0);
+
+#ifdef XEN
+       spin_lock(&call_lock);
+#else
+       spin_lock_bh(&call_lock);
+#endif
+
+       call_data = &data;
+       mb();   /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
+       send_IPI_single(cpuid, IPI_CALL_FUNC);
+
+       /* Wait for response */
+       while (atomic_read(&data.started) != cpus)
+               cpu_relax();
+
+       if (wait)
+               while (atomic_read(&data.finished) != cpus)
+                       cpu_relax();
+       call_data = NULL;
+
+#ifdef XEN
+       spin_unlock(&call_lock);
+#else
+       spin_unlock_bh(&call_lock);
+#endif
+       put_cpu();
+       return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);
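+
+/*
+ * Hedged usage sketch (illustrative only, not part of this changeset):
+ * ask one remote CPU to flush its TLB and wait for it to finish.
+ *
+ *	smp_call_function_single(cpu, (void (*)(void *))local_flush_tlb_all,
+ *				 NULL, 1, 1);
+ */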
+
+/*
+ * this function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+ */
+
+/*
+ *  [SUMMARY]  Run a function on all other CPUs.
+ *  <func>     The function to run. This must be fast and non-blocking.
+ *  <info>     An arbitrary pointer to pass to the function.
+ *  <nonatomic>        currently unused.
+ *  <wait>     If true, wait (atomically) until function has completed on other CPUs.
+ *  [RETURNS]   0 on success, else a negative status code.
+ *
+ * Does not return until remote CPUs are nearly ready to execute <func>,
+ * or are executing or have executed it.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int
+smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+{
+       struct call_data_struct data;
+       int cpus = num_online_cpus()-1;
+
+       if (!cpus)
+               return 0;
+
+       /* Can deadlock when called with interrupts disabled */
+#ifdef XEN
+       if (irqs_disabled()) panic("smp_call_function called with interrupts disabled\n");
+#else
+       WARN_ON(irqs_disabled());
+#endif
+
+       data.func = func;
+       data.info = info;
+       atomic_set(&data.started, 0);
+       data.wait = wait;
+       if (wait)
+               atomic_set(&data.finished, 0);
+
+       spin_lock(&call_lock);
+
+       call_data = &data;
+       mb();   /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
+       send_IPI_allbutself(IPI_CALL_FUNC);
+
+       /* Wait for response */
+       while (atomic_read(&data.started) != cpus)
+               cpu_relax();
+
+       if (wait)
+               while (atomic_read(&data.finished) != cpus)
+                       cpu_relax();
+       call_data = NULL;
+
+       spin_unlock(&call_lock);
+       return 0;
+}
+EXPORT_SYMBOL(smp_call_function);
+
+/*
+ * this function calls the 'stop' function on all other CPUs in the system.
+ */
+void
+smp_send_stop (void)
+{
+       send_IPI_allbutself(IPI_CPU_STOP);
+}
+
+int __init
+setup_profiling_timer (unsigned int multiplier)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_SMP ifdef XEN */
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/linux-xen/smpboot.c
--- /dev/null   Fri Aug 26 13:06:49 2005
+++ b/xen/arch/ia64/linux-xen/smpboot.c Tue Aug 30 18:41:54 2005
@@ -0,0 +1,903 @@
+/*
+ * SMP boot-related support
+ *
+ * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 2001, 2004-2005 Intel Corp
+ *     Rohit Seth <rohit.seth@xxxxxxxxx>
+ *     Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ *     Gordon Jin <gordon.jin@xxxxxxxxx>
+ *     Ashok Raj  <ashok.raj@xxxxxxxxx>
+ *
+ * 01/05/16 Rohit Seth <rohit.seth@xxxxxxxxx>  Moved SMP booting functions from smp.c to here.
+ * 01/04/27 David Mosberger <davidm@xxxxxxxxxx>        Added ITC synching code.
+ * 02/07/31 David Mosberger <davidm@xxxxxxxxxx>        Switch over to hotplug-CPU boot-sequence.
+ *                                             smp_boot_cpus()/smp_commence() is replaced by
+ *                                             smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
+ * 04/06/21 Ashok Raj          <ashok.raj@xxxxxxxxx> Added CPU Hotplug Support
+ * 04/12/26 Jin Gordon <gordon.jin@xxxxxxxxx>
+ * 04/12/26 Rohit Seth <rohit.seth@xxxxxxxxx>
+ *                                             Add multi-threading and multi-core detection
+ * 05/01/30 Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ *                                             Setup cpu_sibling_map and cpu_core_map
+ */
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/bootmem.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>    /* hg add me */
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/efi.h>
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+
+#include <asm/atomic.h>
+#include <asm/cache.h>
+#include <asm/current.h>
+#include <asm/delay.h>
+#include <asm/ia32.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/machvec.h>
+#include <asm/mca.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/sal.h>
+#include <asm/system.h>
+#include <asm/tlbflush.h>
+#include <asm/unistd.h>
+
+#ifdef XEN
+#include <asm/hw_irq.h>
+int ht_per_core = 1;
+#endif
+
+#ifdef CONFIG_SMP /* ifdef XEN */
+
+#define SMP_DEBUG 0
+
+#if SMP_DEBUG
+#define Dprintk(x...)  printk(x)
+#else
+#define Dprintk(x...)
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Store all idle threads, this can be reused instead of creating
+ * a new thread. Also avoids complicated thread destroy functionality
+ * for idle threads.
+ */
+struct task_struct *idle_thread_array[NR_CPUS];
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+/*
+ * start_ap in head.S uses this to store current booting cpu
+ * info.
+ */
+struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
+
+#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
+
+#define get_idle_for_cpu(x)            (idle_thread_array[(x)])
+#define set_idle_for_cpu(x,p)  (idle_thread_array[(x)] = (p))
+
+#else
+
+#define get_idle_for_cpu(x)            (NULL)
+#define set_idle_for_cpu(x,p)
+#define set_brendez_area(x)
+#endif
+
+
+/*
+ * ITC synchronization related stuff:
+ */
+#define MASTER 0
+#define SLAVE  (SMP_CACHE_BYTES/8)
+
+#define NUM_ROUNDS     64      /* magic value */
+#define NUM_ITERS      5       /* likewise */
+
+static DEFINE_SPINLOCK(itc_sync_lock);
+static volatile unsigned long go[SLAVE + 1];
+
+#define DEBUG_ITC_SYNC 0
+
+extern void __devinit calibrate_delay (void);
+extern void start_ap (void);
+extern unsigned long ia64_iobase;
+
+task_t *task_for_booting_cpu;
+
+/*
+ * State for each CPU
+ */
+DEFINE_PER_CPU(int, cpu_state);
+
+/* Bitmasks of currently online, and possible CPUs */
+cpumask_t cpu_online_map;
+EXPORT_SYMBOL(cpu_online_map);
+cpumask_t cpu_possible_map;
+EXPORT_SYMBOL(cpu_possible_map);
+
+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+int smp_num_siblings = 1;
+int smp_num_cpucores = 1;
+
+/* which logical CPU number maps to which CPU (physical APIC ID) */
+volatile int ia64_cpu_to_sapicid[NR_CPUS];
+EXPORT_SYMBOL(ia64_cpu_to_sapicid);
+
+static volatile cpumask_t cpu_callin_map;
+
+struct smp_boot_data smp_boot_data __initdata;
+
+unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */
+
+char __initdata no_int_routing;
+
+unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
+
+static int __init
+nointroute (char *str)
+{
+       no_int_routing = 1;
+       printk ("no_int_routing on\n");
+       return 1;
+}
+
+__setup("nointroute", nointroute);
+
+void
+sync_master (void *arg)
+{
+       unsigned long flags, i;
+
+       go[MASTER] = 0;
+
+       local_irq_save(flags);
+       {
+               for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
+                       while (!go[MASTER])
+                               cpu_relax();
+                       go[MASTER] = 0;
+                       go[SLAVE] = ia64_get_itc();
+               }
+       }
+       local_irq_restore(flags);
+}
+
+/*
+ * Return the number of cycles by which our itc differs from the itc on the master
+ * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
+ * negative that it is behind.
+ */
+static inline long
+get_delta (long *rt, long *master)
+{
+       unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
+       unsigned long tcenter, t0, t1, tm;
+       long i;
+
+       for (i = 0; i < NUM_ITERS; ++i) {
+               t0 = ia64_get_itc();
+               go[MASTER] = 1;
+               while (!(tm = go[SLAVE]))
+                       cpu_relax();
+               go[SLAVE] = 0;
+               t1 = ia64_get_itc();
+
+               if (t1 - t0 < best_t1 - best_t0)
+                       best_t0 = t0, best_t1 = t1, best_tm = tm;
+       }
+
+       *rt = best_t1 - best_t0;
+       *master = best_tm - best_t0;
+
+       /* average best_t0 and best_t1 without overflow: */
+       tcenter = (best_t0/2 + best_t1/2);
+       if (best_t0 % 2 + best_t1 % 2 == 2)
+               ++tcenter;
+       return tcenter - best_tm;
+}
+
+/*
+ * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
+ * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
+ * unaccounted-for errors (such as getting a machine check in the middle of a calibration
+ * step).  The basic idea is for the slave to ask the master what itc value it has and to
+ * read its own itc before and after the master responds.  Each iteration gives us three
+ * timestamps:
+ *
+ *     slave           master
+ *
+ *     t0 ---\
+ *             ---\
+ *                --->
+ *                     tm
+ *                /---
+ *            /---
+ *     t1 <---
+ *
+ *
+ * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
+ * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
+ * between the slave and the master is symmetric.  Even if the interconnect were
+ * asymmetric, we would still know that the synchronization error is smaller than the
+ * roundtrip latency (t1 - t0).
+ *
+ * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
+ * within one or two cycles.  However, we can only *guarantee* that the synchronization is
+ * accurate to within a round-trip time, which is typically in the range of several
+ * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc's are usually
+ * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
+ * than half a microsecond or so.
+ */
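+/*
+ * Worked example (illustrative numbers only, added for clarity): if one
+ * get_delta() iteration reads t0=1000 and t1=1500 around a master timestamp
+ * tm=1300, the midpoint is tcenter=1250, so delta = tcenter - tm = -50 and
+ * the first adjustment is adj = -delta = +50 cycles; the residual error is
+ * bounded by the roundtrip time t1 - t0 = 500 cycles.
+ */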
+void
+ia64_sync_itc (unsigned int master)
+{
+       long i, delta, adj, adjust_latency = 0, done = 0;
+       unsigned long flags, rt, master_time_stamp, bound;
+#if DEBUG_ITC_SYNC
+       struct {
+               long rt;        /* roundtrip time */
+               long master;    /* master's timestamp */
+               long diff;      /* difference between midpoint and master's timestamp */
+               long lat;       /* estimate of itc adjustment latency */
+       } t[NUM_ROUNDS];
+#endif
+
+       /*
+        * Make sure local timer ticks are disabled while we sync.  If
+        * they were enabled, we'd have to worry about nasty issues
+        * like setting the ITC ahead of (or a long time before) the
+        * next scheduled tick.
+        */
+       BUG_ON((ia64_get_itv() & (1 << 16)) == 0);
+
+       go[MASTER] = 1;
+
+       if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+               printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
+               return;
+       }
+
+       while (go[MASTER])
+               cpu_relax();    /* wait for master to be ready */
+
+       spin_lock_irqsave(&itc_sync_lock, flags);
+       {
+               for (i = 0; i < NUM_ROUNDS; ++i) {
+                       delta = get_delta(&rt, &master_time_stamp);
+                       if (delta == 0) {
+                               done = 1;       /* let's lock on to this... */
+                               bound = rt;
+                       }
+
+                       if (!done) {
+                               if (i > 0) {
+                                       adjust_latency += -delta;
+                                       adj = -delta + adjust_latency/4;
+                               } else
+                                       adj = -delta;
+
+                               ia64_set_itc(ia64_get_itc() + adj);
+                       }
+#if DEBUG_ITC_SYNC
+                       t[i].rt = rt;
+                       t[i].master = master_time_stamp;
+                       t[i].diff = delta;
+                       t[i].lat = adjust_latency/4;
+#endif
+               }
+       }
+       spin_unlock_irqrestore(&itc_sync_lock, flags);
+
+#if DEBUG_ITC_SYNC
+       for (i = 0; i < NUM_ROUNDS; ++i)
+               printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
+                      t[i].rt, t[i].master, t[i].diff, t[i].lat);
+#endif
+
+       printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
+              "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
+}
+
+/*
+ * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
+ */
+static inline void __devinit
+smp_setup_percpu_timer (void)
+{
+}
+
+static void __devinit
+smp_callin (void)
+{
+       int cpuid, phys_id;
+       extern void ia64_init_itm(void);
+
+#ifdef CONFIG_PERFMON
+       extern void pfm_init_percpu(void);
+#endif
+
+       cpuid = smp_processor_id();
+       phys_id = hard_smp_processor_id();
+
+       if (cpu_online(cpuid)) {
+               printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
+                      phys_id, cpuid);
+               BUG();
+       }
+
+       lock_ipi_calllock();
+       cpu_set(cpuid, cpu_online_map);
+       unlock_ipi_calllock();
+       per_cpu(cpu_state, cpuid) = CPU_ONLINE;
+
+       smp_setup_percpu_timer();
+
+#ifndef XEN
+       ia64_mca_cmc_vector_setup();    /* Setup vector on AP */
+#endif
+
+#ifdef CONFIG_PERFMON
+       pfm_init_percpu();
+#endif
+
+       local_irq_enable();
+
+       if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
+               /*
+                * Synchronize the ITC with the BP.  Need to do this after irqs are
+                * enabled because ia64_sync_itc() calls smp_call_function_single(),
+                * which calls spin_unlock_bh(), which calls local_bh_enable(),
+                * which bugs out if irqs are not enabled...
+                */
+               Dprintk("Going to syncup ITC with BP.\n");
+               ia64_sync_itc(0);
+       }
+
+       /*
+        * Get our bogomips.
+        */
+       ia64_init_itm();
+#ifndef XEN
+       calibrate_delay();
+#endif
+       local_cpu_data->loops_per_jiffy = loops_per_jiffy;
+
+#ifdef CONFIG_IA32_SUPPORT
+       ia32_gdt_init();
+#endif
+
+       /*
+        * Allow the master to continue.
+        */
+       cpu_set(cpuid, cpu_callin_map);
+       Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
+}
+
+
+/*
+ * Activate a secondary processor.  head.S calls this.
+ */
+int __devinit
+start_secondary (void *unused)
+{
+       /* Early console may use I/O ports */
+       ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
+       Dprintk("start_secondary: starting CPU 0x%x\n", 
hard_smp_processor_id());
+       efi_map_pal_code();
+       cpu_init();
+       smp_callin();
+
+#ifdef XEN
+       startup_cpu_idle_loop();
+#else
+       cpu_idle();
+#endif
+       return 0;
+}
+
+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+{
+       return NULL;
+}
+
+#ifndef XEN
+struct create_idle {
+       struct task_struct *idle;
+       struct completion done;
+       int cpu;
+};
+
+void
+do_fork_idle(void *_c_idle)
+{
+       struct create_idle *c_idle = _c_idle;
+
+       c_idle->idle = fork_idle(c_idle->cpu);
+       complete(&c_idle->done);
+}
+#endif
+
+static int __devinit
+do_boot_cpu (int sapicid, int cpu)
+{
+       int timeout;
+#ifndef XEN
+       struct create_idle c_idle = {
+               .cpu    = cpu,
+               .done   = COMPLETION_INITIALIZER(c_idle.done),
+       };
+       DECLARE_WORK(work, do_fork_idle, &c_idle);
+
+       c_idle.idle = get_idle_for_cpu(cpu);
+       if (c_idle.idle) {
+               init_idle(c_idle.idle, cpu);
+               goto do_rest;
+       }
+
+       /*
+        * We can't use kernel_thread since we must avoid rescheduling the child.
+        */
+       if (!keventd_up() || current_is_keventd())
+               work.func(work.data);
+       else {
+               schedule_work(&work);
+               wait_for_completion(&c_idle.done);
+       }
+
+       if (IS_ERR(c_idle.idle))
+               panic("failed fork for CPU %d", cpu);
+
+       set_idle_for_cpu(cpu, c_idle.idle);
+
+do_rest:
+       task_for_booting_cpu = c_idle.idle;
+#endif
+
+       Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", 
ap_wakeup_vector, cpu, sapicid);
+
+       set_brendez_area(cpu);
+       platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
+
+       /*
+        * Wait 10s total for the AP to start
+        */
+       Dprintk("Waiting on callin_map ...");
+       for (timeout = 0; timeout < 100000; timeout++) {
+               if (cpu_isset(cpu, cpu_callin_map))
+                       break;  /* It has booted */
+               udelay(100);
+       }
+       Dprintk("\n");
+
+       if (!cpu_isset(cpu, cpu_callin_map)) {
+               printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
+               ia64_cpu_to_sapicid[cpu] = -1;
+               cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int __init
+decay (char *str)
+{
+       int ticks;
+       get_option (&str, &ticks);
+       return 1;
+}
+
+__setup("decay=", decay);
+
+/*
+ * Initialize the logical CPU number to SAPICID mapping
+ */
+void __init
+smp_build_cpu_map (void)
+{
+       int sapicid, cpu, i;
+       int boot_cpu_id = hard_smp_processor_id();
+
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+               ia64_cpu_to_sapicid[cpu] = -1;
+#ifdef CONFIG_HOTPLUG_CPU
+               cpu_set(cpu, cpu_possible_map);
+#endif
+       }
+
+       ia64_cpu_to_sapicid[0] = boot_cpu_id;
+       cpus_clear(cpu_present_map);
+       cpu_set(0, cpu_present_map);
+       cpu_set(0, cpu_possible_map);
+       for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
+               sapicid = smp_boot_data.cpu_phys_id[i];
+               if (sapicid == boot_cpu_id)
+                       continue;
+               cpu_set(cpu, cpu_present_map);
+               cpu_set(cpu, cpu_possible_map);
+               ia64_cpu_to_sapicid[cpu] = sapicid;
+               cpu++;
+       }
+}
+
+/*
+ * Cycle through the APs sending Wakeup IPIs to boot each.
+ */
+void __init
+smp_prepare_cpus (unsigned int max_cpus)
+{
+       int boot_cpu_id = hard_smp_processor_id();
+
+       /*
+        * Initialize the per-CPU profiling counter/multiplier
+        */
+
+       smp_setup_percpu_timer();
+
+       /*
+        * We have the boot CPU online for sure.
+        */
+       cpu_set(0, cpu_online_map);
+       cpu_set(0, cpu_callin_map);
+
+       local_cpu_data->loops_per_jiffy = loops_per_jiffy;
+       ia64_cpu_to_sapicid[0] = boot_cpu_id;
+
+       printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
+
+       current_thread_info()->cpu = 0;
+
+       /*
+        * If SMP should be disabled, then really disable it!
+        */
+       if (!max_cpus) {
+               printk(KERN_INFO "SMP mode deactivated.\n");
+               cpus_clear(cpu_online_map);
+               cpus_clear(cpu_present_map);
+               cpus_clear(cpu_possible_map);
+               cpu_set(0, cpu_online_map);
+               cpu_set(0, cpu_present_map);
+               cpu_set(0, cpu_possible_map);
+               return;
+       }
+}
+
+void __devinit smp_prepare_boot_cpu(void)
+{
+       cpu_set(smp_processor_id(), cpu_online_map);
+       cpu_set(smp_processor_id(), cpu_callin_map);
+       per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+}
+
+/*
+ * mt_info[] is a temporary store for all info returned by
+ * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
+ * specific cpu comes online.
+ */
+static struct {
+       __u32   socket_id;
+       __u16   core_id;
+       __u16   thread_id;
+       __u16   proc_fixed_addr;
+       __u8    valid;
+} mt_info[NR_CPUS] __devinitdata;
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void
+remove_from_mtinfo(int cpu)
+{
+       int i;
+
+       for_each_cpu(i)
+               if (mt_info[i].valid &&  mt_info[i].socket_id ==
+                                               cpu_data(cpu)->socket_id)
+                       mt_info[i].valid = 0;
+}
+
+static inline void
+clear_cpu_sibling_map(int cpu)
+{
+       int i;
+
+       for_each_cpu_mask(i, cpu_sibling_map[cpu])
+               cpu_clear(cpu, cpu_sibling_map[i]);
+       for_each_cpu_mask(i, cpu_core_map[cpu])
+               cpu_clear(cpu, cpu_core_map[i]);
+
+       cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+}
+
+static void
+remove_siblinginfo(int cpu)
+{
+       int last = 0;
+
+       if (cpu_data(cpu)->threads_per_core == 1 &&
+           cpu_data(cpu)->cores_per_socket == 1) {
+               cpu_clear(cpu, cpu_core_map[cpu]);
+               cpu_clear(cpu, cpu_sibling_map[cpu]);
+               return;
+       }
+
+       last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+
+       /* remove it from all sibling map's */
+       clear_cpu_sibling_map(cpu);
+
+       /* if this cpu is the last in the core group, remove all its info 
+        * from mt_info structure
+        */
+       if (last)
+               remove_from_mtinfo(cpu);
+}
+
+extern void fixup_irqs(void);
+/* must be called with cpucontrol mutex held */
+int __cpu_disable(void)
+{
+       int cpu = smp_processor_id();
+
+       /*
+        * dont permit boot processor for now
+        */
+       if (cpu == 0)
+               return -EBUSY;
+
+       remove_siblinginfo(cpu);
+       cpu_clear(cpu, cpu_online_map);
+       fixup_irqs();
+       local_flush_tlb_all();
+       cpu_clear(cpu, cpu_callin_map);
+       return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       unsigned int i;
+
+       for (i = 0; i < 100; i++) {
+               /* They ack this in play_dead by setting CPU_DEAD */
+               if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+               {
+                       printk ("CPU %d is now offline\n", cpu);
+                       return;
+               }
+               msleep(100);
+       }
+       printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
+#else /* !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+       return -ENOSYS;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       /* We said "no" in __cpu_disable */
+       BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
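
The offline handshake above is a simple bounded poll: the dying CPU advertises CPU_DEAD from its play_dead() idle path, and __cpu_die() grants it roughly ten seconds (100 iterations of msleep(100)) to get there. A hedged, self-contained model of the same pattern using C11 atomics; every name here is invented for illustration:

    /* Illustrative model of the bounded CPU_DEAD poll in __cpu_die().
     * The kernel sleeps 100ms per probe (msleep(100)); this sketch
     * just counts probes. */
    #include <stdatomic.h>
    #include <stdio.h>

    enum { CPU_ONLINE, CPU_UP_PREPARE, CPU_DEAD };
    static _Atomic int cpu_state = CPU_ONLINE;

    static int wait_for_death(void)
    {
        for (int i = 0; i < 100; i++)         /* ~10s at 100ms per probe */
            if (atomic_load(&cpu_state) == CPU_DEAD)
                return 0;                     /* ack seen: CPU offline */
        return -1;                            /* "didn't die" */
    }

    int main(void)
    {
        atomic_store(&cpu_state, CPU_DEAD);   /* what play_dead() would do */
        printf("%s\n", wait_for_death() ? "CPU didn't die" : "CPU offline");
        return 0;
    }
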
+
+void
+smp_cpus_done (unsigned int dummy)
+{
+       int cpu;
+       unsigned long bogosum = 0;
+
+       /*
+        * Allow the user to impress friends.
+        */
+
+       for (cpu = 0; cpu < NR_CPUS; cpu++)
+               if (cpu_online(cpu))
+                       bogosum += cpu_data(cpu)->loops_per_jiffy;
+
+       printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+              (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
+}
+
+static inline void __devinit
+set_cpu_sibling_map(int cpu)
+{
+       int i;
+
+       for_each_online_cpu(i) {
+               if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
+                       cpu_set(i, cpu_core_map[cpu]);
+                       cpu_set(cpu, cpu_core_map[i]);
+                       if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
+                               cpu_set(i, cpu_sibling_map[cpu]);
+                               cpu_set(cpu, cpu_sibling_map[i]);
+                       }
+               }
+       }
+}
+
+int __devinit
+__cpu_up (unsigned int cpu)
+{
+       int ret;
+       int sapicid;
+
+       sapicid = ia64_cpu_to_sapicid[cpu];
+       if (sapicid == -1)
+               return -EINVAL;
+
+       /*
+        * Already booted cpu? not valid anymore since we dont
+        * do idle loop tightspin anymore.
+        */
+       if (cpu_isset(cpu, cpu_callin_map))
+               return -EINVAL;
+
+       per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+       /* Processor goes to start_secondary(), sets online flag */
+       ret = do_boot_cpu(sapicid, cpu);
+       if (ret < 0)
+               return ret;
+
+       if (cpu_data(cpu)->threads_per_core == 1 &&
+           cpu_data(cpu)->cores_per_socket == 1) {
+               cpu_set(cpu, cpu_sibling_map[cpu]);
+               cpu_set(cpu, cpu_core_map[cpu]);
+               return 0;
+       }
+
+       set_cpu_sibling_map(cpu);
+
+       return 0;
+}
+
+/*
+ * Assume that CPU's have been discovered by some platform-dependent interface.  For
+ * SoftSDV/Lion, that would be ACPI.
+ *
+ * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
+ */
+void __init
+init_smp_config(void)
+{
+       struct fptr {
+               unsigned long fp;
+               unsigned long gp;
+       } *ap_startup;
+       long sal_ret;
+
+       /* Tell SAL where to drop the AP's.  */
+       ap_startup = (struct fptr *) start_ap;
+       sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
+                                      ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
+       if (sal_ret < 0)
+               printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
+                      ia64_sal_strerror(sal_ret));
+}
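
The struct fptr cast above reflects the ia64 calling convention: a C function pointer addresses a two-word descriptor (entry address plus gp), not the code itself, so the entry point has to be unpacked and translated to a physical address before SAL can use it. A small illustrative sketch of the unpacking (addresses invented):

    /* Illustrative only: on ia64 a "function pointer" addresses an
     * {entry point, gp} descriptor, which is why init_smp_config()
     * unpacks start_ap through struct fptr. */
    #include <stdio.h>

    struct fptr {
        unsigned long fp;   /* code entry point */
        unsigned long gp;   /* global pointer for the callee's data */
    };

    int main(void)
    {
        struct fptr ap_startup = { 0x4000000000001230UL, 0x6000000000008000UL };

        /* the real code runs each field through ia64_tpa() to get a
         * physical address before ia64_sal_set_vectors() */
        printf("entry=%#lx gp=%#lx\n", ap_startup.fp, ap_startup.gp);
        return 0;
    }
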
+
+static inline int __devinit
+check_for_mtinfo_index(void)
+{
+       int i;
+       
+       for_each_cpu(i)
+               if (!mt_info[i].valid)
+                       return i;
+
+       return -1;
+}
+
+/*
+ * Search the mt_info to find out if this socket's cid/tid information is
+ * cached or not. If the socket exists, fill in the core_id and thread_id 
+ * in cpuinfo
+ */
+static int __devinit
+check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
+{
+       int i;
+       __u32 sid = c->socket_id;
+
+       for_each_cpu(i) {
+               if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
+                   && mt_info[i].socket_id == sid) {
+                       c->core_id = mt_info[i].core_id;
+                       c->thread_id = mt_info[i].thread_id;
+                       return 1; /* not a new socket */
+               }
+       }
+       return 0;
+}
+
+/*
+ * identify_siblings(cpu) gets called from identify_cpu. This populates the 
+ * information related to logical execution units in per_cpu_data structure.
+ */
+void __devinit
+identify_siblings(struct cpuinfo_ia64 *c)
+{
+       s64 status;
+       u16 pltid;
+       u64 proc_fixed_addr;
+       int count, i;
+       pal_logical_to_physical_t info;
+
+       if (smp_num_cpucores == 1 && smp_num_siblings == 1)
+               return;
+
+       if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
+               printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
+                      status);
+               return;
+       }
+       if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
+               printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
+               return;
+       }
+       if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
+               printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
+               return;
+       }
+
+       c->socket_id =  (pltid << 8) | info.overview_ppid;
+       c->cores_per_socket = info.overview_cpp;
+       c->threads_per_core = info.overview_tpc;
+       count = c->num_log = info.overview_num_log;
+
+       /* If the thread and core id information is already cached, then
+        * we will simply update cpu_info and return. Otherwise, we will
+        * do the PAL calls and cache core and thread id's of all the siblings.
+        */
+       if (check_for_new_socket(proc_fixed_addr, c))
+               return;
+
+       for (i = 0; i < count; i++) {
+               int index;
+
+               if (i && (status = ia64_pal_logical_to_phys(i, &info))
+                         != PAL_STATUS_SUCCESS) {
+                       printk(KERN_ERR "ia64_pal_logical_to_phys failed"
+                                       " with %ld\n", status);
+                       return;
+               }
+               if (info.log2_la == proc_fixed_addr) {
+                       c->core_id = info.log1_cid;
+                       c->thread_id = info.log1_tid;
+               }
+
+               index = check_for_mtinfo_index();
+               /* We will not do the mt_info caching optimization in this case.
+                */
+               if (index < 0)
+                       continue;
+
+               mt_info[index].valid = 1;
+               mt_info[index].socket_id = c->socket_id;
+               mt_info[index].core_id = info.log1_cid;
+               mt_info[index].thread_id = info.log1_tid;
+               mt_info[index].proc_fixed_addr = info.log2_la;
+       }
+}
+#endif /* CONFIG_SMP ifdef XEN */
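
The mt_info[] cache above exists so that the PAL/SAL calls in identify_siblings() run once per socket rather than once per CPU: check_for_new_socket() keys on (socket_id, proc_fixed_addr) and copies the cached core/thread IDs on a hit. A standalone sketch of that lookup with invented values:

    /* Hedged sketch of the mt_info[] lookup in check_for_new_socket():
     * a hit copies the cached IDs, a miss means the caller must issue
     * the PAL calls.  All values invented. */
    #include <stdio.h>

    #define NR_CPUS 4

    struct mt_entry {
        unsigned socket_id, core_id, thread_id, proc_fixed_addr, valid;
    };

    static struct mt_entry mt_info[NR_CPUS] = {
        { .socket_id = 1, .core_id = 0, .thread_id = 1,
          .proc_fixed_addr = 0x11, .valid = 1 },
    };

    static int lookup(unsigned sid, unsigned la, unsigned *core, unsigned *thr)
    {
        for (int i = 0; i < NR_CPUS; i++)
            if (mt_info[i].valid && mt_info[i].socket_id == sid &&
                mt_info[i].proc_fixed_addr == la) {
                *core = mt_info[i].core_id;
                *thr  = mt_info[i].thread_id;
                return 1;                    /* cached: skip the PAL calls */
            }
        return 0;                            /* new socket: do the PAL calls */
    }

    int main(void)
    {
        unsigned core, thr;
        if (lookup(1, 0x11, &core, &thr))
            printf("cache hit: core=%u thread=%u\n", core, thr);
        return 0;
    }
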
diff -r 2b95125015a5 -r f5c4042212b0 xen/include/asm-ia64/linux-xen/asm/spinlock.h
--- /dev/null   Fri Aug 26 13:06:49 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h     Tue Aug 30 18:41:54 2005
@@ -0,0 +1,241 @@
+#ifndef _ASM_IA64_SPINLOCK_H
+#define _ASM_IA64_SPINLOCK_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ *
+ * This file is used for SMP configurations only.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/intrinsics.h>
+#include <asm/system.h>
+
+typedef struct {
+       volatile unsigned int lock;
+#ifdef CONFIG_PREEMPT
+       unsigned int break_lock;
+#endif
+#ifdef XEN
+       unsigned char recurse_cpu;
+       unsigned char recurse_cnt;
+#endif
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED                     (spinlock_t) { 0 }
+#define spin_lock_init(x)                      ((x)->lock = 0)
+
+#ifdef ASM_SUPPORTED
+/*
+ * Try to get the lock.  If we fail to get the lock, make a non-standard call to
+ * ia64_spinlock_contention().  We do not use a normal call because that would force all
+ * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that spin_lock() marks "clobbered".
+ */
+
+#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
+
+static inline void
+_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+{
+       register volatile unsigned int *ptr asm ("r31") = &lock->lock;
+
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+# ifdef CONFIG_ITANIUM
+       /* don't use brl on Itanium... */
+       asm volatile ("{\n\t"
+                     "  mov ar.ccv = r0\n\t"
+                     "  mov r28 = ip\n\t"
+                     "  mov r30 = 1;;\n\t"
+                     "}\n\t"
+                     "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
+                     "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "mov b6 = r29;;\n\t"
+                     "mov r27=%2\n\t"
+                     "(p14) br.cond.spnt.many b6"
+                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# else
+       asm volatile ("{\n\t"
+                     "  mov ar.ccv = r0\n\t"
+                     "  mov r28 = ip\n\t"
+                     "  mov r30 = 1;;\n\t"
+                     "}\n\t"
+                     "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "mov r27=%2\n\t"
+                     "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
+                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# endif /* CONFIG_MCKINLEY */
+#else
+# ifdef CONFIG_ITANIUM
+       /* don't use brl on Itanium... */
+       /* mis-declare, so we get the entry-point, not it's function descriptor: */
+       asm volatile ("mov r30 = 1\n\t"
+                     "mov r27=%2\n\t"
+                     "mov ar.ccv = r0;;\n\t"
+                     "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
+                     "movl r29 = ia64_spinlock_contention;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "mov b6 = r29;;\n\t"
+                     "(p14) br.call.spnt.many b6 = b6"
+                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# else
+       asm volatile ("mov r30 = 1\n\t"
+                     "mov r27=%2\n\t"
+                     "mov ar.ccv = r0;;\n\t"
+                     "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
+                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# endif /* CONFIG_MCKINLEY */
+#endif
+}
+#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+#else /* !ASM_SUPPORTED */
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+# define _raw_spin_lock(x)                                                             \
+do {                                                                                   \
+       __u32 *ia64_spinlock_ptr = (__u32 *) (x);                                       \
+       __u64 ia64_spinlock_val;                                                        \
+       ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);                 \
+       if (unlikely(ia64_spinlock_val)) {                                              \
+               do {                                                                    \
+                       while (*ia64_spinlock_ptr)                                      \
+                               ia64_barrier();                                         \
+                       ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
+               } while (ia64_spinlock_val);                                            \
+       }                                                                               \
+} while (0)
+#endif /* !ASM_SUPPORTED */
+
+#define spin_is_locked(x)      ((x)->lock != 0)
+#define _raw_spin_unlock(x)    do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define _raw_spin_trylock(x)   (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
+#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
+
+#ifdef XEN
+/*
+ * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
+ * reentered recursively on the same CPU. All critical regions that may form
+ * part of a recursively-nested set must be protected by these forms. If there
+ * are any critical regions that cannot form part of such a set, they can use
+ * standard spin_[un]lock().
+ */
+#define _raw_spin_lock_recursive(_lock)            \
+    do {                                           \
+        int cpu = smp_processor_id();              \
+        if ( likely((_lock)->recurse_cpu != cpu) ) \
+        {                                          \
+            spin_lock(_lock);                      \
+            (_lock)->recurse_cpu = cpu;            \
+        }                                          \
+        (_lock)->recurse_cnt++;                    \
+    } while ( 0 )
+
+#define _raw_spin_unlock_recursive(_lock)          \
+    do {                                           \
+        if ( likely(--(_lock)->recurse_cnt == 0) ) \
+        {                                          \
+            (_lock)->recurse_cpu = -1;             \
+            spin_unlock(_lock);                    \
+        }                                          \
+    } while ( 0 )
+#endif
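
The intended nesting behaviour of these forms: the first acquisition on a CPU takes the underlying lock and records the owner, further acquisitions on the same CPU only bump recurse_cnt, and only the outermost unlock clears the owner and releases the lock. A minimal single-threaded model of that bookkeeping (spin_lock()/spin_unlock() reduced to an assert and a store; all names illustrative):

    /* Minimal model of the recursion bookkeeping in the macros above. */
    #include <assert.h>
    #include <stdio.h>

    struct rlock { int lock; int recurse_cpu; int recurse_cnt; };

    static void lock_recursive(struct rlock *l, int cpu)
    {
        if (l->recurse_cpu != cpu) {   /* not ours yet: take the lock */
            assert(l->lock == 0);      /* spin_lock() would spin here */
            l->lock = 1;
            l->recurse_cpu = cpu;
        }
        l->recurse_cnt++;
    }

    static void unlock_recursive(struct rlock *l)
    {
        if (--l->recurse_cnt == 0) {   /* only the outermost unlock releases */
            l->recurse_cpu = -1;
            l->lock = 0;
        }
    }

    int main(void)
    {
        struct rlock l = { 0, -1, 0 };
        lock_recursive(&l, 2);         /* cnt = 1, lock taken */
        lock_recursive(&l, 2);         /* cnt = 2, no re-acquire */
        unlock_recursive(&l);          /* cnt = 1, still held */
        unlock_recursive(&l);          /* cnt = 0, released */
        printf("held=%d\n", l.lock);   /* prints held=0 */
        return 0;
    }
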
+
+typedef struct {
+       volatile unsigned int read_counter      : 31;
+       volatile unsigned int write_lock        :  1;
+#ifdef CONFIG_PREEMPT
+       unsigned int break_lock;
+#endif
+} rwlock_t;
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+
+#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+#define read_can_lock(rw)      (*(volatile int *)(rw) >= 0)
+#define write_can_lock(rw)     (*(volatile int *)(rw) == 0)
+
+#define _raw_read_lock(rw)                                                      \
+do {                                                                            \
+       rwlock_t *__read_lock_ptr = (rw);                                        \
+                                                                                \
+       while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {   \
+               ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                 \
+               while (*(volatile int *)__read_lock_ptr < 0)                     \
+                       cpu_relax();                                             \
+       }                                                                        \
+} while (0)
+
+#define _raw_read_unlock(rw)                                   \
+do {                                                           \
+       rwlock_t *__read_lock_ptr = (rw);                       \
+       ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
+} while (0)
+
+#ifdef ASM_SUPPORTED
+#define _raw_write_lock(rw)                                            \
+do {                                                                   \
+       __asm__ __volatile__ (                                          \
+               "mov ar.ccv = r0\n"                                     \
+               "dep r29 = -1, r0, 31, 1;;\n"                           \
+               "1:\n"                                                  \
+               "ld4 r2 = [%0];;\n"                                     \
+               "cmp4.eq p0,p7 = r0,r2\n"                               \
+               "(p7) br.cond.spnt.few 1b \n"                           \
+               "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"               \
+               "cmp4.eq p0,p7 = r0, r2\n"                              \
+               "(p7) br.cond.spnt.few 1b;;\n"                          \
+               :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");    \
+} while(0)
+
+#define _raw_write_trylock(rw)                                         \
+({                                                                     \
+       register long result;                                           \
+                                                                       \
+       __asm__ __volatile__ (                                          \
+               "mov ar.ccv = r0\n"                                     \
+               "dep r29 = -1, r0, 31, 1;;\n"                           \
+               "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"                 \
+               : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");  \
+       (result == 0);                                                  \
+})
+
+#else /* !ASM_SUPPORTED */
+
+#define _raw_write_lock(l)                                                     \
+({                                                                             \
+       __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);               \
+       __u32 *ia64_write_lock_ptr = (__u32 *) (l);                             \
+       do {                                                                    \
+               while (*ia64_write_lock_ptr)                                    \
+                       ia64_barrier();                                         \
+               ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
+       } while (ia64_val);                                                     \
+})
+
+#define _raw_write_trylock(rw)                                         \
+({                                                                     \
+       __u64 ia64_val;                                                 \
+       __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);                  \
+       ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);   \
+       (ia64_val == 0);                                                \
+})
+
+#endif /* !ASM_SUPPORTED */
+
+#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+
+#define _raw_write_unlock(x)                                                   \
+({                                                                             \
+       smp_mb__before_clear_bit();     /* need barrier before releasing lock... */ \
+       clear_bit(31, (x));                                                     \
+})
+
+#endif /*  _ASM_IA64_SPINLOCK_H */
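
The rwlock above packs a 31-bit reader count and a single write bit into one word: readers fetchadd +1 and back off while the sign bit (the write lock) is set, and a writer cmpxchg's the whole word from 0 to 1<<31. A sketch of the layout arithmetic, single-threaded and without atomics, purely for illustration:

    /* Illustrative model of the rwlock word: bit 31 is the write lock,
     * bits 0..30 the reader count. */
    #include <stdint.h>
    #include <stdio.h>

    #define WRITE_BIT (1u << 31)

    int main(void)
    {
        uint32_t w = 0;

        w += 1;                        /* reader enters: count = 1 */
        w += 1;                        /* second reader: count = 2 */
        printf("readers=%u writer=%u\n", w & ~WRITE_BIT, w >> 31);

        w -= 2;                        /* both readers leave */
        if (w == 0)
            w = WRITE_BIT;             /* writer's cmpxchg of 0 -> 1<<31 */
        printf("readers=%u writer=%u\n", w & ~WRITE_BIT, w >> 31);

        /* a late reader's fetchadd leaves the word negative when read
         * as a signed int, so _raw_read_lock() undoes it and spins */
        printf("signed view while write-locked: %d\n", (int32_t)(w + 1));
        return 0;
    }
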
diff -r 2b95125015a5 -r f5c4042212b0 xen/include/asm-ia64/linux-xen/asm/tlbflush.h
--- /dev/null   Fri Aug 26 13:06:49 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/tlbflush.h     Tue Aug 30 18:41:54 2005
@@ -0,0 +1,105 @@
+#ifndef _ASM_IA64_TLBFLUSH_H
+#define _ASM_IA64_TLBFLUSH_H
+
+/*
+ * Copyright (C) 2002 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <linux/mm.h>
+
+#include <asm/intrinsics.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+
+/*
+ * Now for some TLB flushing routines.  This is the kind of stuff that
+ * can be very expensive, so try to avoid them whenever possible.
+ */
+
+/*
+ * Flush everything (kernel mapping may also have changed due to
+ * vmalloc/vfree).
+ */
+extern void local_flush_tlb_all (void);
+
+#ifdef CONFIG_SMP
+  extern void smp_flush_tlb_all (void);
+  extern void smp_flush_tlb_mm (struct mm_struct *mm);
+# define flush_tlb_all()       smp_flush_tlb_all()
+#else
+# define flush_tlb_all()       local_flush_tlb_all()
+#endif
+
+static inline void
+local_finish_flush_tlb_mm (struct mm_struct *mm)
+{
+#ifndef XEN
+       if (mm == current->active_mm)
+               activate_context(mm);
+#endif
+}
+
+/*
+ * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
+ * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
+ * the PTEs of the parent task.
+ */
+static inline void
+flush_tlb_mm (struct mm_struct *mm)
+{
+       if (!mm)
+               return;
+
+#ifndef XEN
+       mm->context = 0;
+#endif
+
+       if (atomic_read(&mm->mm_users) == 0)
+               return;         /* happens as a result of exit_mmap() */
+
+#ifdef CONFIG_SMP
+       smp_flush_tlb_mm(mm);
+#else
+       local_finish_flush_tlb_mm(mm);
+#endif
+}
+
+extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
+
+/*
+ * Page-granular tlb flush.
+ */
+static inline void
+flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
+{
+#ifdef CONFIG_SMP
+       flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
+#else
+       if (vma->vm_mm == current->active_mm)
+               ia64_ptcl(addr, (PAGE_SHIFT << 2));
+#ifndef XEN
+       else
+               vma->vm_mm->context = 0;
+#endif
+#endif
+}
+
+/*
+ * Flush the TLB entries mapping the virtually mapped linear page
+ * table corresponding to address range [START-END).
+ */
+static inline void
+flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+       /*
+        * Deprecated.  The virtual page table is now flushed via the normal gather/flush
+        * interface (see tlb.h).
+        */
+}
+
+#define flush_tlb_kernel_range(start, end)     flush_tlb_all() /* XXX fix me */
+
+#endif /* _ASM_IA64_TLBFLUSH_H */
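
flush_tlb_page() rounds the address down to a page boundary before flushing; on SMP it delegates a one-page range to flush_tlb_range(). A trivial sketch of that rounding (PAGE_SHIFT = 14, i.e. 16KB pages, is an assumed configuration value, not taken from the patch):

    /* Trivial sketch of the page rounding in flush_tlb_page(). */
    #include <stdio.h>

    #define PAGE_SHIFT 14                    /* assumed 16KB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long addr  = 0x2000000000123456UL;   /* invented VA */
        unsigned long start = addr & PAGE_MASK;
        unsigned long end   = start + PAGE_SIZE;      /* exactly one page */

        printf("flush [%#lx, %#lx)\n", start, end);
        return 0;
    }
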
diff -r 2b95125015a5 -r f5c4042212b0 xen/include/asm-ia64/linux/notifier.h
--- /dev/null   Fri Aug 26 13:06:49 2005
+++ b/xen/include/asm-ia64/linux/notifier.h     Tue Aug 30 18:41:54 2005
@@ -0,0 +1,76 @@
+/*
+ *     Routines to manage notifier chains for passing status changes to any
+ *     interested routines. We need this instead of hard coded call lists so
+ *     that modules can poke their nose into the innards. The network devices
+ *     needed them so here they are for the rest of you.
+ *
+ *                             Alan Cox <Alan.Cox@xxxxxxxxx>
+ */
+ 
+#ifndef _LINUX_NOTIFIER_H
+#define _LINUX_NOTIFIER_H
+#include <linux/errno.h>
+
+struct notifier_block
+{
+       int (*notifier_call)(struct notifier_block *self, unsigned long, void *);
+       struct notifier_block *next;
+       int priority;
+};
+
+
+#ifdef __KERNEL__
+
+extern int notifier_chain_register(struct notifier_block **list, struct notifier_block *n);
+extern int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n);
+extern int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v);
+
+#define NOTIFY_DONE            0x0000          /* Don't care */
+#define NOTIFY_OK              0x0001          /* Suits me */
+#define NOTIFY_STOP_MASK       0x8000          /* Don't call further */
+#define NOTIFY_BAD             (NOTIFY_STOP_MASK|0x0002)       /* Bad/Veto action */
+/*
+ * Clean way to return from the notifier and stop further calls.
+ */
+#define NOTIFY_STOP            (NOTIFY_OK|NOTIFY_STOP_MASK)
+
+/*
+ *     Declared notifiers so far. I can imagine quite a few more chains
+ *     over time (eg laptop power reset chains, reboot chain (to clean 
+ *     device units up), device [un]mount chain, module load/unload chain,
+ *     low memory chain, screenblank chain (for plug in modular screenblankers)
+ *     VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
+ */
+ 
+/* netdevice notifier chain */
+#define NETDEV_UP      0x0001  /* For now you can't veto a device up/down */
+#define NETDEV_DOWN    0x0002
+#define NETDEV_REBOOT  0x0003  /* Tell a protocol stack a network interface
+                                  detected a hardware crash and restarted
+                                  - we can use this eg to kick tcp sessions
+                                  once done */
+#define NETDEV_CHANGE  0x0004  /* Notify device state change */
+#define NETDEV_REGISTER 0x0005
+#define NETDEV_UNREGISTER      0x0006
+#define NETDEV_CHANGEMTU       0x0007
+#define NETDEV_CHANGEADDR      0x0008
+#define NETDEV_GOING_DOWN      0x0009
+#define NETDEV_CHANGENAME      0x000A
+#define NETDEV_FEAT_CHANGE     0x000B
+
+#define SYS_DOWN       0x0001  /* Notify of system down */
+#define SYS_RESTART    SYS_DOWN
+#define SYS_HALT       0x0002  /* Notify of system halt */
+#define SYS_POWER_OFF  0x0003  /* Notify of system power off */
+
+#define NETLINK_URELEASE       0x0001  /* Unicast netlink socket released */
+
+#define CPU_ONLINE             0x0002 /* CPU (unsigned)v is up */
+#define CPU_UP_PREPARE         0x0003 /* CPU (unsigned)v coming up */
+#define CPU_UP_CANCELED                0x0004 /* CPU (unsigned)v NOT coming up */
+#define CPU_DOWN_PREPARE       0x0005 /* CPU (unsigned)v going down */
+#define CPU_DOWN_FAILED                0x0006 /* CPU (unsigned)v NOT going down */
+#define CPU_DEAD               0x0007 /* CPU (unsigned)v dead */
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_NOTIFIER_H */
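
The CPU_* codes at the bottom of this header travel through the same chain machinery as every other notifier. A hedged usage sketch of the declared API (the chain head and callback are invented, and notifier_chain_register() is simplified to a direct link since the chain has one element):

    /* Hedged usage sketch for the notifier API declared above. */
    #include <stdio.h>

    struct notifier_block {
        int (*notifier_call)(struct notifier_block *self, unsigned long, void *);
        struct notifier_block *next;
        int priority;
    };

    #define NOTIFY_OK 0x0001
    #define CPU_DEAD  0x0007

    static int my_callback(struct notifier_block *self, unsigned long val, void *v)
    {
        if (val == CPU_DEAD)
            printf("cpu %lu is dead\n", (unsigned long)v);
        return NOTIFY_OK;
    }

    static struct notifier_block my_nb = { .notifier_call = my_callback };
    static struct notifier_block *my_chain;

    int main(void)
    {
        my_chain = &my_nb;   /* notifier_chain_register() inserts by priority */

        /* what notifier_call_chain(&my_chain, CPU_DEAD, (void *)3) does:
         * walk the chain until a callback sets NOTIFY_STOP_MASK */
        for (struct notifier_block *nb = my_chain; nb; nb = nb->next)
            nb->notifier_call(nb, CPU_DEAD, (void *)3UL);
        return 0;
    }
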
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/linux/sal.c
--- a/xen/arch/ia64/linux/sal.c Fri Aug 26 13:06:49 2005
+++ /dev/null   Tue Aug 30 18:41:54 2005
@@ -1,302 +0,0 @@
-/*
- * System Abstraction Layer (SAL) interface routines.
- *
- * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- */
-#include <linux/config.h>
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-
-#include <asm/page.h>
-#include <asm/sal.h>
-#include <asm/pal.h>
-
- __cacheline_aligned DEFINE_SPINLOCK(sal_lock);
-unsigned long sal_platform_features;
-
-unsigned short sal_revision;
-unsigned short sal_version;
-
-#define SAL_MAJOR(x) ((x) >> 8)
-#define SAL_MINOR(x) ((x) & 0xff)
-
-static struct {
-       void *addr;     /* function entry point */
-       void *gpval;    /* gp value to use */
-} pdesc;
-
-static long
-default_handler (void)
-{
-       return -1;
-}
-
-ia64_sal_handler ia64_sal = (ia64_sal_handler) default_handler;
-ia64_sal_desc_ptc_t *ia64_ptc_domain_info;
-
-const char *
-ia64_sal_strerror (long status)
-{
-       const char *str;
-       switch (status) {
-             case 0: str = "Call completed without error"; break;
-             case 1: str = "Effect a warm boot of the system to complete "
-                             "the update"; break;
-             case -1: str = "Not implemented"; break;
-             case -2: str = "Invalid argument"; break;
-             case -3: str = "Call completed with error"; break;
-             case -4: str = "Virtual address not registered"; break;
-             case -5: str = "No information available"; break;
-             case -6: str = "Insufficient space to add the entry"; break;
-             case -7: str = "Invalid entry_addr value"; break;
-             case -8: str = "Invalid interrupt vector"; break;
-             case -9: str = "Requested memory not available"; break;
-             case -10: str = "Unable to write to the NVM device"; break;
-             case -11: str = "Invalid partition type specified"; break;
-             case -12: str = "Invalid NVM_Object id specified"; break;
-             case -13: str = "NVM_Object already has the maximum number "
-                               "of partitions"; break;
-             case -14: str = "Insufficient space in partition for the "
-                               "requested write sub-function"; break;
-             case -15: str = "Insufficient data buffer space for the "
-                               "requested read record sub-function"; break;
-             case -16: str = "Scratch buffer required for the write/delete "
-                               "sub-function"; break;
-             case -17: str = "Insufficient space in the NVM_Object for the "
-                               "requested create sub-function"; break;
-             case -18: str = "Invalid value specified in the partition_rec "
-                               "argument"; break;
-             case -19: str = "Record oriented I/O not supported for this "
-                               "partition"; break;
-             case -20: str = "Bad format of record to be written or "
-                               "required keyword variable not "
-                               "specified"; break;
-             default: str = "Unknown SAL status code"; break;
-       }
-       return str;
-}
-
-void __init
-ia64_sal_handler_init (void *entry_point, void *gpval)
-{
-       /* fill in the SAL procedure descriptor and point ia64_sal to it: */
-       pdesc.addr = entry_point;
-       pdesc.gpval = gpval;
-       ia64_sal = (ia64_sal_handler) &pdesc;
-}
-
-static void __init
-check_versions (struct ia64_sal_systab *systab)
-{
-       sal_revision = (systab->sal_rev_major << 8) | systab->sal_rev_minor;
-       sal_version = (systab->sal_b_rev_major << 8) | systab->sal_b_rev_minor;
-
-       /* Check for broken firmware */
-       if ((sal_revision == SAL_VERSION_CODE(49, 29))
-           && (sal_version == SAL_VERSION_CODE(49, 29)))
-       {
-               /*
-                * Old firmware for zx2000 prototypes have this weird version number,
-                * reset it to something sane.
-                */
-               sal_revision = SAL_VERSION_CODE(2, 8);
-               sal_version = SAL_VERSION_CODE(0, 0);
-       }
-}
-
-static void __init
-sal_desc_entry_point (void *p)
-{
-       struct ia64_sal_desc_entry_point *ep = p;
-       ia64_pal_handler_init(__va(ep->pal_proc));
-       ia64_sal_handler_init(__va(ep->sal_proc), __va(ep->gp));
-}
-
-#ifdef CONFIG_SMP
-static void __init
-set_smp_redirect (int flag)
-{
-#ifndef CONFIG_HOTPLUG_CPU
-       if (no_int_routing)
-               smp_int_redirect &= ~flag;
-       else
-               smp_int_redirect |= flag;
-#else
-       /*
-        * For CPU Hotplug we dont want to do any chipset supported
-        * interrupt redirection. The reason is this would require that
-        * All interrupts be stopped and hard bind the irq to a cpu.
-        * Later when the interrupt is fired we need to set the redir hint
-        * on again in the vector. This is combersome for something that the
-        * user mode irq balancer will solve anyways.
-        */
-       no_int_routing=1;
-       smp_int_redirect &= ~flag;
-#endif
-}
-#else
-#define set_smp_redirect(flag) do { } while (0)
-#endif
-
-static void __init
-sal_desc_platform_feature (void *p)
-{
-       struct ia64_sal_desc_platform_feature *pf = p;
-       sal_platform_features = pf->feature_mask;
-
-       printk(KERN_INFO "SAL Platform features:");
-       if (!sal_platform_features) {
-               printk(" None\n");
-               return;
-       }
-
-       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_BUS_LOCK)
-               printk(" BusLock");
-       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT) {
-               printk(" IRQ_Redirection");
-               set_smp_redirect(SMP_IRQ_REDIRECTION);
-       }
-       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT) {
-               printk(" IPI_Redirection");
-               set_smp_redirect(SMP_IPI_REDIRECTION);
-       }
-       if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)
-               printk(" ITC_Drift");
-       printk("\n");
-}
-
-#ifdef CONFIG_SMP
-static void __init
-sal_desc_ap_wakeup (void *p)
-{
-       struct ia64_sal_desc_ap_wakeup *ap = p;
-
-       switch (ap->mechanism) {
-       case IA64_SAL_AP_EXTERNAL_INT:
-               ap_wakeup_vector = ap->vector;
-               printk(KERN_INFO "SAL: AP wakeup using external interrupt "
-                               "vector 0x%lx\n", ap_wakeup_vector);
-               break;
-       default:
-               printk(KERN_ERR "SAL: AP wakeup mechanism unsupported!\n");
-               break;
-       }
-}
-
-static void __init
-chk_nointroute_opt(void)
-{
-       char *cp;
-       extern char saved_command_line[];
-
-       for (cp = saved_command_line; *cp; ) {
-               if (memcmp(cp, "nointroute", 10) == 0) {
-                       no_int_routing = 1;
-                       printk ("no_int_routing on\n");
-                       break;
-               } else {
-                       while (*cp != ' ' && *cp)
-                               ++cp;
-                       while (*cp == ' ')
-                               ++cp;
-               }
-       }
-}
-
-#else
-static void __init sal_desc_ap_wakeup(void *p) { }
-#endif
-
-void __init
-ia64_sal_init (struct ia64_sal_systab *systab)
-{
-       char *p;
-       int i;
-
-       if (!systab) {
-               printk(KERN_WARNING "Hmm, no SAL System Table.\n");
-               return;
-       }
-
-       if (strncmp(systab->signature, "SST_", 4) != 0)
-               printk(KERN_ERR "bad signature in system table!");
-
-       check_versions(systab);
-#ifdef CONFIG_SMP
-       chk_nointroute_opt();
-#endif
-
-       /* revisions are coded in BCD, so %x does the job for us */
-       printk(KERN_INFO "SAL %x.%x: %.32s %.32s%sversion %x.%x\n",
-                       SAL_MAJOR(sal_revision), SAL_MINOR(sal_revision),
-                       systab->oem_id, systab->product_id,
-                       systab->product_id[0] ? " " : "",
-                       SAL_MAJOR(sal_version), SAL_MINOR(sal_version));
-
-       p = (char *) (systab + 1);
-       for (i = 0; i < systab->entry_count; i++) {
-               /*
-                * The first byte of each entry type contains the type
-                * descriptor.
-                */
-               switch (*p) {
-               case SAL_DESC_ENTRY_POINT:
-                       sal_desc_entry_point(p);
-                       break;
-               case SAL_DESC_PLATFORM_FEATURE:
-                       sal_desc_platform_feature(p);
-                       break;
-               case SAL_DESC_PTC:
-                       ia64_ptc_domain_info = (ia64_sal_desc_ptc_t *)p;
-                       break;
-               case SAL_DESC_AP_WAKEUP:
-                       sal_desc_ap_wakeup(p);
-                       break;
-               }
-               p += SAL_DESC_SIZE(*p);
-       }
-}
-
-int
-ia64_sal_oemcall(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
-                u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7)
-{
-       if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
-               return -1;
-       SAL_CALL(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
-       return 0;
-}
-EXPORT_SYMBOL(ia64_sal_oemcall);
-
-int
-ia64_sal_oemcall_nolock(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
-                       u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
-                       u64 arg7)
-{
-       if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
-               return -1;
-       SAL_CALL_NOLOCK(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
-                       arg7);
-       return 0;
-}
-EXPORT_SYMBOL(ia64_sal_oemcall_nolock);
-
-int
-ia64_sal_oemcall_reentrant(struct ia64_sal_retval *isrvp, u64 oemfunc,
-                          u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5,
-                          u64 arg6, u64 arg7)
-{
-       if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
-               return -1;
-       SAL_CALL_REENTRANT(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
-                          arg7);
-       return 0;
-}
-EXPORT_SYMBOL(ia64_sal_oemcall_reentrant);
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/smp.c
--- a/xen/arch/ia64/smp.c       Fri Aug 26 13:06:49 2005
+++ /dev/null   Tue Aug 30 18:41:54 2005
@@ -1,43 +0,0 @@
-/*
- *     Intel SMP support routines.
- *
- *     (c) 1995 Alan Cox, Building #3 <alan@xxxxxxxxxx>
- *     (c) 1998-99, 2000 Ingo Molnar <mingo@xxxxxxxxxx>
- *
- *     This code is released under the GNU General Public License version 2 or
- *     later.
- */
-
-//#include <xen/irq.h>
-#include <xen/sched.h>
-#include <xen/delay.h>
-#include <xen/spinlock.h>
-#include <asm/smp.h>
-//#include <asm/mc146818rtc.h>
-#include <asm/pgalloc.h>
-//#include <asm/smpboot.h>
-#include <asm/hardirq.h>
-
-
-//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-void flush_tlb_mask(cpumask_t mask)
-{
-       dummy();
-}
-//#if CONFIG_SMP || IA64
-#if CONFIG_SMP
-//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-void smp_send_event_check_mask(cpumask_t mask)
-{
-       dummy();
-       //send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
-}
-
-
-//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-int try_flush_tlb_mask(cpumask_t mask)
-{
-       dummy();
-       return 1;
-}
-#endif
diff -r 2b95125015a5 -r f5c4042212b0 xen/arch/ia64/smpboot.c
--- a/xen/arch/ia64/smpboot.c   Fri Aug 26 13:06:49 2005
+++ /dev/null   Tue Aug 30 18:41:54 2005
@@ -1,2 +0,0 @@
-// expand later
-int ht_per_core = 1;
diff -r 2b95125015a5 -r f5c4042212b0 xen/include/asm-ia64/linux/asm/spinlock.h
--- a/xen/include/asm-ia64/linux/asm/spinlock.h Fri Aug 26 13:06:49 2005
+++ /dev/null   Tue Aug 30 18:41:54 2005
@@ -1,208 +0,0 @@
-#ifndef _ASM_IA64_SPINLOCK_H
-#define _ASM_IA64_SPINLOCK_H
-
-/*
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- *
- * This file is used for SMP configurations only.
- */
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-
-#include <asm/atomic.h>
-#include <asm/bitops.h>
-#include <asm/intrinsics.h>
-#include <asm/system.h>
-
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED                     (spinlock_t) { 0 }
-#define spin_lock_init(x)                      ((x)->lock = 0)
-
-#ifdef ASM_SUPPORTED
-/*
- * Try to get the lock.  If we fail to get the lock, make a non-standard call 
to
- * ia64_spinlock_contention().  We do not use a normal call because that would 
force all
- * callers of spin_lock() to be non-leaf routines.  Instead, 
ia64_spinlock_contention() is
- * carefully coded to touch only those registers that spin_lock() marks 
"clobbered".
- */
-
-#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", 
"r29", "r30", "b6", "memory"
-
-static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
-{
-       register volatile unsigned int *ptr asm ("r31") = &lock->lock;
-
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-# ifdef CONFIG_ITANIUM
-       /* don't use brl on Itanium... */
-       asm volatile ("{\n\t"
-                     "  mov ar.ccv = r0\n\t"
-                     "  mov r28 = ip\n\t"
-                     "  mov r30 = 1;;\n\t"
-                     "}\n\t"
-                     "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
-                     "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
-                     "cmp4.ne p14, p0 = r30, r0\n\t"
-                     "mov b6 = r29;;\n\t"
-                     "mov r27=%2\n\t"
-                     "(p14) br.cond.spnt.many b6"
-                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
-       asm volatile ("{\n\t"
-                     "  mov ar.ccv = r0\n\t"
-                     "  mov r28 = ip\n\t"
-                     "  mov r30 = 1;;\n\t"
-                     "}\n\t"
-                     "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
-                     "cmp4.ne p14, p0 = r30, r0\n\t"
-                     "mov r27=%2\n\t"
-                     "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
-                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#else
-# ifdef CONFIG_ITANIUM
-       /* don't use brl on Itanium... */
-       /* mis-declare, so we get the entry-point, not it's function descriptor: */
-       asm volatile ("mov r30 = 1\n\t"
-                     "mov r27=%2\n\t"
-                     "mov ar.ccv = r0;;\n\t"
-                     "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
-                     "movl r29 = ia64_spinlock_contention;;\n\t"
-                     "cmp4.ne p14, p0 = r30, r0\n\t"
-                     "mov b6 = r29;;\n\t"
-                     "(p14) br.call.spnt.many b6 = b6"
-                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
-       asm volatile ("mov r30 = 1\n\t"
-                     "mov r27=%2\n\t"
-                     "mov ar.ccv = r0;;\n\t"
-                     "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
-                     "cmp4.ne p14, p0 = r30, r0\n\t"
-                     "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
-                     : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#endif
-}
-#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
-#else /* !ASM_SUPPORTED */
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-# define _raw_spin_lock(x)                                                             \
-do {                                                                                   \
-       __u32 *ia64_spinlock_ptr = (__u32 *) (x);                                       \
-       __u64 ia64_spinlock_val;                                                        \
-       ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);                 \
-       if (unlikely(ia64_spinlock_val)) {                                              \
-               do {                                                                    \
-                       while (*ia64_spinlock_ptr)                                      \
-                               ia64_barrier();                                         \
-                       ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
-               } while (ia64_spinlock_val);                                            \
-       }                                                                               \
-} while (0)
-#endif /* !ASM_SUPPORTED */
-
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define _raw_spin_unlock(x)    do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
-#define _raw_spin_trylock(x)   (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
-
-typedef struct {
-       volatile unsigned int read_counter      : 31;
-       volatile unsigned int write_lock        :  1;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
-
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-#define read_can_lock(rw)      (*(volatile int *)(rw) >= 0)
-#define write_can_lock(rw)     (*(volatile int *)(rw) == 0)
-
-#define _raw_read_lock(rw)                                                      \
-do {                                                                            \
-       rwlock_t *__read_lock_ptr = (rw);                                        \
-                                                                                \
-       while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {   \
-               ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                 \
-               while (*(volatile int *)__read_lock_ptr < 0)                     \
-                       cpu_relax();                                             \
-       }                                                                        \
-} while (0)
-
-#define _raw_read_unlock(rw)                                   \
-do {                                                           \
-       rwlock_t *__read_lock_ptr = (rw);                       \
-       ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
-} while (0)
-
-#ifdef ASM_SUPPORTED
-#define _raw_write_lock(rw)                                            \
-do {                                                                   \
-       __asm__ __volatile__ (                                          \
-               "mov ar.ccv = r0\n"                                     \
-               "dep r29 = -1, r0, 31, 1;;\n"                           \
-               "1:\n"                                                  \
-               "ld4 r2 = [%0];;\n"                                     \
-               "cmp4.eq p0,p7 = r0,r2\n"                               \
-               "(p7) br.cond.spnt.few 1b \n"                           \
-               "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"               \
-               "cmp4.eq p0,p7 = r0, r2\n"                              \
-               "(p7) br.cond.spnt.few 1b;;\n"                          \
-               :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");    \
-} while(0)
-
-#define _raw_write_trylock(rw)                                         \
-({                                                                     \
-       register long result;                                           \
-                                                                       \
-       __asm__ __volatile__ (                                          \
-               "mov ar.ccv = r0\n"                                     \
-               "dep r29 = -1, r0, 31, 1;;\n"                           \
-               "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"                 \
-               : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");  \
-       (result == 0);                                                  \
-})
-
-#else /* !ASM_SUPPORTED */
-
-#define _raw_write_lock(l)                                                     \
-({                                                                             \
-       __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);               \
-       __u32 *ia64_write_lock_ptr = (__u32 *) (l);                             \
-       do {                                                                    \
-               while (*ia64_write_lock_ptr)                                    \
-                       ia64_barrier();                                         \
-               ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
-       } while (ia64_val);                                                     \
-})
-
-#define _raw_write_trylock(rw)                                         \
-({                                                                     \
-       __u64 ia64_val;                                                 \
-       __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);                  \
-       ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);   \
-       (ia64_val == 0);                                                \
-})
-
-#endif /* !ASM_SUPPORTED */
-
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
-
-#define _raw_write_unlock(x)                                                   \
-({                                                                             \
-       smp_mb__before_clear_bit();     /* need barrier before releasing lock... */ \
-       clear_bit(31, (x));                                                     \
-})
-
-#endif /*  _ASM_IA64_SPINLOCK_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

