
[Xen-changelog] Move copy+patched files to linux-xen directory.



# HG changeset patch
# User adsharma@xxxxxxxxxxxxxxxxxxxxx
# Node ID f242de2e5a3c50cbb353fcc7c2171875451403da
# Parent  e2127f19861b842e572682619dd37651c2b5441e
Move copy+patched files to linux-xen directory.

Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>

diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile    Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/Makefile    Wed Aug  3 00:25:11 2005
@@ -1,6 +1,6 @@
 include $(BASEDIR)/Rules.mk
 
-VPATH = linux
+VPATH = linux linux-xen
 
 # libs-y       += arch/ia64/lib/lib.a
 
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/Rules.mk
--- a/xen/arch/ia64/Rules.mk    Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/Rules.mk    Wed Aug  3 00:25:11 2005
@@ -6,8 +6,11 @@
 CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux-
 endif
 AFLAGS  += -D__ASSEMBLY__
-CPPFLAGS  += -I$(BASEDIR)/include -I$(BASEDIR)/include/asm-ia64 \
-             -I$(BASEDIR)/include/asm-ia64/linux -I$(BASEDIR)/arch/ia64/linux
+CPPFLAGS  += -I$(BASEDIR)/include -I$(BASEDIR)/include/asm-ia64        \
+             -I$(BASEDIR)/include/asm-ia64/linux                       \
+            -I$(BASEDIR)/include/asm-ia64/linux-xen                    \
+             -I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64/linux-xen
+
 CFLAGS  := -nostdinc -fno-builtin -fno-common -fno-strict-aliasing
 #CFLAGS  += -O3                # -O3 over-inlines making debugging tough!
 CFLAGS  += -O2         # but no optimization causes compile errors!
@@ -15,7 +18,9 @@
 CFLAGS  += -iwithprefix include -Wall
 CFLAGS  += -fomit-frame-pointer -I$(BASEDIR)/include -D__KERNEL__
 CFLAGS  += -I$(BASEDIR)/include/asm-ia64 -I$(BASEDIR)/include/asm-ia64/linux \
-           -I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64
+           -I$(BASEDIR)/include/asm-ia64/linux                                \
+           -I$(BASEDIR)/include/asm-ia64/linux-xen                     \
+           -I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64/linux-xen
 CFLAGS  += -Wno-pointer-arith -Wredundant-decls
 CFLAGS  += -DIA64 -DXEN -DLINUX_2_6
 CFLAGS += -ffixed-r13 -mfixed-range=f12-f15,f32-f127
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/efi.c
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/efi.c     Wed Aug  3 00:25:11 2005
@@ -0,0 +1,866 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999-2003 Hewlett-Packard Co.
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * All EFI Runtime Services are not implemented yet as EFI only
+ * supports physical mode addressing on SoftSDV. This is to be fixed
+ * in a future version.  --drummond 1999-07-20
+ *
+ * Implemented EFI runtime services and virtual mode calls.  --davidm
+ *
+ * Goutham Rao: <goutham.rao@xxxxxxxxx>
+ *     Skip non-WB memory and ignore empty memory ranges.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/efi.h>
+
+#include <asm/io.h>
+#include <asm/kregs.h>
+#include <asm/meminit.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/mca.h>
+
+#define EFI_DEBUG      0
+
+extern efi_status_t efi_call_phys (void *, ...);
+
+struct efi efi;
+EXPORT_SYMBOL(efi);
+static efi_runtime_services_t *runtime;
+static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
+
+#define efi_call_virt(f, args...)      (*(f))(args)
+
+#define STUB_GET_TIME(prefix, adjust_arg)                                                        \
+static efi_status_t                                                                              \
+prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc)                                           \
+{                                                                                                \
+       struct ia64_fpreg fr[6];                                                                  \
+       efi_time_cap_t *atc = NULL;                                                               \
+       efi_status_t ret;                                                                         \
+                                                                                                 \
+       if (tc)                                                                                   \
+               atc = adjust_arg(tc);                                                             \
+       ia64_save_scratch_fpregs(fr);                                                             \
+       ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
+       ia64_load_scratch_fpregs(fr);                                                             \
+       return ret;                                                                               \
+}
+
+#define STUB_SET_TIME(prefix, adjust_arg)                                                      \
+static efi_status_t                                                                            \
+prefix##_set_time (efi_time_t *tm)                                                             \
+{                                                                                              \
+       struct ia64_fpreg fr[6];                                                                \
+       efi_status_t ret;                                                                       \
+                                                                                               \
+       ia64_save_scratch_fpregs(fr);                                                           \
+       ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm));    \
+       ia64_load_scratch_fpregs(fr);                                                           \
+       return ret;                                                                             \
+}
+
+#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg)                                               \
+static efi_status_t                                                                            \
+prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm)            \
+{                                                                                              \
+       struct ia64_fpreg fr[6];                                                                \
+       efi_status_t ret;                                                                       \
+                                                                                               \
+       ia64_save_scratch_fpregs(fr);                                                           \
+       ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time),       \
+                               adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm));      \
+       ia64_load_scratch_fpregs(fr);                                                           \
+       return ret;                                                                             \
+}
+
+#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg)                                               \
+static efi_status_t                                                                            \
+prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm)                                  \
+{                                                                                              \
+       struct ia64_fpreg fr[6];                                                                \
+       efi_time_t *atm = NULL;                                                                 \
+       efi_status_t ret;                                                                      \
+                                                                                               \
+       if (tm)                                                                                 \
+               atm = adjust_arg(tm);                                                           \
+       ia64_save_scratch_fpregs(fr);                                                           \
+       ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time),       \
+                               enabled, atm);                                                  \
+       ia64_load_scratch_fpregs(fr);                                                           \
+       return ret;                                                                             \
+}
+
+#define STUB_GET_VARIABLE(prefix, adjust_arg)                                          \
+static efi_status_t                                                                    \
+prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,              \
+                      unsigned long *data_size, void *data)                            \
+{                                                                                      \
+       struct ia64_fpreg fr[6];                                                        \
+       u32 *aattr = NULL;                                                              \
+       efi_status_t ret;                                                               \
+                                                                                       \
+       if (attr)                                                                       \
+               aattr = adjust_arg(attr);                                               \
+       ia64_save_scratch_fpregs(fr);                                                   \
+       ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable),     \
+                               adjust_arg(name), adjust_arg(vendor), aattr,            \
+                               adjust_arg(data_size), adjust_arg(data));               \
+       ia64_load_scratch_fpregs(fr);                                                   \
+       return ret;                                                                     \
+}
+
+#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg)                                             \
+static efi_status_t                                                                            \
+prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor)  \
+{                                                                                              \
+       struct ia64_fpreg fr[6];                                                                \
+       efi_status_t ret;                                                                       \
+                                                                                               \
+       ia64_save_scratch_fpregs(fr);                                                           \
+       ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable),   \
+                               adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor));   \
+       ia64_load_scratch_fpregs(fr);                                                           \
+       return ret;                                                                             \
+}
+
+#define STUB_SET_VARIABLE(prefix, adjust_arg)                                          \
+static efi_status_t                                                                    \
+prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr,     \
+                      unsigned long data_size, void *data)                             \
+{                                                                                      \
+       struct ia64_fpreg fr[6];                                                        \
+       efi_status_t ret;                                                               \
+                                                                                       \
+       ia64_save_scratch_fpregs(fr);                                                   \
+       ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable),     \
+                               adjust_arg(name), adjust_arg(vendor), attr, data_size,  \
+                               adjust_arg(data));                                      \
+       ia64_load_scratch_fpregs(fr);                                                   \
+       return ret;                                                                     \
+}
+
+#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg)                                      \
+static efi_status_t                                                                            \
+prefix##_get_next_high_mono_count (u32 *count)                                                 \
+{                                                                                              \
+       struct ia64_fpreg fr[6];                                                                \
+       efi_status_t ret;                                                                       \
+                                                                                               \
+       ia64_save_scratch_fpregs(fr);                                                           \
+       ret = efi_call_##prefix((efi_get_next_high_mono_count_t *)                              \
+                               __va(runtime->get_next_high_mono_count), adjust_arg(count));    \
+       ia64_load_scratch_fpregs(fr);                                                           \
+       return ret;                                                                             \
+}
+
+#define STUB_RESET_SYSTEM(prefix, adjust_arg)                                  \
+static void                                                                    \
+prefix##_reset_system (int reset_type, efi_status_t status,                    \
+                      unsigned long data_size, efi_char16_t *data)             \
+{                                                                              \
+       struct ia64_fpreg fr[6];                                                \
+       efi_char16_t *adata = NULL;                                             \
+                                                                               \
+       if (data)                                                               \
+               adata = adjust_arg(data);                                       \
+                                                                               \
+       ia64_save_scratch_fpregs(fr);                                           \
+       efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system),   \
+                         reset_type, status, data_size, adata);                \
+       /* should not return, but just in case... */                            \
+       ia64_load_scratch_fpregs(fr);                                           \
+}
+
+#define phys_ptr(arg)  ((__typeof__(arg)) ia64_tpa(arg))
+
+STUB_GET_TIME(phys, phys_ptr)
+STUB_SET_TIME(phys, phys_ptr)
+STUB_GET_WAKEUP_TIME(phys, phys_ptr)
+STUB_SET_WAKEUP_TIME(phys, phys_ptr)
+STUB_GET_VARIABLE(phys, phys_ptr)
+STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
+STUB_SET_VARIABLE(phys, phys_ptr)
+STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
+STUB_RESET_SYSTEM(phys, phys_ptr)
+
+#define id(arg)        arg
+
+STUB_GET_TIME(virt, id)
+STUB_SET_TIME(virt, id)
+STUB_GET_WAKEUP_TIME(virt, id)
+STUB_SET_WAKEUP_TIME(virt, id)
+STUB_GET_VARIABLE(virt, id)
+STUB_GET_NEXT_VARIABLE(virt, id)
+STUB_SET_VARIABLE(virt, id)
+STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
+STUB_RESET_SYSTEM(virt, id)
+
+void
+efi_gettimeofday (struct timespec *ts)
+{
+       efi_time_t tm;
+
+       memset(ts, 0, sizeof(*ts));
+       if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
+               return;
+
+       ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
+       ts->tv_nsec = tm.nanosecond;
+}
+
+static int
+is_available_memory (efi_memory_desc_t *md)
+{
+       if (!(md->attribute & EFI_MEMORY_WB))
+               return 0;
+
+       switch (md->type) {
+             case EFI_LOADER_CODE:
+             case EFI_LOADER_DATA:
+             case EFI_BOOT_SERVICES_CODE:
+             case EFI_BOOT_SERVICES_DATA:
+             case EFI_CONVENTIONAL_MEMORY:
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Trim descriptor MD so it starts at address START_ADDR.  If the descriptor covers
+ * memory that is normally available to the kernel, issue a warning that some memory
+ * is being ignored.
+ */
+static void
+trim_bottom (efi_memory_desc_t *md, u64 start_addr)
+{
+       u64 num_skipped_pages;
+
+       if (md->phys_addr >= start_addr || !md->num_pages)
+               return;
+
+       num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
+       if (num_skipped_pages > md->num_pages)
+               num_skipped_pages = md->num_pages;
+
+       if (is_available_memory(md))
+               printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
+                      "at 0x%lx\n", __FUNCTION__,
+                      (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
+                      md->phys_addr, start_addr - IA64_GRANULE_SIZE);
+       /*
+        * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
+        * descriptor list to become unsorted.  In such a case, md->num_pages will be
+        * zero, so the Right Thing will happen.
+        */
+       md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
+       md->num_pages -= num_skipped_pages;
+}
+
+static void
+trim_top (efi_memory_desc_t *md, u64 end_addr)
+{
+       u64 num_dropped_pages, md_end_addr;
+
+       md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+       if (md_end_addr <= end_addr || !md->num_pages)
+               return;
+
+       num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
+       if (num_dropped_pages > md->num_pages)
+               num_dropped_pages = md->num_pages;
+
+       if (is_available_memory(md))
+               printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
+                      "at 0x%lx\n", __FUNCTION__,
+                      (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
+                      md->phys_addr, end_addr);
+       md->num_pages -= num_dropped_pages;
+}
+
+/*
+ * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
+ * has memory that is available for OS use.
+ */
+void
+efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
+{
+       int prev_valid = 0;
+       struct range {
+               u64 start;
+               u64 end;
+       } prev, curr;
+       void *efi_map_start, *efi_map_end, *p, *q;
+       efi_memory_desc_t *md, *check_md;
+       u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
+       unsigned long total_mem = 0;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+
+               /* skip over non-WB memory descriptors; that's all we're interested in... */
+               if (!(md->attribute & EFI_MEMORY_WB))
+                       continue;
+
+#ifdef XEN
+// this works around a problem in the ski bootloader
+{
+               extern long running_on_sim;
+               if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
+                       continue;
+}
+// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
+               if (md->phys_addr >= 0x100000000) continue;
+#endif
+               /*
+                * granule_addr is the base of md's first granule.
+                * [granule_addr - first_non_wb_addr) is guaranteed to
+                * be contiguous WB memory.
+                */
+               granule_addr = GRANULEROUNDDOWN(md->phys_addr);
+               first_non_wb_addr = max(first_non_wb_addr, granule_addr);
+
+               if (first_non_wb_addr < md->phys_addr) {
+                       trim_bottom(md, granule_addr + IA64_GRANULE_SIZE);
+                       granule_addr = GRANULEROUNDDOWN(md->phys_addr);
+                       first_non_wb_addr = max(first_non_wb_addr, granule_addr);
+               }
+
+               for (q = p; q < efi_map_end; q += efi_desc_size) {
+                       check_md = q;
+
+                       if ((check_md->attribute & EFI_MEMORY_WB) &&
+                           (check_md->phys_addr == first_non_wb_addr))
+                               first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
+                       else
+                               break;          /* non-WB or hole */
+               }
+
+               last_granule_addr = GRANULEROUNDDOWN(first_non_wb_addr);
+               if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT))
+                       trim_top(md, last_granule_addr);
+
+               if (is_available_memory(md)) {
+                       if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) >= max_addr) {
+                               if (md->phys_addr >= max_addr)
+                                       continue;
+                               md->num_pages = (max_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
+                               first_non_wb_addr = max_addr;
+                       }
+
+                       if (total_mem >= mem_limit)
+                               continue;
+
+                       if (total_mem + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
+                               unsigned long limit_addr = md->phys_addr;
+
+                               limit_addr += mem_limit - total_mem;
+                               limit_addr = GRANULEROUNDDOWN(limit_addr);
+
+                               if (md->phys_addr > limit_addr)
+                                       continue;
+
+                               md->num_pages = (limit_addr - md->phys_addr) >>
+                                               EFI_PAGE_SHIFT;
+                               first_non_wb_addr = max_addr = md->phys_addr +
+                                             (md->num_pages << EFI_PAGE_SHIFT);
+                       }
+                       total_mem += (md->num_pages << EFI_PAGE_SHIFT);
+
+                       if (md->num_pages == 0)
+                               continue;
+
+                       curr.start = PAGE_OFFSET + md->phys_addr;
+                       curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
+
+                       if (!prev_valid) {
+                               prev = curr;
+                               prev_valid = 1;
+                       } else {
+                               if (curr.start < prev.start)
+                                       printk(KERN_ERR "Oops: EFI memory table not ordered!\n");
+
+                               if (prev.end == curr.start) {
+                                       /* merge two consecutive memory ranges */
+                                       prev.end = curr.end;
+                               } else {
+                                       start = PAGE_ALIGN(prev.start);
+                                       end = prev.end & PAGE_MASK;
+                                       if ((end > start) && (*callback)(start, end, arg) < 0)
+                                               return;
+                                       prev = curr;
+                               }
+                       }
+               }
+       }
+       if (prev_valid) {
+               start = PAGE_ALIGN(prev.start);
+               end = prev.end & PAGE_MASK;
+               if (end > start)
+                       (*callback)(start, end, arg);
+       }
+}
+
+/*
+ * Look for the PAL_CODE region reported by EFI and map it using an
+ * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
+ * Abstraction Layer chapter 11 in ADAG
+ */
+
+void *
+efi_get_pal_addr (void)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+       int pal_code_count = 0;
+       u64 vaddr, mask;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+               if (md->type != EFI_PAL_CODE)
+                       continue;
+
+               if (++pal_code_count > 1) {
+                       printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
+                              md->phys_addr);
+                       continue;
+               }
+               /*
+                * The only ITLB entry in region 7 that is used is the one installed by
+                * __start().  That entry covers a 64MB range.
+                */
+               mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
+               vaddr = PAGE_OFFSET + md->phys_addr;
+
+               /*
+                * We must check that the PAL mapping won't overlap with the kernel
+                * mapping.
+                *
+                * PAL code is guaranteed to be aligned on a power of 2 between 4k and
+                * 256KB and that only one ITR is needed to map it. This implies that the
+                * PAL code is always aligned on its size, i.e., the closest matching page
+                * size supported by the TLB. Therefore PAL code is guaranteed never to
+                * cross a 64MB unless it is bigger than 64MB (very unlikely!).  So for
+                * now the following test is enough to determine whether or not we need a
+                * dedicated ITR for the PAL code.
+                */
+               if ((vaddr & mask) == (KERNEL_START & mask)) {
+                       printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
+                              __FUNCTION__);
+                       continue;
+               }
+
+               if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
+                       panic("Woah!  PAL code size bigger than a granule!");
+
+#if EFI_DEBUG
+               mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+
+               printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
+                       smp_processor_id(), md->phys_addr,
+                       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
+                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
+#endif
+               return __va(md->phys_addr);
+       }
+       printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
+              __FUNCTION__);
+       return NULL;
+}
+
+void
+efi_map_pal_code (void)
+{
+       void *pal_vaddr = efi_get_pal_addr ();
+       u64 psr;
+
+       if (!pal_vaddr)
+               return;
+
+       /*
+        * Cannot write to CRx with PSR.ic=1
+        */
+       psr = ia64_clear_ic();
+       ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
+                pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
+                IA64_GRANULE_SHIFT);
+       ia64_set_psr(psr);              /* restore psr */
+       ia64_srlz_i();
+}
+
+void __init
+efi_init (void)
+{
+       void *efi_map_start, *efi_map_end;
+       efi_config_table_t *config_tables;
+       efi_char16_t *c16;
+       u64 efi_desc_size;
+       char *cp, *end, vendor[100] = "unknown";
+       extern char saved_command_line[];
+       int i;
+
+       /* it's too early to be able to use the standard kernel command line support... */
+       for (cp = saved_command_line; *cp; ) {
+               if (memcmp(cp, "mem=", 4) == 0) {
+                       cp += 4;
+                       mem_limit = memparse(cp, &end);
+                       if (end != cp)
+                               break;
+                       cp = end;
+               } else if (memcmp(cp, "max_addr=", 9) == 0) {
+                       cp += 9;
+                       max_addr = GRANULEROUNDDOWN(memparse(cp, &end));
+                       if (end != cp)
+                               break;
+                       cp = end;
+               } else {
+                       while (*cp != ' ' && *cp)
+                               ++cp;
+                       while (*cp == ' ')
+                               ++cp;
+               }
+       }
+       if (max_addr != ~0UL)
+               printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
+
+       efi.systab = __va(ia64_boot_param->efi_systab);
+
+       /*
+        * Verify the EFI Table
+        */
+       if (efi.systab == NULL)
+               panic("Woah! Can't find EFI system table.\n");
+       if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+               panic("Woah! EFI system table signature incorrect\n");
+       if ((efi.systab->hdr.revision ^ EFI_SYSTEM_TABLE_REVISION) >> 16 != 0)
+               printk(KERN_WARNING "Warning: EFI system table major version mismatch: "
+                      "got %d.%02d, expected %d.%02d\n",
+                      efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff,
+                      EFI_SYSTEM_TABLE_REVISION >> 16, EFI_SYSTEM_TABLE_REVISION & 0xffff);
+
+       config_tables = __va(efi.systab->tables);
+
+       /* Show what we know for posterity */
+       c16 = __va(efi.systab->fw_vendor);
+       if (c16) {
+               for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
+                       vendor[i] = *c16++;
+               vendor[i] = '\0';
+       }
+
+       printk(KERN_INFO "EFI v%u.%.02u by %s:",
+              efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
+
+       for (i = 0; i < (int) efi.systab->nr_tables; i++) {
+               if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
+                       efi.mps = __va(config_tables[i].table);
+                       printk(" MPS=0x%lx", config_tables[i].table);
+               } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
+                       efi.acpi20 = __va(config_tables[i].table);
+                       printk(" ACPI 2.0=0x%lx", config_tables[i].table);
+               } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
+                       efi.acpi = __va(config_tables[i].table);
+                       printk(" ACPI=0x%lx", config_tables[i].table);
+               } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
+                       efi.smbios = __va(config_tables[i].table);
+                       printk(" SMBIOS=0x%lx", config_tables[i].table);
+               } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
+                       efi.sal_systab = __va(config_tables[i].table);
+                       printk(" SALsystab=0x%lx", config_tables[i].table);
+               } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
+                       efi.hcdp = __va(config_tables[i].table);
+                       printk(" HCDP=0x%lx", config_tables[i].table);
+                       efi.hcdp = __va(config_tables[i].table);
+                       printk(" HCDP=0x%lx", config_tables[i].table);
+               }
+       }
+       printk("\n");
+
+       runtime = __va(efi.systab->runtime);
+       efi.get_time = phys_get_time;
+       efi.set_time = phys_set_time;
+       efi.get_wakeup_time = phys_get_wakeup_time;
+       efi.set_wakeup_time = phys_set_wakeup_time;
+       efi.get_variable = phys_get_variable;
+       efi.get_next_variable = phys_get_next_variable;
+       efi.set_variable = phys_set_variable;
+       efi.get_next_high_mono_count = phys_get_next_high_mono_count;
+       efi.reset_system = phys_reset_system;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+#if EFI_DEBUG
+       /* print EFI memory map: */
+       {
+               efi_memory_desc_t *md;
+               void *p;
+
+               for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
+                       md = p;
+                       printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
+                              i, md->type, md->attribute, md->phys_addr,
+                              md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
+                              md->num_pages >> (20 - EFI_PAGE_SHIFT));
+               }
+       }
+#endif
+
+       efi_map_pal_code();
+       efi_enter_virtual_mode();
+}
+
+void
+efi_enter_virtual_mode (void)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       efi_status_t status;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+               if (md->attribute & EFI_MEMORY_RUNTIME) {
+                       /*
+                        * Some descriptors have multiple bits set, so the order of
+                        * the tests is relevant.
+                        */
+                       if (md->attribute & EFI_MEMORY_WB) {
+                               md->virt_addr = (u64) __va(md->phys_addr);
+                       } else if (md->attribute & EFI_MEMORY_UC) {
+                               md->virt_addr = (u64) ioremap(md->phys_addr, 0);
+                       } else if (md->attribute & EFI_MEMORY_WC) {
+#if 0
+                               md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
+                                                                          | _PAGE_D
+                                                                          | _PAGE_MA_WC
+                                                                          | _PAGE_PL_0
+                                                                          | _PAGE_AR_RW));
+#else
+                               printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
+                               md->virt_addr = (u64) ioremap(md->phys_addr, 0);
+#endif
+                       } else if (md->attribute & EFI_MEMORY_WT) {
+#if 0
+                               md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
+                                                                          | _PAGE_D | _PAGE_MA_WT
+                                                                          | _PAGE_PL_0
+                                                                          | _PAGE_AR_RW));
+#else
+                               printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
+                               md->virt_addr = (u64) ioremap(md->phys_addr, 0);
+#endif
+                       }
+               }
+       }
+
+       status = efi_call_phys(__va(runtime->set_virtual_address_map),
+                              ia64_boot_param->efi_memmap_size,
+                              efi_desc_size, ia64_boot_param->efi_memdesc_version,
+                              ia64_boot_param->efi_memmap);
+       if (status != EFI_SUCCESS) {
+               printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
+                      "(status=%lu)\n", status);
+               return;
+       }
+
+       /*
+        * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
+        */
+       efi.get_time = virt_get_time;
+       efi.set_time = virt_set_time;
+       efi.get_wakeup_time = virt_get_wakeup_time;
+       efi.set_wakeup_time = virt_set_wakeup_time;
+       efi.get_variable = virt_get_variable;
+       efi.get_next_variable = virt_get_next_variable;
+       efi.set_variable = virt_set_variable;
+       efi.get_next_high_mono_count = virt_get_next_high_mono_count;
+       efi.reset_system = virt_reset_system;
+}
+
+/*
+ * Walk the EFI memory map looking for the I/O port range.  There can only be one entry of
+ * this type, other I/O port ranges should be described via ACPI.
+ */
+u64
+efi_get_iobase (void)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+               if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
+                       if (md->attribute & EFI_MEMORY_UC)
+                               return md->phys_addr;
+               }
+       }
+       return 0;
+}
+
+#ifdef XEN
+// variation of efi_get_iobase which returns entire memory descriptor
+efi_memory_desc_t *
+efi_get_io_md (void)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+               if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
+                       if (md->attribute & EFI_MEMORY_UC)
+                               return md;
+               }
+       }
+       return 0;
+}
+#endif
+
+u32
+efi_mem_type (unsigned long phys_addr)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+
+               if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
+                        return md->type;
+       }
+       return 0;
+}
+
+u64
+efi_mem_attributes (unsigned long phys_addr)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+
+               if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
+                       return md->attribute;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(efi_mem_attributes);
+
+int
+valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+
+               if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
+                       if (!(md->attribute & EFI_MEMORY_WB))
+                               return 0;
+
+                       if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
+                               *size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+int __init
+efi_uart_console_only(void)
+{
+       efi_status_t status;
+       char *s, name[] = "ConOut";
+       efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
+       efi_char16_t *utf16, name_utf16[32];
+       unsigned char data[1024];
+       unsigned long size = sizeof(data);
+       struct efi_generic_dev_path *hdr, *end_addr;
+       int uart = 0;
+
+       /* Convert to UTF-16 */
+       utf16 = name_utf16;
+       s = name;
+       while (*s)
+               *utf16++ = *s++ & 0x7f;
+       *utf16 = 0;
+
+       status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
+       if (status != EFI_SUCCESS) {
+               printk(KERN_ERR "No EFI %s variable?\n", name);
+               return 0;
+       }
+
+       hdr = (struct efi_generic_dev_path *) data;
+       end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
+       while (hdr < end_addr) {
+               if (hdr->type == EFI_DEV_MSG &&
+                   hdr->sub_type == EFI_DEV_MSG_UART)
+                       uart = 1;
+               else if (hdr->type == EFI_DEV_END_PATH ||
+                         hdr->type == EFI_DEV_END_PATH2) {
+                       if (!uart)
+                               return 0;
+                       if (hdr->sub_type == EFI_DEV_END_ENTIRE)
+                               return 1;
+                       uart = 0;
+               }
+               hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
+       }
+       printk(KERN_ERR "Malformed %s value\n", name);
+       return 0;
+}
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/entry.S
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/entry.S   Wed Aug  3 00:25:11 2005
@@ -0,0 +1,1653 @@
+/*
+ * ia64/kernel/entry.S
+ *
+ * Kernel entry points.
+ *
+ * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999, 2002-2003
+ *     Asit Mallick <Asit.K.Mallick@xxxxxxxxx>
+ *     Don Dugger <Don.Dugger@xxxxxxxxx>
+ *     Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ *     Fenghua Yu <fenghua.yu@xxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ */
+/*
+ * ia64_switch_to now places correct virtual mapping in TR2 for
+ * kernel stack. This allows us to handle interrupts without changing
+ * to physical mode.
+ *
+ * Jonathan Nicklin    <nicklin@xxxxxxxxxxxxxxxxxxxxxxxx>
+ * Patrick O'Rourke    <orourke@xxxxxxxxxxxxxxxxxxxxxxxx>
+ * 11/07/2000
+ */
+/*
+ * Global (preserved) predicate usage on syscall entry/exit path:
+ *
+ *     pKStk:          See entry.h.
+ *     pUStk:          See entry.h.
+ *     pSys:           See entry.h.
+ *     pNonSys:        !pSys
+ */
+
+#include <linux/config.h>
+
+#include <asm/asmmacro.h>
+#include <asm/cache.h>
+#include <asm/errno.h>
+#include <asm/kregs.h>
+#include <asm/offsets.h>
+#include <asm/pgtable.h>
+#include <asm/percpu.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+
+#include "minstate.h"
+
+#ifndef XEN
+       /*
+        * execve() is special because in case of success, we need to
+        * setup a null register window frame.
+        */
+ENTRY(ia64_execve)
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc loc1=ar.pfs,8,2,4,0
+       mov loc0=rp
+       .body
+       mov out0=in0                    // filename
+       ;;                              // stop bit between alloc and call
+       mov out1=in1                    // argv
+       mov out2=in2                    // envp
+       add out3=16,sp                  // regs
+       br.call.sptk.many rp=sys_execve
+.ret0:
+#ifdef CONFIG_IA32_SUPPORT
+       /*
+        * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
+        * from pt_regs.
+        */
+       adds r16=PT(CR_IPSR)+16,sp
+       ;;
+       ld8 r16=[r16]
+#endif
+       cmp4.ge p6,p7=r8,r0
+       mov ar.pfs=loc1                 // restore ar.pfs
+       sxt4 r8=r8                      // return 64-bit result
+       ;;
+       stf.spill [sp]=f0
+(p6)   cmp.ne pKStk,pUStk=r0,r0        // a successful execve() lands us in user-mode...
+       mov rp=loc0
+(p6)   mov ar.pfs=r0                   // clear ar.pfs on success
+(p7)   br.ret.sptk.many rp
+
+       /*
+        * In theory, we'd have to zap this state only to prevent leaking of
+        * security sensitive state (e.g., if current->mm->dumpable is zero).  However,
+        * this executes in less than 20 cycles even on Itanium, so it's not worth
+        * optimizing for...).
+        */
+       mov ar.unat=0;          mov ar.lc=0
+       mov r4=0;               mov f2=f0;              mov b1=r0
+       mov r5=0;               mov f3=f0;              mov b2=r0
+       mov r6=0;               mov f4=f0;              mov b3=r0
+       mov r7=0;               mov f5=f0;              mov b4=r0
+       ldf.fill f12=[sp];      mov f13=f0;             mov b5=r0
+       ldf.fill f14=[sp];      ldf.fill f15=[sp];      mov f16=f0
+       ldf.fill f17=[sp];      ldf.fill f18=[sp];      mov f19=f0
+       ldf.fill f20=[sp];      ldf.fill f21=[sp];      mov f22=f0
+       ldf.fill f23=[sp];      ldf.fill f24=[sp];      mov f25=f0
+       ldf.fill f26=[sp];      ldf.fill f27=[sp];      mov f28=f0
+       ldf.fill f29=[sp];      ldf.fill f30=[sp];      mov f31=f0
+#ifdef CONFIG_IA32_SUPPORT
+       tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
+       movl loc0=ia64_ret_from_ia32_execve
+       ;;
+(p6)   mov rp=loc0
+#endif
+       br.ret.sptk.many rp
+END(ia64_execve)
+
+/*
+ * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
+ *           u64 tls)
+ */
+GLOBAL_ENTRY(sys_clone2)
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc r16=ar.pfs,8,2,6,0
+       DO_SAVE_SWITCH_STACK
+       adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
+       mov loc0=rp
+       mov loc1=r16                            // save ar.pfs across do_fork
+       .body
+       mov out1=in1
+       mov out3=in2
+       tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
+       mov out4=in3    // parent_tidptr: valid only w/CLONE_PARENT_SETTID
+       ;;
+(p6)   st8 [r2]=in5                            // store TLS in r16 for copy_thread()
+       mov out5=in4    // child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
+       adds out2=IA64_SWITCH_STACK_SIZE+16,sp  // out2 = &regs
+       mov out0=in0                            // out0 = clone_flags
+       br.call.sptk.many rp=do_fork
+.ret1: .restore sp
+       adds sp=IA64_SWITCH_STACK_SIZE,sp       // pop the switch stack
+       mov ar.pfs=loc1
+       mov rp=loc0
+       br.ret.sptk.many rp
+END(sys_clone2)
+
+/*
+ * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
+ *     Deprecated.  Use sys_clone2() instead.
+ */
+GLOBAL_ENTRY(sys_clone)
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc r16=ar.pfs,8,2,6,0
+       DO_SAVE_SWITCH_STACK
+       adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
+       mov loc0=rp
+       mov loc1=r16                            // save ar.pfs across do_fork
+       .body
+       mov out1=in1
+       mov out3=16                             // stacksize (compensates for 16-byte scratch area)
+       tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
+       mov out4=in2    // parent_tidptr: valid only w/CLONE_PARENT_SETTID
+       ;;
+(p6)   st8 [r2]=in4                            // store TLS in r13 (tp)
+       mov out5=in3    // child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
+       adds out2=IA64_SWITCH_STACK_SIZE+16,sp  // out2 = &regs
+       mov out0=in0                            // out0 = clone_flags
+       br.call.sptk.many rp=do_fork
+.ret2: .restore sp
+       adds sp=IA64_SWITCH_STACK_SIZE,sp       // pop the switch stack
+       mov ar.pfs=loc1
+       mov rp=loc0
+       br.ret.sptk.many rp
+END(sys_clone)
+#endif /* !XEN */
+
+/*
+ * prev_task <- ia64_switch_to(struct task_struct *next)
+ *     With Ingo's new scheduler, interrupts are disabled when this routine gets
+ *     called.  The code starting at .map relies on this.  The rest of the code
+ *     doesn't care about the interrupt masking status.
+ */
+GLOBAL_ENTRY(ia64_switch_to)
+       .prologue
+       alloc r16=ar.pfs,1,0,0,0
+       DO_SAVE_SWITCH_STACK
+       .body
+
+       adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
+       movl r25=init_task
+       mov r27=IA64_KR(CURRENT_STACK)
+       adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
+#ifdef XEN
+       dep r20=0,in0,60,4              // physical address of "next"
+#else
+       dep r20=0,in0,61,3              // physical address of "next"
+#endif
+       ;;
+       st8 [r22]=sp                    // save kernel stack pointer of old task
+       shr.u r26=r20,IA64_GRANULE_SHIFT
+       cmp.eq p7,p6=r25,in0
+       ;;
+       /*
+        * If we've already mapped this task's page, we can skip doing it again.
+        */
+(p6)   cmp.eq p7,p6=r26,r27
+(p6)   br.cond.dpnt .map
+       ;;
+.done:
+(p6)   ssm psr.ic                      // if we had to map, reenable the psr.ic bit FIRST!!!
+       ;;
+(p6)   srlz.d
+       ld8 sp=[r21]                    // load kernel stack pointer of new task
+       mov IA64_KR(CURRENT)=in0        // update "current" application register
+       mov r8=r13                      // return pointer to previously running task
+       mov r13=in0                     // set "current" pointer
+       ;;
+       DO_LOAD_SWITCH_STACK
+
+#ifdef CONFIG_SMP
+       sync.i                          // ensure "fc"s done by this CPU are visible on other CPUs
+#endif
+       br.ret.sptk.many rp             // boogie on out in new context
+
+.map:
+#ifdef XEN
+       // avoid overlapping with kernel TR
+       movl r25=KERNEL_START
+       dep  r23=0,in0,0,KERNEL_TR_PAGE_SHIFT
+       ;;
+       cmp.eq p7,p0=r25,r23
+       ;;
+(p7)   mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
+(p7)   br.cond.sptk .done
+#endif
+       rsm psr.ic                      // interrupts (psr.i) are already disabled here
+       movl r25=PAGE_KERNEL
+       ;;
+       srlz.d
+       or r23=r25,r20                  // construct PA | page properties
+       mov r25=IA64_GRANULE_SHIFT<<2
+       ;;
+       mov cr.itir=r25
+       mov cr.ifa=in0                  // VA of next task...
+       ;;
+       mov r25=IA64_TR_CURRENT_STACK
+       mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
+       ;;
+       itr.d dtr[r25]=r23              // wire in new mapping...
+       br.cond.sptk .done
+END(ia64_switch_to)
+
+/*
+ * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
+ * means that we may get an interrupt with "sp" pointing to the new kernel stack while
+ * ar.bspstore is still pointing to the old kernel backing store area.  Since ar.rsc,
+ * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
+ * problem.  Also, we don't need to specify unwind information for preserved registers
+ * that are not modified in save_switch_stack as the right unwind information is already
+ * specified at the call-site of save_switch_stack.
+ */
+
+/*
+ * save_switch_stack:
+ *     - r16 holds ar.pfs
+ *     - b7 holds address to return to
+ *     - rp (b0) holds return address to save
+ */
+GLOBAL_ENTRY(save_switch_stack)
+       .prologue
+       .altrp b7
+       flushrs                 // flush dirty regs to backing store (must be first in insn group)
+       .save @priunat,r17
+       mov r17=ar.unat         // preserve caller's
+       .body
+#ifdef CONFIG_ITANIUM
+       adds r2=16+128,sp
+       adds r3=16+64,sp
+       adds r14=SW(R4)+16,sp
+       ;;
+       st8.spill [r14]=r4,16           // spill r4
+       lfetch.fault.excl.nt1 [r3],128
+       ;;
+       lfetch.fault.excl.nt1 [r2],128
+       lfetch.fault.excl.nt1 [r3],128
+       ;;
+       lfetch.fault.excl [r2]
+       lfetch.fault.excl [r3]
+       adds r15=SW(R5)+16,sp
+#else
+       add r2=16+3*128,sp
+       add r3=16,sp
+       add r14=SW(R4)+16,sp
+       ;;
+       st8.spill [r14]=r4,SW(R6)-SW(R4)        // spill r4 and prefetch offset 0x1c0
+       lfetch.fault.excl.nt1 [r3],128  //              prefetch offset 0x010
+       ;;
+       lfetch.fault.excl.nt1 [r3],128  //              prefetch offset 0x090
+       lfetch.fault.excl.nt1 [r2],128  //              prefetch offset 0x190
+       ;;
+       lfetch.fault.excl.nt1 [r3]      //              prefetch offset 0x110
+       lfetch.fault.excl.nt1 [r2]      //              prefetch offset 0x210
+       adds r15=SW(R5)+16,sp
+#endif
+       ;;
+       st8.spill [r15]=r5,SW(R7)-SW(R5)        // spill r5
+       mov.m ar.rsc=0                  // put RSE in mode: enforced lazy, little endian, pl 0
+       add r2=SW(F2)+16,sp             // r2 = &sw->f2
+       ;;
+       st8.spill [r14]=r6,SW(B0)-SW(R6)        // spill r6
+       mov.m r18=ar.fpsr               // preserve fpsr
+       add r3=SW(F3)+16,sp             // r3 = &sw->f3
+       ;;
+       stf.spill [r2]=f2,32
+       mov.m r19=ar.rnat
+       mov r21=b0
+
+       stf.spill [r3]=f3,32
+       st8.spill [r15]=r7,SW(B2)-SW(R7)        // spill r7
+       mov r22=b1
+       ;;
+       // since we're done with the spills, read and save ar.unat:
+       mov.m r29=ar.unat
+       mov.m r20=ar.bspstore
+       mov r23=b2
+       stf.spill [r2]=f4,32
+       stf.spill [r3]=f5,32
+       mov r24=b3
+       ;;
+       st8 [r14]=r21,SW(B1)-SW(B0)             // save b0
+       st8 [r15]=r23,SW(B3)-SW(B2)             // save b2
+       mov r25=b4
+       mov r26=b5
+       ;;
+       st8 [r14]=r22,SW(B4)-SW(B1)             // save b1
+       st8 [r15]=r24,SW(AR_PFS)-SW(B3)         // save b3
+       mov r21=ar.lc           // I-unit
+       stf.spill [r2]=f12,32
+       stf.spill [r3]=f13,32
+       ;;
+       st8 [r14]=r25,SW(B5)-SW(B4)             // save b4
+       st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)      // save ar.pfs
+       stf.spill [r2]=f14,32
+       stf.spill [r3]=f15,32
+       ;;
+       st8 [r14]=r26                           // save b5
+       st8 [r15]=r21                           // save ar.lc
+       stf.spill [r2]=f16,32
+       stf.spill [r3]=f17,32
+       ;;
+       stf.spill [r2]=f18,32
+       stf.spill [r3]=f19,32
+       ;;
+       stf.spill [r2]=f20,32
+       stf.spill [r3]=f21,32
+       ;;
+       stf.spill [r2]=f22,32
+       stf.spill [r3]=f23,32
+       ;;
+       stf.spill [r2]=f24,32
+       stf.spill [r3]=f25,32
+       ;;
+       stf.spill [r2]=f26,32
+       stf.spill [r3]=f27,32
+       ;;
+       stf.spill [r2]=f28,32
+       stf.spill [r3]=f29,32
+       ;;
+       stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
+       stf.spill [r3]=f31,SW(PR)-SW(F31)
+       add r14=SW(CALLER_UNAT)+16,sp
+       ;;
+       st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)    // save ar.unat
+       st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
+       mov r21=pr
+       ;;
+       st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
+       st8 [r3]=r21                            // save predicate registers
+       ;;
+       st8 [r2]=r20                            // save ar.bspstore
+       st8 [r14]=r18                           // save fpsr
+       mov ar.rsc=3            // put RSE back into eager mode, pl 0
+       br.cond.sptk.many b7
+END(save_switch_stack)
+
+/*
+ * load_switch_stack:
+ *     - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
+ *     - b7 holds address to return to
+ *     - must not touch r8-r11
+ */
+#ifdef XEN
+GLOBAL_ENTRY(load_switch_stack)
+#else
+ENTRY(load_switch_stack)
+#endif
+       .prologue
+       .altrp b7
+
+       .body
+       lfetch.fault.nt1 [sp]
+       adds r2=SW(AR_BSPSTORE)+16,sp
+       adds r3=SW(AR_UNAT)+16,sp
+       mov ar.rsc=0                                            // put RSE into enforced lazy mode
+       adds r14=SW(CALLER_UNAT)+16,sp
+       adds r15=SW(AR_FPSR)+16,sp
+       ;;
+       ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))   // bspstore
+       ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))       // unat
+       ;;
+       ld8 r21=[r2],16         // restore b0
+       ld8 r22=[r3],16         // restore b1
+       ;;
+       ld8 r23=[r2],16         // restore b2
+       ld8 r24=[r3],16         // restore b3
+       ;;
+       ld8 r25=[r2],16         // restore b4
+       ld8 r26=[r3],16         // restore b5
+       ;;
+       ld8 r16=[r2],(SW(PR)-SW(AR_PFS))        // ar.pfs
+       ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))    // ar.lc
+       ;;
+       ld8 r28=[r2]            // restore pr
+       ld8 r30=[r3]            // restore rnat
+       ;;
+       ld8 r18=[r14],16        // restore caller's unat
+       ld8 r19=[r15],24        // restore fpsr
+       ;;
+       ldf.fill f2=[r14],32
+       ldf.fill f3=[r15],32
+       ;;
+       ldf.fill f4=[r14],32
+       ldf.fill f5=[r15],32
+       ;;
+       ldf.fill f12=[r14],32
+       ldf.fill f13=[r15],32
+       ;;
+       ldf.fill f14=[r14],32
+       ldf.fill f15=[r15],32
+       ;;
+       ldf.fill f16=[r14],32
+       ldf.fill f17=[r15],32
+       ;;
+       ldf.fill f18=[r14],32
+       ldf.fill f19=[r15],32
+       mov b0=r21
+       ;;
+       ldf.fill f20=[r14],32
+       ldf.fill f21=[r15],32
+       mov b1=r22
+       ;;
+       ldf.fill f22=[r14],32
+       ldf.fill f23=[r15],32
+       mov b2=r23
+       ;;
+       mov ar.bspstore=r27
+       mov ar.unat=r29         // establish unat holding the NaT bits for r4-r7
+       mov b3=r24
+       ;;
+       ldf.fill f24=[r14],32
+       ldf.fill f25=[r15],32
+       mov b4=r25
+       ;;
+       ldf.fill f26=[r14],32
+       ldf.fill f27=[r15],32
+       mov b5=r26
+       ;;
+       ldf.fill f28=[r14],32
+       ldf.fill f29=[r15],32
+       mov ar.pfs=r16
+       ;;
+       ldf.fill f30=[r14],32
+       ldf.fill f31=[r15],24
+       mov ar.lc=r17
+       ;;
+       ld8.fill r4=[r14],16
+       ld8.fill r5=[r15],16
+       mov pr=r28,-1
+       ;;
+       ld8.fill r6=[r14],16
+       ld8.fill r7=[r15],16
+
+       mov ar.unat=r18                         // restore caller's unat
+       mov ar.rnat=r30                         // must restore after bspstore but before rsc!
+       mov ar.fpsr=r19                         // restore fpsr
+       mov ar.rsc=3                            // put RSE back into eager mode, pl 0
+       br.cond.sptk.many b7
+END(load_switch_stack)
+
+#ifndef XEN
+GLOBAL_ENTRY(__ia64_syscall)
+       .regstk 6,0,0,0
+       mov r15=in5                             // put syscall number in place
+       break __BREAK_SYSCALL
+       movl r2=errno
+       cmp.eq p6,p7=-1,r10
+       ;;
+(p6)   st4 [r2]=r8
+(p6)   mov r8=-1
+       br.ret.sptk.many rp
+END(__ia64_syscall)
+
+GLOBAL_ENTRY(execve)
+       mov r15=__NR_execve                     // put syscall number in place
+       break __BREAK_SYSCALL
+       br.ret.sptk.many rp
+END(execve)
+
+GLOBAL_ENTRY(clone)
+       mov r15=__NR_clone                      // put syscall number in place
+       break __BREAK_SYSCALL
+       br.ret.sptk.many rp
+END(clone)
+
+       /*
+        * Invoke a system call, but do some tracing before and after the call.
+        * We MUST preserve the current register frame throughout this routine
+        * because some system calls (such as ia64_execve) directly
+        * manipulate ar.pfs.
+        */
+GLOBAL_ENTRY(ia64_trace_syscall)
+       PT_REGS_UNWIND_INFO(0)
+       /*
+        * We need to preserve the scratch registers f6-f11 in case the system
+        * call is sigreturn.
+        */
+       adds r16=PT(F6)+16,sp
+       adds r17=PT(F7)+16,sp
+       ;;
+       stf.spill [r16]=f6,32
+       stf.spill [r17]=f7,32
+       ;;
+       stf.spill [r16]=f8,32
+       stf.spill [r17]=f9,32
+       ;;
+       stf.spill [r16]=f10
+       stf.spill [r17]=f11
+       br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
+       adds r16=PT(F6)+16,sp
+       adds r17=PT(F7)+16,sp
+       ;;
+       ldf.fill f6=[r16],32
+       ldf.fill f7=[r17],32
+       ;;
+       ldf.fill f8=[r16],32
+       ldf.fill f9=[r17],32
+       ;;
+       ldf.fill f10=[r16]
+       ldf.fill f11=[r17]
+       // the syscall number may have changed, so re-load it and re-calculate the
+       // syscall entry-point:
+       adds r15=PT(R15)+16,sp                  // r15 = &pt_regs.r15 (syscall #)
+       ;;
+       ld8 r15=[r15]
+       mov r3=NR_syscalls - 1
+       ;;
+       adds r15=-1024,r15
+       movl r16=sys_call_table
+       ;;
+       shladd r20=r15,3,r16                    // r20 = sys_call_table + 8*(syscall-1024)
+       cmp.leu p6,p7=r15,r3
+       ;;
+(p6)   ld8 r20=[r20]                           // load address of syscall entry point
+(p7)   movl r20=sys_ni_syscall
+       ;;
+       mov b6=r20
+       br.call.sptk.many rp=b6                 // do the syscall
+.strace_check_retval:
+       cmp.lt p6,p0=r8,r0                      // syscall failed?
+       adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
+       adds r3=PT(R10)+16,sp                   // r3 = &pt_regs.r10
+       mov r10=0
+(p6)   br.cond.sptk strace_error               // syscall failed ->
+       ;;                                      // avoid RAW on r10
+.strace_save_retval:
+.mem.offset 0,0; st8.spill [r2]=r8             // store return value in slot for r8
+.mem.offset 8,0; st8.spill [r3]=r10            // clear error indication in slot for r10
+       br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
+.ret3: br.cond.sptk .work_pending_syscall_end
+
+strace_error:
+       ld8 r3=[r2]                             // load pt_regs.r8
+       sub r9=0,r8                             // negate return value to get errno value
+       ;;
+       cmp.ne p6,p0=r3,r0                      // is pt_regs.r8!=0?
+       adds r3=16,r2                           // r3=&pt_regs.r10
+       ;;
+(p6)   mov r10=-1
+(p6)   mov r8=r9
+       br.cond.sptk .strace_save_retval
+END(ia64_trace_syscall)
+
+       /*
+        * When traced and returning from sigreturn, we invoke syscall_trace but then
+        * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
+        */
+GLOBAL_ENTRY(ia64_strace_leave_kernel)
+       PT_REGS_UNWIND_INFO(0)
+{      /*
+        * Some versions of gas generate bad unwind info if the first instruction of a
+        * procedure doesn't go into the first slot of a bundle.  This is a workaround.
+        */
+       nop.m 0
+       nop.i 0
+       br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
+}
+.ret4: br.cond.sptk ia64_leave_kernel
+END(ia64_strace_leave_kernel)
+#endif
+
+GLOBAL_ENTRY(ia64_ret_from_clone)
+       PT_REGS_UNWIND_INFO(0)
+{      /*
+        * Some versions of gas generate bad unwind info if the first instruction of a
+        * procedure doesn't go into the first slot of a bundle.  This is a workaround.
+        */
+       nop.m 0
+       nop.i 0
+       /*
+        * We need to call schedule_tail() to complete the scheduling process.
+        * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
+        * address of the previously executing task.
+        */
+       br.call.sptk.many rp=ia64_invoke_schedule_tail
+}
+#ifdef XEN
+       // new domains are cloned but not exec'ed so switch to user mode here
+       cmp.ne pKStk,pUStk=r0,r0
+#ifdef CONFIG_VTI
+       br.cond.spnt ia64_leave_hypervisor
+#else // CONFIG_VTI
+       br.cond.spnt ia64_leave_kernel
+#endif // CONFIG_VTI
+#else
+.ret8:
+       adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
+       ;;
+       ld4 r2=[r2]
+       ;;
+       mov r8=0
+       and r2=_TIF_SYSCALL_TRACEAUDIT,r2
+       ;;
+       cmp.ne p6,p0=r2,r0
+(p6)   br.cond.spnt .strace_check_retval
+#endif
+       ;;                                      // added stop bits to prevent r8 dependency
+END(ia64_ret_from_clone)
+       // fall through
+GLOBAL_ENTRY(ia64_ret_from_syscall)
+       PT_REGS_UNWIND_INFO(0)
+       cmp.ge p6,p7=r8,r0                      // syscall executed successfully?
+       adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
+       mov r10=r0                              // clear error indication in r10
+(p7)   br.cond.spnt handle_syscall_error       // handle potential syscall failure
+END(ia64_ret_from_syscall)
+       // fall through
+/*
+ * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
+ *     need to switch to bank 0 and doesn't restore the scratch registers.
+ *     To avoid leaking kernel bits, the scratch registers are set to
+ *     the following known-to-be-safe values:
+ *
+ *               r1: restored (global pointer)
+ *               r2: cleared
+ *               r3: 1 (when returning to user-level)
+ *           r8-r11: restored (syscall return value(s))
+ *              r12: restored (user-level stack pointer)
+ *              r13: restored (user-level thread pointer)
+ *              r14: cleared
+ *              r15: restored (syscall #)
+ *          r16-r17: cleared
+ *              r18: user-level b6
+ *              r19: cleared
+ *              r20: user-level ar.fpsr
+ *              r21: user-level b0
+ *              r22: cleared
+ *              r23: user-level ar.bspstore
+ *              r24: user-level ar.rnat
+ *              r25: user-level ar.unat
+ *              r26: user-level ar.pfs
+ *              r27: user-level ar.rsc
+ *              r28: user-level ip
+ *              r29: user-level psr
+ *              r30: user-level cfm
+ *              r31: user-level pr
+ *           f6-f11: cleared
+ *               pr: restored (user-level pr)
+ *               b0: restored (user-level rp)
+ *               b6: restored
+ *               b7: cleared
+ *          ar.unat: restored (user-level ar.unat)
+ *           ar.pfs: restored (user-level ar.pfs)
+ *           ar.rsc: restored (user-level ar.rsc)
+ *          ar.rnat: restored (user-level ar.rnat)
+ *      ar.bspstore: restored (user-level ar.bspstore)
+ *          ar.fpsr: restored (user-level ar.fpsr)
+ *           ar.ccv: cleared
+ *           ar.csd: cleared
+ *           ar.ssd: cleared
+ */
+ENTRY(ia64_leave_syscall)
+       PT_REGS_UNWIND_INFO(0)
+       /*
+        * work.need_resched etc. mustn't get changed by this CPU before it returns to
+        * user- or fsys-mode, hence we disable interrupts early on.
+        *
+        * p6 controls whether current_thread_info()->flags needs to be checked for
+        * extra work.  We always check for extra work when returning to user-level.
+        * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
+        * is 0.  After extra work processing has been completed, execution
+        * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+        * needs to be redone.
+        */
+#ifdef CONFIG_PREEMPT
+       rsm psr.i                               // disable interrupts
+       cmp.eq pLvSys,p0=r0,r0                  // pLvSys=1: leave from syscall
+(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+       ;;
+       .pred.rel.mutex pUStk,pKStk
+(pKStk) ld4 r21=[r20]                  // r21 <- preempt_count
+(pUStk)        mov r21=0                       // r21 <- 0
+       ;;
+       cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
+#else /* !CONFIG_PREEMPT */
+(pUStk)        rsm psr.i
+       cmp.eq pLvSys,p0=r0,r0          // pLvSys=1: leave from syscall
+(pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
+#endif
+.work_processed_syscall:
+       adds r2=PT(LOADRS)+16,r12
+       adds r3=PT(AR_BSPSTORE)+16,r12
+#ifdef XEN
+       ;;
+#else
+       adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
+       ;;
+(p6)   ld4 r31=[r18]                           // load current_thread_info()->flags
+#endif
+       ld8 r19=[r2],PT(B6)-PT(LOADRS)          // load ar.rsc value for "loadrs"
+       mov b7=r0               // clear b7
+       ;;
+       ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)    // load ar.bspstore (may be garbage)
+       ld8 r18=[r2],PT(R9)-PT(B6)              // load b6
+#ifndef XEN
+(p6)   and r15=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
+#endif
+       ;;
+       mov r16=ar.bsp                          // M2  get existing backing store pointer
+#ifndef XEN
+(p6)   cmp4.ne.unc p6,p0=r15, r0               // any special work pending?
+(p6)   br.cond.spnt .work_pending_syscall
+#endif
+       ;;
+       // start restoring the state saved on the kernel stack (struct pt_regs):
+       ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
+       ld8 r11=[r3],PT(CR_IIP)-PT(R11)
+       mov f6=f0               // clear f6
+       ;;
+       invala                  // M0|1 invalidate ALAT
+       rsm psr.i | psr.ic      // M2 initiate turning off of interrupt and interruption collection
+       mov f9=f0               // clear f9
+
+       ld8 r29=[r2],16         // load cr.ipsr
+       ld8 r28=[r3],16                 // load cr.iip
+       mov f8=f0               // clear f8
+       ;;
+       ld8 r30=[r2],16         // M0|1 load cr.ifs
+       mov.m ar.ssd=r0         // M2 clear ar.ssd
+       cmp.eq p9,p0=r0,r0      // set p9 to indicate that we should restore cr.ifs
+       ;;
+       ld8 r25=[r3],16         // M0|1 load ar.unat
+       mov.m ar.csd=r0         // M2 clear ar.csd
+       mov r22=r0              // clear r22
+       ;;
+       ld8 r26=[r2],PT(B0)-PT(AR_PFS)  // M0|1 load ar.pfs
+(pKStk)        mov r22=psr             // M2 read PSR now that interrupts are disabled
+       mov f10=f0              // clear f10
+       ;;
+       ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
+       ld8 r27=[r3],PT(PR)-PT(AR_RSC)  // load ar.rsc
+       mov f11=f0              // clear f11
+       ;;
+       ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)    // load ar.rnat (may be garbage)
+       ld8 r31=[r3],PT(R1)-PT(PR)              // load predicates
+(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+       ;;
+       ld8 r20=[r2],PT(R12)-PT(AR_FPSR)        // load ar.fpsr
+       ld8.fill r1=[r3],16     // load r1
+(pUStk) mov r17=1
+       ;;
+       srlz.d                  // M0  ensure interruption collection is off
+       ld8.fill r13=[r3],16
+       mov f7=f0               // clear f7
+       ;;
+       ld8.fill r12=[r2]       // restore r12 (sp)
+       ld8.fill r15=[r3]       // restore r15
+#ifdef XEN
+       movl r3=THIS_CPU(ia64_phys_stacked_size_p8)
+#else
+       addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
+#endif
+       ;;
+(pUStk)        ld4 r3=[r3]             // r3 = cpu_data->phys_stacked_size_p8
+(pUStk) st1 [r14]=r17
+       mov b6=r18              // I0  restore b6
+       ;;
+       mov r14=r0              // clear r14
+       shr.u r18=r19,16        // I0|1 get byte size of existing "dirty" partition
+(pKStk) br.cond.dpnt.many skip_rbs_switch
+
+       mov.m ar.ccv=r0         // clear ar.ccv
+(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
+       br.cond.sptk.many rbs_switch
+END(ia64_leave_syscall)
+
+#ifdef CONFIG_IA32_SUPPORT
+GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+       PT_REGS_UNWIND_INFO(0)
+       adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
+       adds r3=PT(R10)+16,sp                   // r3 = &pt_regs.r10
+       ;;
+       .mem.offset 0,0
+       st8.spill [r2]=r8       // store return value in slot for r8 and set unat bit
+       .mem.offset 8,0
+       st8.spill [r3]=r0       // clear error indication in slot for r10 and set unat bit
+END(ia64_ret_from_ia32_execve)
+       // fall through
+#endif /* CONFIG_IA32_SUPPORT */
+GLOBAL_ENTRY(ia64_leave_kernel)
+       PT_REGS_UNWIND_INFO(0)
+       /*
+        * work.need_resched etc. mustn't get changed by this CPU before it returns to
+        * user- or fsys-mode, hence we disable interrupts early on.
+        *
+        * p6 controls whether current_thread_info()->flags needs to be checked for
+        * extra work.  We always check for extra work when returning to user-level.
+        * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
+        * is 0.  After extra work processing has been completed, execution
+        * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+        * needs to be redone.
+        */
+#ifdef CONFIG_PREEMPT
+       rsm psr.i                               // disable interrupts
+       cmp.eq p0,pLvSys=r0,r0                  // pLvSys=0: leave from kernel
+(pKStk)        adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+       ;;
+       .pred.rel.mutex pUStk,pKStk
+(pKStk)        ld4 r21=[r20]                   // r21 <- preempt_count
+(pUStk)        mov r21=0                       // r21 <- 0
+       ;;
+       cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
+#else
+(pUStk)        rsm psr.i
+       cmp.eq p0,pLvSys=r0,r0          // pLvSys=0: leave from kernel
+(pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
+#endif
+.work_processed_kernel:
+#ifdef XEN
+       alloc loc0=ar.pfs,0,1,1,0
+       adds out0=16,r12
+       ;;
+(p6)   br.call.sptk.many b0=deliver_pending_interrupt
+       mov ar.pfs=loc0
+       mov r31=r0
+#else
+       adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
+       ;;
+(p6)   ld4 r31=[r17]                           // load current_thread_info()->flags
+#endif
+       adds r21=PT(PR)+16,r12
+       ;;
+
+       lfetch [r21],PT(CR_IPSR)-PT(PR)
+       adds r2=PT(B6)+16,r12
+       adds r3=PT(R16)+16,r12
+       ;;
+       lfetch [r21]
+       ld8 r28=[r2],8          // load b6
+       adds r29=PT(R24)+16,r12
+
+       ld8.fill r16=[r3]
+       adds r30=PT(AR_CCV)+16,r12
+(p6)   and r19=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
+       ;;
+       adds r3=PT(AR_CSD)-PT(R16),r3
+       ld8.fill r24=[r29]
+       ld8 r15=[r30]           // load ar.ccv
+(p6)   cmp4.ne.unc p6,p0=r19, r0               // any special work pending?
+       ;;
+       ld8 r29=[r2],16         // load b7
+       ld8 r30=[r3],16         // load ar.csd
+#ifndef XEN
+(p6)   br.cond.spnt .work_pending
+#endif
+       ;;
+       ld8 r31=[r2],16         // load ar.ssd
+       ld8.fill r8=[r3],16
+       ;;
+       ld8.fill r9=[r2],16
+       ld8.fill r10=[r3],PT(R17)-PT(R10)
+       ;;
+       ld8.fill r11=[r2],PT(R18)-PT(R11)
+       ld8.fill r17=[r3],16
+       ;;
+       ld8.fill r18=[r2],16
+       ld8.fill r19=[r3],16
+       ;;
+       ld8.fill r20=[r2],16
+       ld8.fill r21=[r3],16
+       mov ar.csd=r30
+       mov ar.ssd=r31
+       ;;
+       rsm psr.i | psr.ic      // initiate turning off of interrupt and interruption collection
+       invala                  // invalidate ALAT
+       ;;
+       ld8.fill r22=[r2],24
+       ld8.fill r23=[r3],24
+       mov b6=r28
+       ;;
+       ld8.fill r25=[r2],16
+       ld8.fill r26=[r3],16
+       mov b7=r29
+       ;;
+       ld8.fill r27=[r2],16
+       ld8.fill r28=[r3],16
+       ;;
+       ld8.fill r29=[r2],16
+       ld8.fill r30=[r3],24
+       ;;
+       ld8.fill r31=[r2],PT(F9)-PT(R31)
+       adds r3=PT(F10)-PT(F6),r3
+       ;;
+       ldf.fill f9=[r2],PT(F6)-PT(F9)
+       ldf.fill f10=[r3],PT(F8)-PT(F10)
+       ;;
+       ldf.fill f6=[r2],PT(F7)-PT(F6)
+       ;;
+       ldf.fill f7=[r2],PT(F11)-PT(F7)
+       ldf.fill f8=[r3],32
+       ;;
+       srlz.i                  // ensure interruption collection is off
+       mov ar.ccv=r15
+       ;;
+       ldf.fill f11=[r2]
+       bsw.0                   // switch back to bank 0 (no stop bit required beforehand...)
+       ;;
+(pUStk)        mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
+       adds r16=PT(CR_IPSR)+16,r12
+       adds r17=PT(CR_IIP)+16,r12
+
+(pKStk)        mov r22=psr             // M2 read PSR now that interrupts are disabled
+       nop.i 0
+       nop.i 0
+       ;;
+       ld8 r29=[r16],16        // load cr.ipsr
+       ld8 r28=[r17],16        // load cr.iip
+       ;;
+       ld8 r30=[r16],16        // load cr.ifs
+       ld8 r25=[r17],16        // load ar.unat
+       ;;
+       ld8 r26=[r16],16        // load ar.pfs
+       ld8 r27=[r17],16        // load ar.rsc
+       cmp.eq p9,p0=r0,r0      // set p9 to indicate that we should restore cr.ifs
+       ;;
+       ld8 r24=[r16],16        // load ar.rnat (may be garbage)
+       ld8 r23=[r17],16        // load ar.bspstore (may be garbage)
+       ;;
+       ld8 r31=[r16],16        // load predicates
+       ld8 r21=[r17],16        // load b0
+       ;;
+       ld8 r19=[r16],16        // load ar.rsc value for "loadrs"
+       ld8.fill r1=[r17],16    // load r1
+       ;;
+       ld8.fill r12=[r16],16
+       ld8.fill r13=[r17],16
+(pUStk)        adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
+       ;;
+       ld8 r20=[r16],16        // ar.fpsr
+       ld8.fill r15=[r17],16
+       ;;
+       ld8.fill r14=[r16],16
+       ld8.fill r2=[r17]
+(pUStk)        mov r17=1
+       ;;
+       ld8.fill r3=[r16]
+(pUStk)        st1 [r18]=r17           // restore current->thread.on_ustack
+       shr.u r18=r19,16        // get byte size of existing "dirty" partition
+       ;;
+       mov r16=ar.bsp          // get existing backing store pointer
+#ifdef XEN
+       movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
+#else
+       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
+#endif
+       ;;
+       ld4 r17=[r17]           // r17 = cpu_data->phys_stacked_size_p8
+(pKStk)        br.cond.dpnt skip_rbs_switch
+
+       /*
+        * Restore user backing store.
+        *
+        * NOTE: alloc, loadrs, and cover can't be predicated.
+        */
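+       // (a brief sketch of the mechanism: "cover" folds the current frame
+       // into the dirty partition and records it in cr.ifs; the later
+       // "loadrs" reloads that dirty partition from the kernel backing store
+       // so it can drain to the user backing store once ar.bspstore is
+       // switched.)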
+(pNonSys) br.cond.dpnt dont_preserve_current_frame
+
+rbs_switch:
+       cover                           // add current frame into dirty partition and set cr.ifs
+       ;;
+       mov r19=ar.bsp                  // get new backing store pointer
+       sub r16=r16,r18                 // krbs = old bsp - size of dirty partition
+       cmp.ne p9,p0=r0,r0              // clear p9 to skip restore of cr.ifs
+       ;;
+       sub r19=r19,r16                 // calculate total byte size of dirty partition
+       add r18=64,r18                  // don't force in0-in7 into memory...
+       ;;
+       shl r19=r19,16                  // shift size of dirty partition into loadrs position
+       ;;
+dont_preserve_current_frame:
+       /*
+        * To prevent leaking bits between the kernel and user-space,
+        * we must clear the stacked registers in the "invalid" partition here.
+        * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
+        * 5 registers/cycle on McKinley).
+        */
+#      define pRecurse p6
+#      define pReturn  p7
+#ifdef CONFIG_ITANIUM
+#      define Nregs    10
+#else
+#      define Nregs    14
+#endif
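+       // rse_clear_invalid below is a recursive clear: each call allocates a
+       // fresh register frame and zeroes its locals; pRecurse is taken while
+       // more than Nregs*8 bytes remain (counted down in in0/out0), and
+       // pReturn unwinds once the recursion depth carried in in1 is non-zero.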
+       alloc loc0=ar.pfs,2,Nregs-2,2,0
+       shr.u loc1=r18,9                // RNaTslots <= floor(dirtySize / (64*8))
+       sub r17=r17,r18                 // r17 = (physStackedSize + 8) - dirtySize
+       ;;
+       mov ar.rsc=r19                  // load ar.rsc to be used for "loadrs"
+       shladd in0=loc1,3,r17
+       mov in1=0
+       ;;
+       TEXT_ALIGN(32)
+rse_clear_invalid:
+#ifdef CONFIG_ITANIUM
+       // cycle 0
+ { .mii
+       alloc loc0=ar.pfs,2,Nregs-2,2,0
+       cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
+       add out0=-Nregs*8,in0
+}{ .mfb
+       add out1=1,in1                  // increment recursion count
+       nop.f 0
+       nop.b 0                         // can't do br.call here because of alloc (WAW on CFM)
+       ;;
+}{ .mfi        // cycle 1
+       mov loc1=0
+       nop.f 0
+       mov loc2=0
+}{ .mib
+       mov loc3=0
+       mov loc4=0
+(pRecurse) br.call.sptk.many b0=rse_clear_invalid
+
+}{ .mfi        // cycle 2
+       mov loc5=0
+       nop.f 0
+       cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
+}{ .mib
+       mov loc6=0
+       mov loc7=0
+(pReturn) br.ret.sptk.many b0
+}
+#else /* !CONFIG_ITANIUM */
+       alloc loc0=ar.pfs,2,Nregs-2,2,0
+       cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
+       add out0=-Nregs*8,in0
+       add out1=1,in1                  // increment recursion count
+       mov loc1=0
+       mov loc2=0
+       ;;
+       mov loc3=0
+       mov loc4=0
+       mov loc5=0
+       mov loc6=0
+       mov loc7=0
+(pRecurse) br.call.sptk.few b0=rse_clear_invalid
+       ;;
+       mov loc8=0
+       mov loc9=0
+       cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
+       mov loc10=0
+       mov loc11=0
+(pReturn) br.ret.sptk.many b0
+#endif /* !CONFIG_ITANIUM */
+#      undef pRecurse
+#      undef pReturn
+       ;;
+       alloc r17=ar.pfs,0,0,0,0        // drop current register frame
+       ;;
+       loadrs
+       ;;
+skip_rbs_switch:
+       mov ar.unat=r25         // M2
+(pKStk)        extr.u r22=r22,21,1     // I0 extract current value of psr.pp from r22
+(pLvSys)mov r19=r0             // A  clear r19 for leave_syscall, no-op otherwise
+       ;;
+(pUStk)        mov ar.bspstore=r23     // M2
+(pKStk)        dep r29=r22,r29,21,1    // I0 update ipsr.pp with psr.pp
+(pLvSys)mov r16=r0             // A  clear r16 for leave_syscall, no-op otherwise
+       ;;
+       mov cr.ipsr=r29         // M2
+       mov ar.pfs=r26          // I0
+(pLvSys)mov r17=r0             // A  clear r17 for leave_syscall, no-op otherwise
+
+(p9)   mov cr.ifs=r30          // M2
+       mov b0=r21              // I0
+(pLvSys)mov r18=r0             // A  clear r18 for leave_syscall, no-op otherwise
+
+       mov ar.fpsr=r20         // M2
+       mov cr.iip=r28          // M2
+       nop 0
+       ;;
+(pUStk)        mov ar.rnat=r24         // M2 must happen with RSE in lazy mode
+       nop 0
+(pLvSys)mov r2=r0
+
+       mov ar.rsc=r27          // M2
+       mov pr=r31,-1           // I0
+       rfi                     // B
+
+#ifndef XEN
+       /*
+        * On entry:
+        *      r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
+        *      r31 = current->thread_info->flags
+        * On exit:
+        *      p6 = TRUE if work-pending-check needs to be redone
+        */
+.work_pending_syscall:
+       add r2=-8,r2
+       add r3=-8,r3
+       ;;
+       st8 [r2]=r8
+       st8 [r3]=r10
+.work_pending:
+       tbit.nz p6,p0=r31,TIF_SIGDELAYED        // signal delayed from MCA/INIT/NMI/PMI context?
+(p6)   br.cond.sptk.few .sigdelayed
+       ;;
+       tbit.z p6,p0=r31,TIF_NEED_RESCHED       // current_thread_info()->need_resched==0?
+(p6)   br.cond.sptk.few .notify
+#ifdef CONFIG_PREEMPT
+(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
+       ;;
+(pKStk) st4 [r20]=r21
+       ssm psr.i               // enable interrupts
+#endif
+       br.call.spnt.many rp=schedule
+.ret9: cmp.eq p6,p0=r0,r0                              // p6 <- 1
+       rsm psr.i               // disable interrupts
+       ;;
+#ifdef CONFIG_PREEMPT
+(pKStk)        adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+       ;;
+(pKStk)        st4 [r20]=r0            // preempt_count() <- 0
+#endif
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
+       br.cond.sptk.many .work_processed_kernel        // re-check
+
+.notify:
+(pUStk)        br.call.spnt.many rp=notify_resume_user
+.ret10:        cmp.ne p6,p0=r0,r0                              // p6 <- 0
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
+       br.cond.sptk.many .work_processed_kernel        // don't re-check
+
+// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
+// it could not be delivered.  Deliver it now.  The signal might be for us and
+// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
+// signal.
+
+.sigdelayed:
+       br.call.sptk.many rp=do_sigdelayed
+       cmp.eq p6,p0=r0,r0                              // p6 <- 1, always re-check
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
+       br.cond.sptk.many .work_processed_kernel        // re-check
+
+.work_pending_syscall_end:
+       adds r2=PT(R8)+16,r12
+       adds r3=PT(R10)+16,r12
+       ;;
+       ld8 r8=[r2]
+       ld8 r10=[r3]
+       br.cond.sptk.many .work_processed_syscall       // re-check
+#endif
+
+END(ia64_leave_kernel)
+
+ENTRY(handle_syscall_error)
+       /*
+        * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
+        * lead us to mistake a negative return value as a failed syscall.  Those syscalls
+        * must deposit a non-zero value in pt_regs.r8 to indicate an error.  If
+        * pt_regs.r8 is zero, we assume that the call completed successfully.
+        */
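+       // For example, mmap() may validly return an address whose sign bit is
+       // set, so "r8 < 0" alone cannot distinguish success from failure;
+       // only a non-zero pt_regs.r8, tested below, marks a real error.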
+       PT_REGS_UNWIND_INFO(0)
+       ld8 r3=[r2]             // load pt_regs.r8
+       ;;
+       cmp.eq p6,p7=r3,r0      // is pt_regs.r8==0?
+       ;;
+(p7)   mov r10=-1
+(p7)   sub r8=0,r8             // negate return value to get errno
+       br.cond.sptk ia64_leave_syscall
+END(handle_syscall_error)
+
+       /*
+        * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
+        * in case a system call gets restarted.
+        */
+GLOBAL_ENTRY(ia64_invoke_schedule_tail)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc loc1=ar.pfs,8,2,1,0
+       mov loc0=rp
+       mov out0=r8                             // Address of previous task
+       ;;
+       br.call.sptk.many rp=schedule_tail
+.ret11:        mov ar.pfs=loc1
+       mov rp=loc0
+       br.ret.sptk.many rp
+END(ia64_invoke_schedule_tail)
+
+#ifndef XEN
+       /*
+        * Setup stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
+        * be set up by the caller.  We declare 8 input registers so the system call
+        * args get preserved, in case we need to restart a system call.
+        */
+ENTRY(notify_resume_user)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
+       mov r9=ar.unat
+       mov loc0=rp                             // save return address
+       mov out0=0                              // there is no "oldset"
+       adds out1=8,sp                          // out1=&sigscratch->ar_pfs
+(pSys) mov out2=1                              // out2==1 => we're in a syscall
+       ;;
+(pNonSys) mov out2=0                           // out2==0 => not a syscall
+       .fframe 16
+       .spillpsp ar.unat, 16                   // (note that offset is relative to psp+0x10!)
+       st8 [sp]=r9,-16                         // allocate space for ar.unat and save it
+       st8 [out1]=loc1,-8                      // save ar.pfs, out1=&sigscratch
+       .body
+       br.call.sptk.many rp=do_notify_resume_user
+.ret15:        .restore sp
+       adds sp=16,sp                           // pop scratch stack space
+       ;;
+       ld8 r9=[sp]                             // load new unat from sigscratch->scratch_unat
+       mov rp=loc0
+       ;;
+       mov ar.unat=r9
+       mov ar.pfs=loc1
+       br.ret.sptk.many rp
+END(notify_resume_user)
+
+GLOBAL_ENTRY(sys_rt_sigsuspend)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+       alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
+       mov r9=ar.unat
+       mov loc0=rp                             // save return address
+       mov out0=in0                            // mask
+       mov out1=in1                            // sigsetsize
+       adds out2=8,sp                          // out2=&sigscratch->ar_pfs
+       ;;
+       .fframe 16
+       .spillpsp ar.unat, 16                   // (note that offset is relative to psp+0x10!)
+       st8 [sp]=r9,-16                         // allocate space for ar.unat and save it
+       st8 [out2]=loc1,-8                      // save ar.pfs, out2=&sigscratch
+       .body
+       br.call.sptk.many rp=ia64_rt_sigsuspend
+.ret17:        .restore sp
+       adds sp=16,sp                           // pop scratch stack space
+       ;;
+       ld8 r9=[sp]                             // load new unat from sw->caller_unat
+       mov rp=loc0
+       ;;
+       mov ar.unat=r9
+       mov ar.pfs=loc1
+       br.ret.sptk.many rp
+END(sys_rt_sigsuspend)
+
+ENTRY(sys_rt_sigreturn)
+       PT_REGS_UNWIND_INFO(0)
+       /*
+        * Allocate 8 input registers since ptrace() may clobber them
+        */
+       alloc r2=ar.pfs,8,0,1,0
+       .prologue
+       PT_REGS_SAVES(16)
+       adds sp=-16,sp
+       .body
+       cmp.eq pNonSys,pSys=r0,r0               // sigreturn isn't a normal syscall...
+       ;;
+       /*
+        * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
+        * syscall-entry path does not save them we save them here instead.  Note: we
+        * don't need to save any other registers that are not saved by the streamlined
+        * syscall path, because restore_sigcontext() restores them.
+        */
+       adds r16=PT(F6)+32,sp
+       adds r17=PT(F7)+32,sp
+       ;;
+       stf.spill [r16]=f6,32
+       stf.spill [r17]=f7,32
+       ;;
+       stf.spill [r16]=f8,32
+       stf.spill [r17]=f9,32
+       ;;
+       stf.spill [r16]=f10
+       stf.spill [r17]=f11
+       adds out0=16,sp                         // out0 = &sigscratch
+       br.call.sptk.many rp=ia64_rt_sigreturn
+.ret19:        .restore sp 0
+       adds sp=16,sp
+       ;;
+       ld8 r9=[sp]                             // load new ar.unat
+       mov.sptk b7=r8,ia64_leave_kernel
+       ;;
+       mov ar.unat=r9
+       br.many b7
+END(sys_rt_sigreturn)
+#endif
+
+GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
+       .prologue
+       /*
+        * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
+        */
+       mov r16=r0
+       DO_SAVE_SWITCH_STACK
+       br.call.sptk.many rp=ia64_handle_unaligned      // stack frame setup in ivt
+.ret21:        .body
+       DO_LOAD_SWITCH_STACK
+       br.cond.sptk.many rp                            // goes to ia64_leave_kernel
+END(ia64_prepare_handle_unaligned)
+
+#ifndef XEN
+       //
+       // unw_init_running(void (*callback)(info, arg), void *arg)
+       //
+#      define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)
+
+GLOBAL_ENTRY(unw_init_running)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
+       alloc loc1=ar.pfs,2,3,3,0
+       ;;
+       ld8 loc2=[in0],8
+       mov loc0=rp
+       mov r16=loc1
+       DO_SAVE_SWITCH_STACK
+       .body
+
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
+       .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
+       SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
+       adds sp=-EXTRA_FRAME_SIZE,sp
+       .body
+       ;;
+       adds out0=16,sp                         // &info
+       mov out1=r13                            // current
+       adds out2=16+EXTRA_FRAME_SIZE,sp        // &switch_stack
+       br.call.sptk.many rp=unw_init_frame_info
+1:     adds out0=16,sp                         // &info
+       mov b6=loc2
+       mov loc2=gp                             // save gp across indirect function call
+       ;;
+       ld8 gp=[in0]
+       mov out1=in1                            // arg
+       br.call.sptk.many rp=b6                 // invoke the callback function
+1:     mov gp=loc2                             // restore gp
+
+       // For now, we don't allow changing registers from within
+       // unw_init_running; if we ever want to allow that, we'd
+       // have to do a load_switch_stack here:
+       .restore sp
+       adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp
+
+       mov ar.pfs=loc1
+       mov rp=loc0
+       br.ret.sptk.many rp
+END(unw_init_running)
+
+       .rodata
+       .align 8
+       .globl sys_call_table
+sys_call_table:
+       data8 sys_ni_syscall            //  This must be sys_ni_syscall!  See ivt.S.
+       data8 sys_exit                          // 1025
+       data8 sys_read
+       data8 sys_write
+       data8 sys_open
+       data8 sys_close
+       data8 sys_creat                         // 1030
+       data8 sys_link
+       data8 sys_unlink
+       data8 ia64_execve
+       data8 sys_chdir
+       data8 sys_fchdir                        // 1035
+       data8 sys_utimes
+       data8 sys_mknod
+       data8 sys_chmod
+       data8 sys_chown
+       data8 sys_lseek                         // 1040
+       data8 sys_getpid
+       data8 sys_getppid
+       data8 sys_mount
+       data8 sys_umount
+       data8 sys_setuid                        // 1045
+       data8 sys_getuid
+       data8 sys_geteuid
+       data8 sys_ptrace
+       data8 sys_access
+       data8 sys_sync                          // 1050
+       data8 sys_fsync
+       data8 sys_fdatasync
+       data8 sys_kill
+       data8 sys_rename
+       data8 sys_mkdir                         // 1055
+       data8 sys_rmdir
+       data8 sys_dup
+       data8 sys_pipe
+       data8 sys_times
+       data8 ia64_brk                          // 1060
+       data8 sys_setgid
+       data8 sys_getgid
+       data8 sys_getegid
+       data8 sys_acct
+       data8 sys_ioctl                         // 1065
+       data8 sys_fcntl
+       data8 sys_umask
+       data8 sys_chroot
+       data8 sys_ustat
+       data8 sys_dup2                          // 1070
+       data8 sys_setreuid
+       data8 sys_setregid
+       data8 sys_getresuid
+       data8 sys_setresuid
+       data8 sys_getresgid                     // 1075
+       data8 sys_setresgid
+       data8 sys_getgroups
+       data8 sys_setgroups
+       data8 sys_getpgid
+       data8 sys_setpgid                       // 1080
+       data8 sys_setsid
+       data8 sys_getsid
+       data8 sys_sethostname
+       data8 sys_setrlimit
+       data8 sys_getrlimit                     // 1085
+       data8 sys_getrusage
+       data8 sys_gettimeofday
+       data8 sys_settimeofday
+       data8 sys_select
+       data8 sys_poll                          // 1090
+       data8 sys_symlink
+       data8 sys_readlink
+       data8 sys_uselib
+       data8 sys_swapon
+       data8 sys_swapoff                       // 1095
+       data8 sys_reboot
+       data8 sys_truncate
+       data8 sys_ftruncate
+       data8 sys_fchmod
+       data8 sys_fchown                        // 1100
+       data8 ia64_getpriority
+       data8 sys_setpriority
+       data8 sys_statfs
+       data8 sys_fstatfs
+       data8 sys_gettid                        // 1105
+       data8 sys_semget
+       data8 sys_semop
+       data8 sys_semctl
+       data8 sys_msgget
+       data8 sys_msgsnd                        // 1110
+       data8 sys_msgrcv
+       data8 sys_msgctl
+       data8 sys_shmget
+       data8 ia64_shmat
+       data8 sys_shmdt                         // 1115
+       data8 sys_shmctl
+       data8 sys_syslog
+       data8 sys_setitimer
+       data8 sys_getitimer
+       data8 sys_ni_syscall                    // 1120         /* was: ia64_oldstat */
+       data8 sys_ni_syscall                                    /* was: ia64_oldlstat */
+       data8 sys_ni_syscall                                    /* was: ia64_oldfstat */
+       data8 sys_vhangup
+       data8 sys_lchown
+       data8 sys_remap_file_pages              // 1125
+       data8 sys_wait4
+       data8 sys_sysinfo
+       data8 sys_clone
+       data8 sys_setdomainname
+       data8 sys_newuname                      // 1130
+       data8 sys_adjtimex
+       data8 sys_ni_syscall                                    /* was: ia64_create_module */
+       data8 sys_init_module
+       data8 sys_delete_module
+       data8 sys_ni_syscall                    // 1135         /* was: sys_get_kernel_syms */
+       data8 sys_ni_syscall                                    /* was: sys_query_module */
+       data8 sys_quotactl
+       data8 sys_bdflush
+       data8 sys_sysfs
+       data8 sys_personality                   // 1140
+       data8 sys_ni_syscall            // sys_afs_syscall
+       data8 sys_setfsuid
+       data8 sys_setfsgid
+       data8 sys_getdents
+       data8 sys_flock                         // 1145
+       data8 sys_readv
+       data8 sys_writev
+       data8 sys_pread64
+       data8 sys_pwrite64
+       data8 sys_sysctl                        // 1150
+       data8 sys_mmap
+       data8 sys_munmap
+       data8 sys_mlock
+       data8 sys_mlockall
+       data8 sys_mprotect                      // 1155
+       data8 ia64_mremap
+       data8 sys_msync
+       data8 sys_munlock
+       data8 sys_munlockall
+       data8 sys_sched_getparam                // 1160
+       data8 sys_sched_setparam
+       data8 sys_sched_getscheduler
+       data8 sys_sched_setscheduler
+       data8 sys_sched_yield
+       data8 sys_sched_get_priority_max        // 1165
+       data8 sys_sched_get_priority_min
+       data8 sys_sched_rr_get_interval
+       data8 sys_nanosleep
+       data8 sys_nfsservctl
+       data8 sys_prctl                         // 1170
+       data8 sys_getpagesize
+       data8 sys_mmap2
+       data8 sys_pciconfig_read
+       data8 sys_pciconfig_write
+       data8 sys_perfmonctl                    // 1175
+       data8 sys_sigaltstack
+       data8 sys_rt_sigaction
+       data8 sys_rt_sigpending
+       data8 sys_rt_sigprocmask
+       data8 sys_rt_sigqueueinfo               // 1180
+       data8 sys_rt_sigreturn
+       data8 sys_rt_sigsuspend
+       data8 sys_rt_sigtimedwait
+       data8 sys_getcwd
+       data8 sys_capget                        // 1185
+       data8 sys_capset
+       data8 sys_sendfile64
+       data8 sys_ni_syscall            // sys_getpmsg (STREAMS)
+       data8 sys_ni_syscall            // sys_putpmsg (STREAMS)
+       data8 sys_socket                        // 1190
+       data8 sys_bind
+       data8 sys_connect
+       data8 sys_listen
+       data8 sys_accept
+       data8 sys_getsockname                   // 1195
+       data8 sys_getpeername
+       data8 sys_socketpair
+       data8 sys_send
+       data8 sys_sendto
+       data8 sys_recv                          // 1200
+       data8 sys_recvfrom
+       data8 sys_shutdown
+       data8 sys_setsockopt
+       data8 sys_getsockopt
+       data8 sys_sendmsg                       // 1205
+       data8 sys_recvmsg
+       data8 sys_pivot_root
+       data8 sys_mincore
+       data8 sys_madvise
+       data8 sys_newstat                       // 1210
+       data8 sys_newlstat
+       data8 sys_newfstat
+       data8 sys_clone2
+       data8 sys_getdents64
+       data8 sys_getunwind                     // 1215
+       data8 sys_readahead
+       data8 sys_setxattr
+       data8 sys_lsetxattr
+       data8 sys_fsetxattr
+       data8 sys_getxattr                      // 1220
+       data8 sys_lgetxattr
+       data8 sys_fgetxattr
+       data8 sys_listxattr
+       data8 sys_llistxattr
+       data8 sys_flistxattr                    // 1225
+       data8 sys_removexattr
+       data8 sys_lremovexattr
+       data8 sys_fremovexattr
+       data8 sys_tkill
+       data8 sys_futex                         // 1230
+       data8 sys_sched_setaffinity
+       data8 sys_sched_getaffinity
+       data8 sys_set_tid_address
+       data8 sys_fadvise64_64
+       data8 sys_tgkill                        // 1235
+       data8 sys_exit_group
+       data8 sys_lookup_dcookie
+       data8 sys_io_setup
+       data8 sys_io_destroy
+       data8 sys_io_getevents                  // 1240
+       data8 sys_io_submit
+       data8 sys_io_cancel
+       data8 sys_epoll_create
+       data8 sys_epoll_ctl
+       data8 sys_epoll_wait                    // 1245
+       data8 sys_restart_syscall
+       data8 sys_semtimedop
+       data8 sys_timer_create
+       data8 sys_timer_settime
+       data8 sys_timer_gettime                 // 1250
+       data8 sys_timer_getoverrun
+       data8 sys_timer_delete
+       data8 sys_clock_settime
+       data8 sys_clock_gettime
+       data8 sys_clock_getres                  // 1255
+       data8 sys_clock_nanosleep
+       data8 sys_fstatfs64
+       data8 sys_statfs64
+       data8 sys_mbind
+       data8 sys_get_mempolicy                 // 1260
+       data8 sys_set_mempolicy
+       data8 sys_mq_open
+       data8 sys_mq_unlink
+       data8 sys_mq_timedsend
+       data8 sys_mq_timedreceive               // 1265
+       data8 sys_mq_notify
+       data8 sys_mq_getsetattr
+       data8 sys_ni_syscall                    // reserved for kexec_load
+       data8 sys_ni_syscall                    // reserved for vserver
+       data8 sys_waitid                        // 1270
+       data8 sys_add_key
+       data8 sys_request_key
+       data8 sys_keyctl
+       data8 sys_ni_syscall
+       data8 sys_ni_syscall                    // 1275
+       data8 sys_ni_syscall
+       data8 sys_ni_syscall
+       data8 sys_ni_syscall
+       data8 sys_ni_syscall
+
+       .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
+#endif
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/entry.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/entry.h   Wed Aug  3 00:25:11 2005
@@ -0,0 +1,97 @@
+#include <linux/config.h>
+
+/*
+ * Preserved registers that are shared between code in ivt.S and
+ * entry.S.  Be careful not to step on these!
+ */
+#define PRED_LEAVE_SYSCALL     1 /* TRUE iff leave from syscall */
+#define PRED_KERNEL_STACK      2 /* returning to kernel-stacks? */
+#define PRED_USER_STACK                3 /* returning to user-stacks? */
+#ifdef CONFIG_VTI
+#define PRED_EMUL              2 /* Need to save r4-r7 for inst emulation */
+#define PRED_NON_EMUL          3 /* No need to save r4-r7 for normal path */
+#define PRED_BN0               6 /* Guest is in bank 0 */
+#define PRED_BN1               7 /* Guest is in bank 1 */
+#endif // CONFIG_VTI
+#define PRED_SYSCALL           4 /* inside a system call? */
+#define PRED_NON_SYSCALL       5 /* complement of PRED_SYSCALL */
+
+#ifdef __ASSEMBLY__
+# define PASTE2(x,y)   x##y
+# define PASTE(x,y)    PASTE2(x,y)
+
+# define pLvSys                PASTE(p,PRED_LEAVE_SYSCALL)
+# define pKStk         PASTE(p,PRED_KERNEL_STACK)
+# define pUStk         PASTE(p,PRED_USER_STACK)
+#ifdef CONFIG_VTI
+# define pEml          PASTE(p,PRED_EMUL)
+# define pNonEml       PASTE(p,PRED_NON_EMUL)
+# define pBN0          PASTE(p,PRED_BN0)
+# define pBN1          PASTE(p,PRED_BN1)
+#endif // CONFIG_VTI
+# define pSys          PASTE(p,PRED_SYSCALL)
+# define pNonSys       PASTE(p,PRED_NON_SYSCALL)
+#endif
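+
+/*
+ * For example, with PRED_LEAVE_SYSCALL == 1 above, pLvSys expands via
+ * PASTE(p,PRED_LEAVE_SYSCALL) -> PASTE2(p,1) to the predicate register p1.
+ */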
+
+#define PT(f)          (IA64_PT_REGS_##f##_OFFSET)
+#define SW(f)          (IA64_SWITCH_STACK_##f##_OFFSET)
+#ifdef CONFIG_VTI
+#define VPD(f)      (VPD_##f##_START_OFFSET)
+#endif // CONFIG_VTI
+
+#define PT_REGS_SAVES(off)                     \
+       .unwabi 3, 'i';                         \
+       .fframe IA64_PT_REGS_SIZE+16+(off);     \
+       .spillsp rp, PT(CR_IIP)+16+(off);       \
+       .spillsp ar.pfs, PT(CR_IFS)+16+(off);   \
+       .spillsp ar.unat, PT(AR_UNAT)+16+(off); \
+       .spillsp ar.fpsr, PT(AR_FPSR)+16+(off); \
+       .spillsp pr, PT(PR)+16+(off);
+
+#define PT_REGS_UNWIND_INFO(off)               \
+       .prologue;                              \
+       PT_REGS_SAVES(off);                     \
+       .body
+
+#define SWITCH_STACK_SAVES(off)                                                \
+       .savesp ar.unat,SW(CALLER_UNAT)+16+(off);                               \
+       .savesp ar.fpsr,SW(AR_FPSR)+16+(off);                                   \
+       .spillsp f2,SW(F2)+16+(off); .spillsp f3,SW(F3)+16+(off);               \
+       .spillsp f4,SW(F4)+16+(off); .spillsp f5,SW(F5)+16+(off);               \
+       .spillsp f16,SW(F16)+16+(off); .spillsp f17,SW(F17)+16+(off);           \
+       .spillsp f18,SW(F18)+16+(off); .spillsp f19,SW(F19)+16+(off);           \
+       .spillsp f20,SW(F20)+16+(off); .spillsp f21,SW(F21)+16+(off);           \
+       .spillsp f22,SW(F22)+16+(off); .spillsp f23,SW(F23)+16+(off);           \
+       .spillsp f24,SW(F24)+16+(off); .spillsp f25,SW(F25)+16+(off);           \
+       .spillsp f26,SW(F26)+16+(off); .spillsp f27,SW(F27)+16+(off);           \
+       .spillsp f28,SW(F28)+16+(off); .spillsp f29,SW(F29)+16+(off);           \
+       .spillsp f30,SW(F30)+16+(off); .spillsp f31,SW(F31)+16+(off);           \
+       .spillsp r4,SW(R4)+16+(off); .spillsp r5,SW(R5)+16+(off);               \
+       .spillsp r6,SW(R6)+16+(off); .spillsp r7,SW(R7)+16+(off);               \
+       .spillsp b0,SW(B0)+16+(off); .spillsp b1,SW(B1)+16+(off);               \
+       .spillsp b2,SW(B2)+16+(off); .spillsp b3,SW(B3)+16+(off);               \
+       .spillsp b4,SW(B4)+16+(off); .spillsp b5,SW(B5)+16+(off);               \
+       .spillsp ar.pfs,SW(AR_PFS)+16+(off); .spillsp ar.lc,SW(AR_LC)+16+(off); \
+       .spillsp @priunat,SW(AR_UNAT)+16+(off);                                 \
+       .spillsp ar.rnat,SW(AR_RNAT)+16+(off);                                  \
+       .spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off);                          \
+       .spillsp pr,SW(PR)+16+(off))
+
+#define DO_SAVE_SWITCH_STACK                   \
+       movl r28=1f;                            \
+       ;;                                      \
+       .fframe IA64_SWITCH_STACK_SIZE;         \
+       adds sp=-IA64_SWITCH_STACK_SIZE,sp;     \
+       mov.ret.sptk b7=r28,1f;                 \
+       SWITCH_STACK_SAVES(0);                  \
+       br.cond.sptk.many save_switch_stack;    \
+1:
+
+#define DO_LOAD_SWITCH_STACK                   \
+       movl r28=1f;                            \
+       ;;                                      \
+       invala;                                 \
+       mov.ret.sptk b7=r28,1f;                 \
+       br.cond.sptk.many load_switch_stack;    \
+1:     .restore sp;                            \
+       adds sp=IA64_SWITCH_STACK_SIZE,sp
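+
+/*
+ * The two macros above are used as a pair around calls into C code that
+ * needs the full preserved-register state (see ia64_prepare_handle_unaligned
+ * in entry.S): DO_SAVE_SWITCH_STACK branches to save_switch_stack with b7
+ * pointing at the local label 1f, and DO_LOAD_SWITCH_STACK performs the
+ * matching invala, branches to load_switch_stack, and pops the frame.
+ */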
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/head.S
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/head.S    Wed Aug  3 00:25:11 2005
@@ -0,0 +1,1026 @@
+/*
+ * Here is where the ball gets rolling as far as the kernel is concerned.
+ * When control is transferred to _start, the bootloader has already
+ * loaded us to the correct address.  All that's left to do here is
+ * to set up the kernel's global pointer and jump to the kernel
+ * entry point.
+ *
+ * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999 Intel Corp.
+ * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@xxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <Don.Dugger@xxxxxxxxx>
+ * Copyright (C) 2002 Fenghua Yu <fenghua.yu@xxxxxxxxx>
+ *   -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
+ */
+
+#include <linux/config.h>
+
+#include <asm/asmmacro.h>
+#include <asm/fpu.h>
+#include <asm/kregs.h>
+#include <asm/mmu_context.h>
+#include <asm/offsets.h>
+#include <asm/pal.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+       .section __special_page_section,"ax"
+
+       .global empty_zero_page
+empty_zero_page:
+       .skip PAGE_SIZE
+
+       .global swapper_pg_dir
+swapper_pg_dir:
+       .skip PAGE_SIZE
+
+       .rodata
+halt_msg:
+       stringz "Halting kernel\n"
+
+       .text
+
+       .global start_ap
+
+       /*
+        * Start the kernel.  When the bootloader passes control to _start(), r28
+        * points to the address of the boot parameter area.  Execution reaches
+        * here in physical mode.
+        */
+GLOBAL_ENTRY(_start)
+start_ap:
+       .prologue
+       .save rp, r0            // terminate unwind chain with a NULL rp
+       .body
+
+       rsm psr.i | psr.ic
+       ;;
+       srlz.i
+       ;;
+       /*
+        * Initialize kernel region registers:
+        *      rr[0]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[1]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[2]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[3]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[4]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[5]: VHPT enabled, page size = PAGE_SHIFT
+        *      rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+        *      rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+        * We initialize all of them to prevent inadvertently assuming
+        * something about the state of address translation early in boot.
+        */
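+       // Region-register encoding used below: bit 0 = ve (VHPT walker
+       // enable), bits 2-7 = ps (preferred page size), bits 8-31 = rid;
+       // i.e. ((rid << 8) | (page_shift << 2) | ve).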
+       movl r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r7=(0<<61)
+       movl r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r9=(1<<61)
+       movl r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r11=(2<<61)
+       movl r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r13=(3<<61)
+       movl r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r15=(4<<61)
+       movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+       movl r17=(5<<61)
+       movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
+       movl r19=(6<<61)
+       movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
+       movl r21=(7<<61)
+       ;;
+       mov rr[r7]=r6
+       mov rr[r9]=r8
+       mov rr[r11]=r10
+       mov rr[r13]=r12
+       mov rr[r15]=r14
+       mov rr[r17]=r16
+       mov rr[r19]=r18
+       mov rr[r21]=r20
+       ;;
+       /*
+        * Now pin mappings into the TLB for kernel text and data
+        */
+       mov r18=KERNEL_TR_PAGE_SHIFT<<2
+       movl r17=KERNEL_START
+       ;;
+       mov cr.itir=r18
+       mov cr.ifa=r17
+       mov r16=IA64_TR_KERNEL
+       mov r3=ip
+       movl r18=PAGE_KERNEL
+       ;;
+       dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
+       ;;
+       or r18=r2,r18
+       ;;
+       srlz.i
+       ;;
+       itr.i itr[r16]=r18
+       ;;
+       itr.d dtr[r16]=r18
+       ;;
+       srlz.i
+
+       /*
+        * Switch into virtual mode:
+        */
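+       // (the rfi below consumes the cr.ipsr/cr.iip values set up here and
+       // "returns" to the 1: label with address translation enabled)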
+#ifdef CONFIG_VTI
+       movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH \
+                 |IA64_PSR_DI)
+#else // CONFIG_VTI
+       movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
+                 |IA64_PSR_DI)
+#endif // CONFIG_VTI
+       ;;
+       mov cr.ipsr=r16
+       movl r17=1f
+       ;;
+       mov cr.iip=r17
+       mov cr.ifs=r0
+       ;;
+       rfi
+       ;;
+1:     // now we are in virtual mode
+
+       // set IVT entry point---can't access I/O ports without it
+#ifdef CONFIG_VTI
+    movl r3=vmx_ia64_ivt
+#else // CONFIG_VTI
+       movl r3=ia64_ivt
+#endif // CONFIG_VTI
+       ;;
+       mov cr.iva=r3
+       movl r2=FPSR_DEFAULT
+       ;;
+       srlz.i
+       movl gp=__gp
+
+       mov ar.fpsr=r2
+       ;;
+
+#define isAP   p2      // are we an Application Processor?
+#define isBP   p3      // are we the Bootstrap Processor?
+
+#ifdef CONFIG_SMP
+       /*
+        * Find the init_task for the currently booting CPU.  At poweron, and in
+        * UP mode, task_for_booting_cpu is NULL.
+        */
+       movl r3=task_for_booting_cpu
+       ;;
+       ld8 r3=[r3]
+       movl r2=init_task
+       ;;
+       cmp.eq isBP,isAP=r3,r0
+       ;;
+(isAP) mov r2=r3
+#else
+       movl r2=init_task
+       cmp.eq isBP,isAP=r0,r0
+#endif
+       ;;
+       tpa r3=r2               // r3 == phys addr of task struct
+       mov r16=-1
+(isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
+
+       // load mapping for stack (virtaddr in r2, physaddr in r3)
+       rsm psr.ic
+       movl r17=PAGE_KERNEL
+       ;;
+       srlz.d
+       dep r18=0,r3,0,12
+       ;;
+       or r18=r17,r18
+#ifdef XEN
+       dep r2=-1,r3,60,4       // IMVA of task
+#else
+       dep r2=-1,r3,61,3       // IMVA of task
+#endif
+       ;;
+       mov r17=rr[r2]
+       shr.u r16=r3,IA64_GRANULE_SHIFT
+       ;;
+       dep r17=0,r17,8,24
+       ;;
+       mov cr.itir=r17
+       mov cr.ifa=r2
+
+       mov r19=IA64_TR_CURRENT_STACK
+       ;;
+       itr.d dtr[r19]=r18
+       ;;
+       ssm psr.ic
+       srlz.d
+       ;;
+
+.load_current:
+       // load the "current" pointer (r13) and ar.k6 with the current task
+#ifdef CONFIG_VTI
+       mov r21=r2              // virtual address
+       ;;
+       bsw.1
+       ;;
+#else // CONFIG_VTI
+       mov IA64_KR(CURRENT)=r2         // virtual address
+       mov IA64_KR(CURRENT_STACK)=r16
+#endif // CONFIG_VTI
+       mov r13=r2
+       /*
+        * Reserve space at the top of the stack for "struct pt_regs".  Kernel threads
+        * don't store interesting values in that structure, but the space still needs
+        * to be there because time-critical stuff such as the context switching can
+        * be implemented more efficiently (for example, __switch_to()
+        * always sets the psr.dfh bit of the task it is switching to).
+        */
+       addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
+       addl r2=IA64_RBS_OFFSET,r2      // initialize the RSE
+       mov ar.rsc=0            // place RSE in enforced lazy mode
+       ;;
+       loadrs                  // clear the dirty partition
+       ;;
+       mov ar.bspstore=r2      // establish the new RSE stack
+       ;;
+       mov ar.rsc=0x3          // place RSE in eager mode
+
+#ifdef XEN
+(isBP) dep r28=-1,r28,60,4     // make address virtual
+#else
+(isBP) dep r28=-1,r28,61,3     // make address virtual
+#endif
+(isBP) movl r2=ia64_boot_param
+       ;;
+(isBP) st8 [r2]=r28            // save the address of the boot param area passed by the bootloader
+
+#ifdef CONFIG_SMP
+(isAP) br.call.sptk.many rp=start_secondary
+.ret0:
+(isAP) br.cond.sptk self
+#endif
+
+       // This is executed by the bootstrap processor (bsp) only:
+
+#ifdef CONFIG_IA64_FW_EMU
+       // initialize PAL & SAL emulator:
+       br.call.sptk.many rp=sys_fw_init
+.ret1:
+#endif
+       br.call.sptk.many rp=start_kernel
+.ret2: addl r3=@ltoff(halt_msg),gp
+       ;;
+       alloc r2=ar.pfs,8,0,2,0
+       ;;
+       ld8 out0=[r3]
+       br.call.sptk.many b0=console_print
+
+self:  hint @pause
+       ;;
+       br.sptk.many self               // endless loop
+       ;;
+END(_start)
+
+GLOBAL_ENTRY(ia64_save_debug_regs)
+       alloc r16=ar.pfs,1,0,0,0
+       mov r20=ar.lc                   // preserve ar.lc
+       mov ar.lc=IA64_NUM_DBG_REGS-1
+       mov r18=0
+       add r19=IA64_NUM_DBG_REGS*8,in0
+       ;;
+1:     mov r16=dbr[r18]
+#ifdef CONFIG_ITANIUM
+       ;;
+       srlz.d
+#endif
+       mov r17=ibr[r18]
+       add r18=1,r18
+       ;;
+       st8.nta [in0]=r16,8
+       st8.nta [r19]=r17,8
+       br.cloop.sptk.many 1b
+       ;;
+       mov ar.lc=r20                   // restore ar.lc
+       br.ret.sptk.many rp
+END(ia64_save_debug_regs)
+
+GLOBAL_ENTRY(ia64_load_debug_regs)
+       alloc r16=ar.pfs,1,0,0,0
+       lfetch.nta [in0]
+       mov r20=ar.lc                   // preserve ar.lc
+       add r19=IA64_NUM_DBG_REGS*8,in0
+       mov ar.lc=IA64_NUM_DBG_REGS-1
+       mov r18=-1
+       ;;
+1:     ld8.nta r16=[in0],8
+       ld8.nta r17=[r19],8
+       add r18=1,r18
+       ;;
+       mov dbr[r18]=r16
+#ifdef CONFIG_ITANIUM
+       ;;
+       srlz.d                          // Errata 132 (NoFix status)
+#endif
+       mov ibr[r18]=r17
+       br.cloop.sptk.many 1b
+       ;;
+       mov ar.lc=r20                   // restore ar.lc
+       br.ret.sptk.many rp
+END(ia64_load_debug_regs)
+
+GLOBAL_ENTRY(__ia64_save_fpu)
+       alloc r2=ar.pfs,1,4,0,0
+       adds loc0=96*16-16,in0
+       adds loc1=96*16-16-128,in0
+       ;;
+       stf.spill.nta [loc0]=f127,-256
+       stf.spill.nta [loc1]=f119,-256
+       ;;
+       stf.spill.nta [loc0]=f111,-256
+       stf.spill.nta [loc1]=f103,-256
+       ;;
+       stf.spill.nta [loc0]=f95,-256
+       stf.spill.nta [loc1]=f87,-256
+       ;;
+       stf.spill.nta [loc0]=f79,-256
+       stf.spill.nta [loc1]=f71,-256
+       ;;
+       stf.spill.nta [loc0]=f63,-256
+       stf.spill.nta [loc1]=f55,-256
+       adds loc2=96*16-32,in0
+       ;;
+       stf.spill.nta [loc0]=f47,-256
+       stf.spill.nta [loc1]=f39,-256
+       adds loc3=96*16-32-128,in0
+       ;;
+       stf.spill.nta [loc2]=f126,-256
+       stf.spill.nta [loc3]=f118,-256
+       ;;
+       stf.spill.nta [loc2]=f110,-256
+       stf.spill.nta [loc3]=f102,-256
+       ;;
+       stf.spill.nta [loc2]=f94,-256
+       stf.spill.nta [loc3]=f86,-256
+       ;;
+       stf.spill.nta [loc2]=f78,-256
+       stf.spill.nta [loc3]=f70,-256
+       ;;
+       stf.spill.nta [loc2]=f62,-256
+       stf.spill.nta [loc3]=f54,-256
+       adds loc0=96*16-48,in0
+       ;;
+       stf.spill.nta [loc2]=f46,-256
+       stf.spill.nta [loc3]=f38,-256
+       adds loc1=96*16-48-128,in0
+       ;;
+       stf.spill.nta [loc0]=f125,-256
+       stf.spill.nta [loc1]=f117,-256
+       ;;
+       stf.spill.nta [loc0]=f109,-256
+       stf.spill.nta [loc1]=f101,-256
+       ;;
+       stf.spill.nta [loc0]=f93,-256
+       stf.spill.nta [loc1]=f85,-256
+       ;;
+       stf.spill.nta [loc0]=f77,-256
+       stf.spill.nta [loc1]=f69,-256
+       ;;
+       stf.spill.nta [loc0]=f61,-256
+       stf.spill.nta [loc1]=f53,-256
+       adds loc2=96*16-64,in0
+       ;;
+       stf.spill.nta [loc0]=f45,-256
+       stf.spill.nta [loc1]=f37,-256
+       adds loc3=96*16-64-128,in0
+       ;;
+       stf.spill.nta [loc2]=f124,-256
+       stf.spill.nta [loc3]=f116,-256
+       ;;
+       stf.spill.nta [loc2]=f108,-256
+       stf.spill.nta [loc3]=f100,-256
+       ;;
+       stf.spill.nta [loc2]=f92,-256
+       stf.spill.nta [loc3]=f84,-256
+       ;;
+       stf.spill.nta [loc2]=f76,-256
+       stf.spill.nta [loc3]=f68,-256
+       ;;
+       stf.spill.nta [loc2]=f60,-256
+       stf.spill.nta [loc3]=f52,-256
+       adds loc0=96*16-80,in0
+       ;;
+       stf.spill.nta [loc2]=f44,-256
+       stf.spill.nta [loc3]=f36,-256
+       adds loc1=96*16-80-128,in0
+       ;;
+       stf.spill.nta [loc0]=f123,-256
+       stf.spill.nta [loc1]=f115,-256
+       ;;
+       stf.spill.nta [loc0]=f107,-256
+       stf.spill.nta [loc1]=f99,-256
+       ;;
+       stf.spill.nta [loc0]=f91,-256
+       stf.spill.nta [loc1]=f83,-256
+       ;;
+       stf.spill.nta [loc0]=f75,-256
+       stf.spill.nta [loc1]=f67,-256
+       ;;
+       stf.spill.nta [loc0]=f59,-256
+       stf.spill.nta [loc1]=f51,-256
+       adds loc2=96*16-96,in0
+       ;;
+       stf.spill.nta [loc0]=f43,-256
+       stf.spill.nta [loc1]=f35,-256
+       adds loc3=96*16-96-128,in0
+       ;;
+       stf.spill.nta [loc2]=f122,-256
+       stf.spill.nta [loc3]=f114,-256
+       ;;
+       stf.spill.nta [loc2]=f106,-256
+       stf.spill.nta [loc3]=f98,-256
+       ;;
+       stf.spill.nta [loc2]=f90,-256
+       stf.spill.nta [loc3]=f82,-256
+       ;;
+       stf.spill.nta [loc2]=f74,-256
+       stf.spill.nta [loc3]=f66,-256
+       ;;
+       stf.spill.nta [loc2]=f58,-256
+       stf.spill.nta [loc3]=f50,-256
+       adds loc0=96*16-112,in0
+       ;;
+       stf.spill.nta [loc2]=f42,-256
+       stf.spill.nta [loc3]=f34,-256
+       adds loc1=96*16-112-128,in0
+       ;;
+       stf.spill.nta [loc0]=f121,-256
+       stf.spill.nta [loc1]=f113,-256
+       ;;
+       stf.spill.nta [loc0]=f105,-256
+       stf.spill.nta [loc1]=f97,-256
+       ;;
+       stf.spill.nta [loc0]=f89,-256
+       stf.spill.nta [loc1]=f81,-256
+       ;;
+       stf.spill.nta [loc0]=f73,-256
+       stf.spill.nta [loc1]=f65,-256
+       ;;
+       stf.spill.nta [loc0]=f57,-256
+       stf.spill.nta [loc1]=f49,-256
+       adds loc2=96*16-128,in0
+       ;;
+       stf.spill.nta [loc0]=f41,-256
+       stf.spill.nta [loc1]=f33,-256
+       adds loc3=96*16-128-128,in0
+       ;;
+       stf.spill.nta [loc2]=f120,-256
+       stf.spill.nta [loc3]=f112,-256
+       ;;
+       stf.spill.nta [loc2]=f104,-256
+       stf.spill.nta [loc3]=f96,-256
+       ;;
+       stf.spill.nta [loc2]=f88,-256
+       stf.spill.nta [loc3]=f80,-256
+       ;;
+       stf.spill.nta [loc2]=f72,-256
+       stf.spill.nta [loc3]=f64,-256
+       ;;
+       stf.spill.nta [loc2]=f56,-256
+       stf.spill.nta [loc3]=f48,-256
+       ;;
+       stf.spill.nta [loc2]=f40
+       stf.spill.nta [loc3]=f32
+       br.ret.sptk.many rp
+END(__ia64_save_fpu)
+
+GLOBAL_ENTRY(__ia64_load_fpu)
+       alloc r2=ar.pfs,1,2,0,0
+       adds r3=128,in0
+       adds r14=256,in0
+       adds r15=384,in0
+       mov loc0=512
+       mov loc1=-1024+16
+       ;;
+       ldf.fill.nta f32=[in0],loc0
+       ldf.fill.nta f40=[ r3],loc0
+       ldf.fill.nta f48=[r14],loc0
+       ldf.fill.nta f56=[r15],loc0
+       ;;
+       ldf.fill.nta f64=[in0],loc0
+       ldf.fill.nta f72=[ r3],loc0
+       ldf.fill.nta f80=[r14],loc0
+       ldf.fill.nta f88=[r15],loc0
+       ;;
+       ldf.fill.nta f96=[in0],loc1
+       ldf.fill.nta f104=[ r3],loc1
+       ldf.fill.nta f112=[r14],loc1
+       ldf.fill.nta f120=[r15],loc1
+       ;;
+       ldf.fill.nta f33=[in0],loc0
+       ldf.fill.nta f41=[ r3],loc0
+       ldf.fill.nta f49=[r14],loc0
+       ldf.fill.nta f57=[r15],loc0
+       ;;
+       ldf.fill.nta f65=[in0],loc0
+       ldf.fill.nta f73=[ r3],loc0
+       ldf.fill.nta f81=[r14],loc0
+       ldf.fill.nta f89=[r15],loc0
+       ;;
+       ldf.fill.nta f97=[in0],loc1
+       ldf.fill.nta f105=[ r3],loc1
+       ldf.fill.nta f113=[r14],loc1
+       ldf.fill.nta f121=[r15],loc1
+       ;;
+       ldf.fill.nta f34=[in0],loc0
+       ldf.fill.nta f42=[ r3],loc0
+       ldf.fill.nta f50=[r14],loc0
+       ldf.fill.nta f58=[r15],loc0
+       ;;
+       ldf.fill.nta f66=[in0],loc0
+       ldf.fill.nta f74=[ r3],loc0
+       ldf.fill.nta f82=[r14],loc0
+       ldf.fill.nta f90=[r15],loc0
+       ;;
+       ldf.fill.nta f98=[in0],loc1
+       ldf.fill.nta f106=[ r3],loc1
+       ldf.fill.nta f114=[r14],loc1
+       ldf.fill.nta f122=[r15],loc1
+       ;;
+       ldf.fill.nta f35=[in0],loc0
+       ldf.fill.nta f43=[ r3],loc0
+       ldf.fill.nta f51=[r14],loc0
+       ldf.fill.nta f59=[r15],loc0
+       ;;
+       ldf.fill.nta f67=[in0],loc0
+       ldf.fill.nta f75=[ r3],loc0
+       ldf.fill.nta f83=[r14],loc0
+       ldf.fill.nta f91=[r15],loc0
+       ;;
+       ldf.fill.nta f99=[in0],loc1
+       ldf.fill.nta f107=[ r3],loc1
+       ldf.fill.nta f115=[r14],loc1
+       ldf.fill.nta f123=[r15],loc1
+       ;;
+       ldf.fill.nta f36=[in0],loc0
+       ldf.fill.nta f44=[ r3],loc0
+       ldf.fill.nta f52=[r14],loc0
+       ldf.fill.nta f60=[r15],loc0
+       ;;
+       ldf.fill.nta f68=[in0],loc0
+       ldf.fill.nta f76=[ r3],loc0
+       ldf.fill.nta f84=[r14],loc0
+       ldf.fill.nta f92=[r15],loc0
+       ;;
+       ldf.fill.nta f100=[in0],loc1
+       ldf.fill.nta f108=[ r3],loc1
+       ldf.fill.nta f116=[r14],loc1
+       ldf.fill.nta f124=[r15],loc1
+       ;;
+       ldf.fill.nta f37=[in0],loc0
+       ldf.fill.nta f45=[ r3],loc0
+       ldf.fill.nta f53=[r14],loc0
+       ldf.fill.nta f61=[r15],loc0
+       ;;
+       ldf.fill.nta f69=[in0],loc0
+       ldf.fill.nta f77=[ r3],loc0
+       ldf.fill.nta f85=[r14],loc0
+       ldf.fill.nta f93=[r15],loc0
+       ;;
+       ldf.fill.nta f101=[in0],loc1
+       ldf.fill.nta f109=[ r3],loc1
+       ldf.fill.nta f117=[r14],loc1
+       ldf.fill.nta f125=[r15],loc1
+       ;;
+       ldf.fill.nta f38 =[in0],loc0
+       ldf.fill.nta f46 =[ r3],loc0
+       ldf.fill.nta f54 =[r14],loc0
+       ldf.fill.nta f62 =[r15],loc0
+       ;;
+       ldf.fill.nta f70 =[in0],loc0
+       ldf.fill.nta f78 =[ r3],loc0
+       ldf.fill.nta f86 =[r14],loc0
+       ldf.fill.nta f94 =[r15],loc0
+       ;;
+       ldf.fill.nta f102=[in0],loc1
+       ldf.fill.nta f110=[ r3],loc1
+       ldf.fill.nta f118=[r14],loc1
+       ldf.fill.nta f126=[r15],loc1
+       ;;
+       ldf.fill.nta f39 =[in0],loc0
+       ldf.fill.nta f47 =[ r3],loc0
+       ldf.fill.nta f55 =[r14],loc0
+       ldf.fill.nta f63 =[r15],loc0
+       ;;
+       ldf.fill.nta f71 =[in0],loc0
+       ldf.fill.nta f79 =[ r3],loc0
+       ldf.fill.nta f87 =[r14],loc0
+       ldf.fill.nta f95 =[r15],loc0
+       ;;
+       ldf.fill.nta f103=[in0]
+       ldf.fill.nta f111=[ r3]
+       ldf.fill.nta f119=[r14]
+       ldf.fill.nta f127=[r15]
+       br.ret.sptk.many rp
+END(__ia64_load_fpu)
+
+GLOBAL_ENTRY(__ia64_init_fpu)
+       stf.spill [sp]=f0               // M3
+       mov      f32=f0                 // F
+       nop.b    0
+
+       ldfps    f33,f34=[sp]           // M0
+       ldfps    f35,f36=[sp]           // M1
+       mov      f37=f0                 // F
+       ;;
+
+       setf.s   f38=r0                 // M2
+       setf.s   f39=r0                 // M3
+       mov      f40=f0                 // F
+
+       ldfps    f41,f42=[sp]           // M0
+       ldfps    f43,f44=[sp]           // M1
+       mov      f45=f0                 // F
+
+       setf.s   f46=r0                 // M2
+       setf.s   f47=r0                 // M3
+       mov      f48=f0                 // F
+
+       ldfps    f49,f50=[sp]           // M0
+       ldfps    f51,f52=[sp]           // M1
+       mov      f53=f0                 // F
+
+       setf.s   f54=r0                 // M2
+       setf.s   f55=r0                 // M3
+       mov      f56=f0                 // F
+
+       ldfps    f57,f58=[sp]           // M0
+       ldfps    f59,f60=[sp]           // M1
+       mov      f61=f0                 // F
+
+       setf.s   f62=r0                 // M2
+       setf.s   f63=r0                 // M3
+       mov      f64=f0                 // F
+
+       ldfps    f65,f66=[sp]           // M0
+       ldfps    f67,f68=[sp]           // M1
+       mov      f69=f0                 // F
+
+       setf.s   f70=r0                 // M2
+       setf.s   f71=r0                 // M3
+       mov      f72=f0                 // F
+
+       ldfps    f73,f74=[sp]           // M0
+       ldfps    f75,f76=[sp]           // M1
+       mov      f77=f0                 // F
+
+       setf.s   f78=r0                 // M2
+       setf.s   f79=r0                 // M3
+       mov      f80=f0                 // F
+
+       ldfps    f81,f82=[sp]           // M0
+       ldfps    f83,f84=[sp]           // M1
+       mov      f85=f0                 // F
+
+       setf.s   f86=r0                 // M2
+       setf.s   f87=r0                 // M3
+       mov      f88=f0                 // F
+
+       /*
+        * When the instructions are cached, it would be faster to initialize
+        * the remaining registers with simple mov instructions (F-unit).
+        * This gets the time down to ~29 cycles.  However, this would use up
+        * 33 bundles, whereas continuing with the above pattern yields
+        * 10 bundles and ~30 cycles.
+        */
+
+       ldfps    f89,f90=[sp]           // M0
+       ldfps    f91,f92=[sp]           // M1
+       mov      f93=f0                 // F
+
+       setf.s   f94=r0                 // M2
+       setf.s   f95=r0                 // M3
+       mov      f96=f0                 // F
+
+       ldfps    f97,f98=[sp]           // M0
+       ldfps    f99,f100=[sp]          // M1
+       mov      f101=f0                // F
+
+       setf.s   f102=r0                // M2
+       setf.s   f103=r0                // M3
+       mov      f104=f0                // F
+
+       ldfps    f105,f106=[sp]         // M0
+       ldfps    f107,f108=[sp]         // M1
+       mov      f109=f0                // F
+
+       setf.s   f110=r0                // M2
+       setf.s   f111=r0                // M3
+       mov      f112=f0                // F
+
+       ldfps    f113,f114=[sp]         // M0
+       ldfps    f115,f116=[sp]         // M1
+       mov      f117=f0                // F
+
+       setf.s   f118=r0                // M2
+       setf.s   f119=r0                // M3
+       mov      f120=f0                // F
+
+       ldfps    f121,f122=[sp]         // M0
+       ldfps    f123,f124=[sp]         // M1
+       mov      f125=f0                // F
+
+       setf.s   f126=r0                // M2
+       setf.s   f127=r0                // M3
+       br.ret.sptk.many rp             // F
+END(__ia64_init_fpu)
+
+/*
+ * Switch execution mode from virtual to physical
+ *
+ * Inputs:
+ *     r16 = new psr to establish
+ * Output:
+ *     r19 = old virtual address of ar.bsp
+ *     r20 = old virtual address of sp
+ *
+ * Note: RSE must already be in enforced lazy mode
+ */
+GLOBAL_ENTRY(ia64_switch_mode_phys)
+ {
+       alloc r2=ar.pfs,0,0,0,0
+       rsm psr.i | psr.ic              // disable interrupts and interrupt collection
+       mov r15=ip
+ }
+       ;;
+ {
+       flushrs                         // must be first insn in group
+       srlz.i
+ }
+       ;;
+       mov cr.ipsr=r16                 // set new PSR
+       add r3=1f-ia64_switch_mode_phys,r15
+
+       mov r19=ar.bsp
+       mov r20=sp
+       mov r14=rp                      // get return address into a general register
+       ;;
+
+       // going to physical mode, use tpa to translate virt->phys
+       tpa r17=r19
+       tpa r3=r3
+       tpa sp=sp
+       tpa r14=r14
+       ;;
+
+       mov r18=ar.rnat                 // save ar.rnat
+       mov ar.bspstore=r17             // this steps on ar.rnat
+       mov cr.iip=r3
+       mov cr.ifs=r0
+       ;;
+       mov ar.rnat=r18                 // restore ar.rnat
+       rfi                             // must be last insn in group
+       ;;
+1:     mov rp=r14
+       br.ret.sptk.many rp
+END(ia64_switch_mode_phys)
+
+/*
+ * Switch execution mode from physical to virtual
+ *
+ * Inputs:
+ *     r16 = new psr to establish
+ *     r19 = new bspstore to establish
+ *     r20 = new sp to establish
+ *
+ * Note: RSE must already be in enforced lazy mode
+ */
+GLOBAL_ENTRY(ia64_switch_mode_virt)
+ {
+       alloc r2=ar.pfs,0,0,0,0
+       rsm psr.i | psr.ic              // disable interrupts and interrupt collection
+       mov r15=ip
+ }
+       ;;
+ {
+       flushrs                         // must be first insn in group
+       srlz.i
+ }
+       ;;
+       mov cr.ipsr=r16                 // set new PSR
+       add r3=1f-ia64_switch_mode_virt,r15
+
+       mov r14=rp                      // get return address into a general register
+       ;;
+
+       // going to virtual
+       //   - for code addresses, set upper bits of addr to KERNEL_START
+       //   - for stack addresses, copy from input argument
+       movl r18=KERNEL_START
+       dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+       dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+       mov sp=r20
+       ;;
+       or r3=r3,r18
+       or r14=r14,r18
+       ;;
+
+       mov r18=ar.rnat                 // save ar.rnat
+       mov ar.bspstore=r19             // this steps on ar.rnat
+       mov cr.iip=r3
+       mov cr.ifs=r0
+       ;;
+       mov ar.rnat=r18                 // restore ar.rnat
+       rfi                             // must be last insn in group
+       ;;
+1:     mov rp=r14
+       br.ret.sptk.many rp
+END(ia64_switch_mode_virt)
+
+GLOBAL_ENTRY(ia64_delay_loop)
+       .prologue
+{      nop 0                   // work around GAS unwind info generation bug...
+       .save ar.lc,r2
+       mov r2=ar.lc
+       .body
+       ;;
+       mov ar.lc=r32
+}
+       ;;
+       // force loop to be 32-byte aligned (GAS bug means we cannot use .align
+       // inside function body without corrupting unwind info).
+{      nop 0 }
+1:     br.cloop.sptk.few 1b
+       ;;
+       mov ar.lc=r2
+       br.ret.sptk.many rp
+END(ia64_delay_loop)
+
+/*
+ * Return a CPU-local timestamp in nano-seconds.  This timestamp is
+ * NOT synchronized across CPUs; its return value must never be
+ * compared against the values returned on another CPU.  The usage in
+ * kernel/sched.c ensures that.
+ *
+ * The return-value of sched_clock() is NOT supposed to wrap-around.
+ * If it did, it would cause some scheduling hiccups (at the worst).
+ * Fortunately, with a 64-bit cycle-counter ticking at 100GHz, even
+ * that would happen only once every 5+ years.
+ *
+ * The code below basically calculates:
+ *
+ *   (ia64_get_itc() * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT
+ *
+ * except that the multiplication and the shift are done with 128-bit
+ * intermediate precision so that we can produce a full 64-bit result.
+ */
+GLOBAL_ENTRY(sched_clock)
+#ifdef XEN
+       movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
+#else
+       addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+#endif
+       mov.m r9=ar.itc         // fetch cycle-counter (35 cyc)
+       ;;
+       ldf8 f8=[r8]
+       ;;
+       setf.sig f9=r9          // certain to stall, so issue it _after_ ldf8...
+       ;;
+       xmpy.lu f10=f9,f8       // calculate low 64 bits of 128-bit product (4 cyc)
+       xmpy.hu f11=f9,f8       // calculate high 64 bits of 128-bit product
+       ;;
+       getf.sig r8=f10         // (5 cyc)
+       getf.sig r9=f11
+       ;;
+       shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
+       br.ret.sptk.many rp
+END(sched_clock)
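
The 128-bit multiply-and-shift described above can be written in C roughly as
follows (editor's sketch only, assuming a compiler that provides GCC's
unsigned __int128; sched_clock_sketch is a hypothetical name):

        /* Widen to 128 bits so the full 64-bit result survives the shift. */
        static unsigned long sched_clock_sketch(unsigned long itc,
                                                unsigned long nsec_per_cyc)
        {
                unsigned __int128 prod = (unsigned __int128) itc * nsec_per_cyc;
                return (unsigned long) (prod >> IA64_NSEC_PER_CYC_SHIFT);
        }

The shrp instruction in the routine extracts the same 64-bit window from the
{high,low} register pair produced by xmpy.hu/xmpy.lu.
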
+
+GLOBAL_ENTRY(start_kernel_thread)
+       .prologue
+       .save rp, r0                            // this is the end of the call-chain
+       .body
+       alloc r2 = ar.pfs, 0, 0, 2, 0
+       mov out0 = r9
+       mov out1 = r11;;
+       br.call.sptk.many rp = kernel_thread_helper;;
+       mov out0 = r8
+       br.call.sptk.many rp = sys_exit;;
+1:     br.sptk.few 1b                          // not reached
+END(start_kernel_thread)
+
+#ifdef CONFIG_IA64_BRL_EMU
+
+/*
+ *  Assembly routines used by brl_emu.c to set preserved register state.
+ */
+
+#define SET_REG(reg)                           \
+ GLOBAL_ENTRY(ia64_set_##reg);                 \
+       alloc r16=ar.pfs,1,0,0,0;               \
+       mov reg=r32;                            \
+       ;;                                      \
+       br.ret.sptk.many rp;                    \
+ END(ia64_set_##reg)
+
+SET_REG(b1);
+SET_REG(b2);
+SET_REG(b3);
+SET_REG(b4);
+SET_REG(b5);
+
+#endif /* CONFIG_IA64_BRL_EMU */
+
+#ifdef CONFIG_SMP
+       /*
+        * This routine handles spinlock contention.  It uses a non-standard calling
+        * convention to avoid converting leaf routines into interior routines.  Because
+        * of this special convention, there are several restrictions:
+        *
+        * - do not use gp relative variables, this code is called from the kernel
+        *   and from modules, r1 is undefined.
+        * - do not use stacked registers, the caller owns them.
+        * - do not use the scratch stack space, the caller owns it.
+        * - do not use any registers other than the ones listed below
+        *
+        * Inputs:
+        *   ar.pfs - saved CFM of caller
+        *   ar.ccv - 0 (and available for use)
+        *   r27    - flags from spin_lock_irqsave or 0.  Must be preserved.
+        *   r28    - available for use.
+        *   r29    - available for use.
+        *   r30    - available for use.
+        *   r31    - address of lock, available for use.
+        *   b6     - return address
+        *   p14    - available for use.
+        *   p15    - used to track flag status.
+        *
+        * If you patch this code to use more registers, do not forget to update
+        * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
+        */
+
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+
+GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
+       .prologue
+       .save ar.pfs, r0        // this code effectively has a zero frame size
+       .save rp, r28
+       .body
+       nop 0
+       tbit.nz p15,p0=r27,IA64_PSR_I_BIT
+       .restore sp             // pop existing prologue after next insn
+       mov b6 = r28
+       .prologue
+       .save ar.pfs, r0
+       .altrp b6
+       .body
+       ;;
+(p15)  ssm psr.i               // reenable interrupts if they were on
+                               // DavidM says that srlz.d is slow and is not required in this case
+.wait:
+       // exponential backoff, kdb, lockmeter etc. go in here
+       hint @pause
+       ld4 r30=[r31]           // don't use ld4.bias; if it's contended, we won't write the word
+       nop 0
+       ;;
+       cmp4.ne p14,p0=r30,r0
+(p14)  br.cond.sptk.few .wait
+(p15)  rsm psr.i               // disable interrupts if we reenabled them
+       br.cond.sptk.few b6     // lock is now free, try to acquire
+       .global ia64_spinlock_contention_pre3_4_end     // for kernprof
+ia64_spinlock_contention_pre3_4_end:
+END(ia64_spinlock_contention_pre3_4)
+
+#else
+
+GLOBAL_ENTRY(ia64_spinlock_contention)
+       .prologue
+       .altrp b6
+       .body
+       tbit.nz p15,p0=r27,IA64_PSR_I_BIT
+       ;;
+.wait:
+(p15)  ssm psr.i               // reenable interrupts if they were on
+                               // DavidM says that srlz.d is slow and is not required in this case
+.wait2:
+       // exponential backoff, kdb, lockmeter etc. go in here
+       hint @pause
+       ld4 r30=[r31]           // don't use ld4.bias; if it's contended, we won't write the word
+       ;;
+       cmp4.ne p14,p0=r30,r0
+       mov r30 = 1
+(p14)  br.cond.sptk.few .wait2
+(p15)  rsm psr.i               // disable interrupts if we reenabled them
+       ;;
+       cmpxchg4.acq r30=[r31], r30, ar.ccv
+       ;;
+       cmp4.ne p14,p0=r0,r30
+(p14)  br.cond.sptk.few .wait
+
+       br.ret.sptk.many b6     // lock is now taken
+END(ia64_spinlock_contention)
+
+#endif
+
+#endif /* CONFIG_SMP */
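
The contention routines above amount to the following C sketch: spin with
plain loads until the lock word reads zero, then attempt an atomic
compare-and-swap, and fall back to spinning if it fails (GCC's __sync builtin
stands in for cmpxchg4.acq here; the real code additionally toggles psr.i
while waiting):

        static void contend_sketch(volatile unsigned int *lock)
        {
                for (;;) {
                        while (*lock != 0)
                                ;       /* read-only spin: no write traffic on the line */
                        if (__sync_val_compare_and_swap(lock, 0, 1) == 0)
                                return; /* acquired */
                }
        }
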
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/irq_ia64.c
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c        Wed Aug  3 00:25:11 2005
@@ -0,0 +1,381 @@
+/*
+ * linux/arch/ia64/kernel/irq.c
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ *  6/10/99: Updated to bring in sync with x86 version to facilitate
+ *          support for SMP and different interrupt controllers.
+ *
+ * 09/15/00 Goutham Rao <goutham.rao@xxxxxxxxx> Implemented pci_irq_to_vector
+ *                      PCI to vector allocation routine.
+ * 04/14/2004 Ashok Raj <ashok.raj@xxxxxxxxx>
+ *                                             Added CPU Hotplug handling for IPF.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/jiffies.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel_stat.h>
+#include <linux/slab.h>
+#include <linux/ptrace.h>
+#include <linux/random.h>      /* for rand_initialize_irq() */
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/threads.h>
+#include <linux/bitops.h>
+
+#include <asm/delay.h>
+#include <asm/intrinsics.h>
+#include <asm/io.h>
+#include <asm/hw_irq.h>
+#include <asm/machvec.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_PERFMON
+# include <asm/perfmon.h>
+#endif
+
+#define IRQ_DEBUG      0
+
+/* default base addr of IPI table */
+void __iomem *ipi_base_addr = ((void __iomem *)
+                              (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
+
+/*
+ * Legacy IRQ to IA-64 vector translation table.
+ */
+__u8 isa_irq_to_vector_map[16] = {
+       /* 8259 IRQ translation, first 16 entries */
+       0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
+       0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
+};
+EXPORT_SYMBOL(isa_irq_to_vector_map);
+
+static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
+
+int
+assign_irq_vector (int irq)
+{
+       int pos, vector;
+ again:
+       pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
+       vector = IA64_FIRST_DEVICE_VECTOR + pos;
+       if (vector > IA64_LAST_DEVICE_VECTOR)
+               /* XXX could look for sharable vectors instead of panic'ing... */
+               panic("assign_irq_vector: out of interrupt vectors!");
+       if (test_and_set_bit(pos, ia64_vector_mask))
+               goto again;
+       return vector;
+}
+
+void
+free_irq_vector (int vector)
+{
+       int pos;
+
+       if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
+               return;
+
+       pos = vector - IA64_FIRST_DEVICE_VECTOR;
+       if (!test_and_clear_bit(pos, ia64_vector_mask))
+               printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
+}
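
assign_irq_vector()/free_irq_vector() together form a simple bitmap allocator
over the device-vector range.  A hedged usage sketch (note that this
allocator ignores its irq argument):

        int vec = assign_irq_vector(0);         /* lowest free device vector */
        /* ... route the device's interrupt to 'vec' ... */
        free_irq_vector(vec);                   /* double frees are reported */
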
+
+#ifdef CONFIG_SMP
+#      define IS_RESCHEDULE(vec)       (vec == IA64_IPI_RESCHEDULE)
+#else
+#      define IS_RESCHEDULE(vec)       (0)
+#endif
+/*
+ * That's where the IVT branches when we get an external
+ * interrupt. This branches to the correct hardware IRQ handler via
+ * function ptr.
+ */
+void
+ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
+{
+       unsigned long saved_tpr;
+
+#if IRQ_DEBUG
+#ifdef XEN
+       xen_debug_irq(vector, regs);
+#endif
+       {
+               unsigned long bsp, sp;
+
+               /*
+                * Note: if the interrupt happened while executing in
+                * the context switch routine (ia64_switch_to), we may
+                * get a spurious stack overflow here.  This is
+                * because the register and the memory stack are not
+                * switched atomically.
+                */
+               bsp = ia64_getreg(_IA64_REG_AR_BSP);
+               sp = ia64_getreg(_IA64_REG_SP);
+
+               if ((sp - bsp) < 1024) {
+                       static unsigned char count;
+                       static long last_time;
+
+                       if (jiffies - last_time > 5*HZ)
+                               count = 0;
+                       if (++count < 5) {
+                               last_time = jiffies;
+                               printk("ia64_handle_irq: DANGER: less than "
+                                      "1KB of free stack space!!\n"
+                                      "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
+                       }
+               }
+       }
+#endif /* IRQ_DEBUG */
+
+       /*
+        * Always set TPR to limit maximum interrupt nesting depth to
+        * 16 (without this, it would be ~240, which could easily lead
+        * to kernel stack overflows).
+        */
+       irq_enter();
+       saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
+       ia64_srlz_d();
+       while (vector != IA64_SPURIOUS_INT_VECTOR) {
+               if (!IS_RESCHEDULE(vector)) {
+                       ia64_setreg(_IA64_REG_CR_TPR, vector);
+                       ia64_srlz_d();
+
+#ifdef XEN
+                       if (!xen_do_IRQ(vector))
+#endif
+                       __do_IRQ(local_vector_to_irq(vector), regs);
+
+                       /*
+                        * Disable interrupts and send EOI:
+                        */
+                       local_irq_disable();
+                       ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
+               }
+               ia64_eoi();
+               vector = ia64_get_ivr();
+       }
+       /*
+        * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
+        * handler needs to be able to wait for further keyboard interrupts, which can't
+        * come through until ia64_eoi() has been done.
+        */
+       irq_exit();
+}
+
+#ifdef  CONFIG_VTI
+#define vmx_irq_enter()                \
+       add_preempt_count(HARDIRQ_OFFSET);
+
+/* Now softirq will be checked when leaving hypervisor, or else
+ * scheduler irq will be executed too early.
+ */
+#define vmx_irq_exit(void)     \
+       sub_preempt_count(HARDIRQ_OFFSET);
+/*
+ * That's where the IVT branches when we get an external
+ * interrupt. This branches to the correct hardware IRQ handler via
+ * function ptr.
+ */
+void
+vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
+{
+       unsigned long saved_tpr;
+       int     wake_dom0 = 0;
+
+
+#if IRQ_DEBUG
+       {
+               unsigned long bsp, sp;
+
+               /*
+                * Note: if the interrupt happened while executing in
+                * the context switch routine (ia64_switch_to), we may
+                * get a spurious stack overflow here.  This is
+                * because the register and the memory stack are not
+                * switched atomically.
+                */
+               bsp = ia64_getreg(_IA64_REG_AR_BSP);
+               sp = ia64_getreg(_IA64_REG_SP);
+
+               if ((sp - bsp) < 1024) {
+                       static unsigned char count;
+                       static long last_time;
+
+                       if (jiffies - last_time > 5*HZ)
+                               count = 0;
+                       if (++count < 5) {
+                               last_time = jiffies;
+                               printk("ia64_handle_irq: DANGER: less than "
+                                      "1KB of free stack space!!\n"
+                                      "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
+                       }
+               }
+       }
+#endif /* IRQ_DEBUG */
+
+       /*
+        * Always set TPR to limit maximum interrupt nesting depth to
+        * 16 (without this, it would be ~240, which could easily lead
+        * to kernel stack overflows).
+        */
+       vmx_irq_enter();
+       saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
+       ia64_srlz_d();
+       while (vector != IA64_SPURIOUS_INT_VECTOR) {
+           if (!IS_RESCHEDULE(vector)) {
+               ia64_setreg(_IA64_REG_CR_TPR, vector);
+               ia64_srlz_d();
+
+               if (vector != IA64_TIMER_VECTOR) {
+                       /* FIXME: Leave IRQ re-route later */
+                       vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
+                       wake_dom0 = 1;
+               }
+               else {  // FIXME: Handle Timer only now
+                       __do_IRQ(local_vector_to_irq(vector), regs);
+               }
+               
+               /*
+                * Disable interrupts and send EOI:
+                */
+               local_irq_disable();
+               ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
+           }
+           else {
+                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
+            }
+           ia64_eoi();
+           vector = ia64_get_ivr();
+       }
+       /*
+        * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
+        * handler needs to be able to wait for further keyboard interrupts, which can't
+        * come through until ia64_eoi() has been done.
+        */
+       vmx_irq_exit();
+       if ( wake_dom0 && current != dom0 ) 
+               domain_wake(dom0->vcpu[0]);
+}
+#endif
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * This function emulates interrupt processing when a CPU is about to be
+ * brought down.
+ */
+void ia64_process_pending_intr(void)
+{
+       ia64_vector vector;
+       unsigned long saved_tpr;
+       extern unsigned int vectors_in_migration[NR_IRQS];
+
+       vector = ia64_get_ivr();
+
+        irq_enter();
+        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
+        ia64_srlz_d();
+
+        /*
+         * Perform normal interrupt style processing
+         */
+       while (vector != IA64_SPURIOUS_INT_VECTOR) {
+               if (!IS_RESCHEDULE(vector)) {
+                       ia64_setreg(_IA64_REG_CR_TPR, vector);
+                       ia64_srlz_d();
+
+                       /*
+                        * Now try calling normal ia64_handle_irq as it would have got called
+                        * from a real intr handler.  Try passing null for pt_regs; hopefully
+                        * it will work.
+                        * Probably could share code here.
+                        */
+                       vectors_in_migration[local_vector_to_irq(vector)]=0;
+                       __do_IRQ(local_vector_to_irq(vector), NULL);
+
+                       /*
+                        * Disable interrupts and send EOI
+                        */
+                       local_irq_disable();
+                       ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
+               }
+               ia64_eoi();
+               vector = ia64_get_ivr();
+       }
+       irq_exit();
+}
+#endif
+
+
+#ifdef CONFIG_SMP
+extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
+
+static struct irqaction ipi_irqaction = {
+       .handler =      handle_IPI,
+       .flags =        SA_INTERRUPT,
+       .name =         "IPI"
+};
+#endif
+
+void
+register_percpu_irq (ia64_vector vec, struct irqaction *action)
+{
+       irq_desc_t *desc;
+       unsigned int irq;
+
+       for (irq = 0; irq < NR_IRQS; ++irq)
+               if (irq_to_vector(irq) == vec) {
+                       desc = irq_descp(irq);
+                       desc->status |= IRQ_PER_CPU;
+                       desc->handler = &irq_type_ia64_lsapic;
+                       if (action)
+                               setup_irq(irq, action);
+               }
+}
+
+void __init
+init_IRQ (void)
+{
+       register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
+#ifdef CONFIG_SMP
+       register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
+#endif
+#ifdef CONFIG_PERFMON
+       pfm_init_percpu();
+#endif
+       platform_irq_init();
+}
+
+void
+ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
+{
+       void __iomem *ipi_addr;
+       unsigned long ipi_data;
+       unsigned long phys_cpu_id;
+
+#ifdef CONFIG_SMP
+       phys_cpu_id = cpu_physical_id(cpu);
+#else
+       phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
+#endif
+
+       /*
+        * cpu number is in 8bit ID and 8bit EID
+        */
+
+       ipi_data = (delivery_mode << 8) | (vector & 0xff);
+       ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
+
+       writeq(ipi_data, ipi_addr);
+}
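
The IPI encoding above packs the target processor's physical ID into the
register address and the delivery mode plus vector into the data written.  A
worked example (values chosen purely for illustration):

        /* phys_cpu_id = 0x0304 (ID=3, EID=4), vector = 0xd0, no redirect:  */
        /* ipi_data = (0 << 8) | 0xd0               -> 0x00d0               */
        /* ipi_addr = ipi_base_addr + (0x0304 << 4) -> base + 0x3040        */
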
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/mm_contig.c
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/mm_contig.c       Wed Aug  3 00:25:11 2005
@@ -0,0 +1,305 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 2000, Rohit Seth <rohit.seth@xxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ *
+ * Routines used by ia64 machines with contiguous (or virtually contiguous)
+ * memory.
+ */
+#include <linux/config.h>
+#include <linux/bootmem.h>
+#include <linux/efi.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+
+#include <asm/meminit.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/mca.h>
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+static unsigned long num_dma_physpages;
+#endif
+
+/**
+ * show_mem - display a memory statistics summary
+ *
+ * Just walks the pages in the system and describes where they're allocated.
+ */
+#ifndef XEN
+void
+show_mem (void)
+{
+       int i, total = 0, reserved = 0;
+       int shared = 0, cached = 0;
+
+       printk("Mem-info:\n");
+       show_free_areas();
+
+       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+       i = max_mapnr;
+       while (i-- > 0) {
+               if (!pfn_valid(i))
+                       continue;
+               total++;
+               if (PageReserved(mem_map+i))
+                       reserved++;
+               else if (PageSwapCache(mem_map+i))
+                       cached++;
+               else if (page_count(mem_map + i))
+                       shared += page_count(mem_map + i) - 1;
+       }
+       printk("%d pages of RAM\n", total);
+       printk("%d reserved pages\n", reserved);
+       printk("%d pages shared\n", shared);
+       printk("%d pages swap cached\n", cached);
+       printk("%ld pages in page table cache\n", pgtable_cache_size);
+}
+#endif
+
+/* physical address where the bootmem map is located */
+unsigned long bootmap_start;
+
+/**
+ * find_max_pfn - adjust the maximum page number callback
+ * @start: start of range
+ * @end: end of range
+ * @arg: address of pointer to global max_pfn variable
+ *
+ * Passed as a callback function to efi_memmap_walk() to determine the highest
+ * available page frame number in the system.
+ */
+int
+find_max_pfn (unsigned long start, unsigned long end, void *arg)
+{
+       unsigned long *max_pfnp = arg, pfn;
+
+       pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
+       if (pfn > *max_pfnp)
+               *max_pfnp = pfn;
+       return 0;
+}
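
The callback is meant to be driven by efi_memmap_walk(), as the (non-Xen)
find_memory() further below does:

        unsigned long max_pfn = 0;
        efi_memmap_walk(find_max_pfn, &max_pfn);
        /* max_pfn now holds the highest usable page frame number */
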
+
+/**
+ * find_bootmap_location - callback to find a memory area for the bootmap
+ * @start: start of region
+ * @end: end of region
+ * @arg: unused callback data
+ *
+ * Find a place to put the bootmap and return its starting address in
+ * bootmap_start.  This address must be page-aligned.
+ */
+int
+find_bootmap_location (unsigned long start, unsigned long end, void *arg)
+{
+       unsigned long needed = *(unsigned long *)arg;
+       unsigned long range_start, range_end, free_start;
+       int i;
+
+#if IGNORE_PFN0
+       if (start == PAGE_OFFSET) {
+               start += PAGE_SIZE;
+               if (start >= end)
+                       return 0;
+       }
+#endif
+
+       free_start = PAGE_OFFSET;
+
+       for (i = 0; i < num_rsvd_regions; i++) {
+               range_start = max(start, free_start);
+               range_end   = min(end, rsvd_region[i].start & PAGE_MASK);
+
+               free_start = PAGE_ALIGN(rsvd_region[i].end);
+
+               if (range_end <= range_start)
+                       continue; /* skip over empty range */
+
+               if (range_end - range_start >= needed) {
+                       bootmap_start = __pa(range_start);
+                       return -1;      /* done */
+               }
+
+               /* nothing more available in this segment */
+               if (range_end == end)
+                       return 0;
+       }
+       return 0;
+}
+
+/**
+ * find_memory - setup memory map
+ *
+ * Walk the EFI memory map and find usable memory for the system, taking
+ * into account reserved areas.
+ */
+#ifndef XEN
+void
+find_memory (void)
+{
+       unsigned long bootmap_size;
+
+       reserve_memory();
+
+       /* first find highest page frame number */
+       max_pfn = 0;
+       efi_memmap_walk(find_max_pfn, &max_pfn);
+
+       /* how many bytes to cover all the pages */
+       bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
+
+       /* look for a location to hold the bootmap */
+       bootmap_start = ~0UL;
+       efi_memmap_walk(find_bootmap_location, &bootmap_size);
+       if (bootmap_start == ~0UL)
+               panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
+
+       bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
+
+       /* Free all available memory, then mark bootmem-map as being in use. */
+       efi_memmap_walk(filter_rsvd_memory, free_bootmem);
+       reserve_bootmem(bootmap_start, bootmap_size);
+
+       find_initrd();
+}
+#endif
+
+#ifdef CONFIG_SMP
+/**
+ * per_cpu_init - setup per-cpu variables
+ *
+ * Allocate and setup per-cpu data areas.
+ */
+void *
+per_cpu_init (void)
+{
+       void *cpu_data;
+       int cpu;
+
+       /*
+        * get_free_pages() cannot be used before cpu_init() is done.  The BSP
+        * allocates "NR_CPUS" pages for all CPUs so that the APs need not call
+        * get_zeroed_page().
+        */
+       if (smp_processor_id() == 0) {
+               cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+                                          PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+               for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                       memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
+                       __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
+                       cpu_data += PERCPU_PAGE_SIZE;
+                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+               }
+       }
+       return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+}
+#endif /* CONFIG_SMP */
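
per_cpu_init() gives each CPU its own copy of the .percpu template and
records the copy's displacement in __per_cpu_offset[].  A per-CPU variable is
then reached by adding that displacement to the template address, roughly as
in this sketch of the usual offset scheme (PER_CPU_ADDR_SKETCH is a
hypothetical name):

        #define PER_CPU_ADDR_SKETCH(var, cpu) \
                ((typeof(&(var))) ((char *) &(var) + __per_cpu_offset[cpu]))
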
+
+static int
+count_pages (u64 start, u64 end, void *arg)
+{
+       unsigned long *count = arg;
+
+       *count += (end - start) >> PAGE_SHIFT;
+       return 0;
+}
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+static int
+count_dma_pages (u64 start, u64 end, void *arg)
+{
+       unsigned long *count = arg;
+
+       if (start < MAX_DMA_ADDRESS)
+               *count += (min(end, MAX_DMA_ADDRESS) - start) >> PAGE_SHIFT;
+       return 0;
+}
+#endif
+
+/*
+ * Set up the page tables.
+ */
+
+#ifndef XEN
+void
+paging_init (void)
+{
+       unsigned long max_dma;
+       unsigned long zones_size[MAX_NR_ZONES];
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+       unsigned long zholes_size[MAX_NR_ZONES];
+       unsigned long max_gap;
+#endif
+
+       /* initialize mem_map[] */
+
+       memset(zones_size, 0, sizeof(zones_size));
+
+       num_physpages = 0;
+       efi_memmap_walk(count_pages, &num_physpages);
+
+       max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+       memset(zholes_size, 0, sizeof(zholes_size));
+
+       num_dma_physpages = 0;
+       efi_memmap_walk(count_dma_pages, &num_dma_physpages);
+
+       if (max_low_pfn < max_dma) {
+               zones_size[ZONE_DMA] = max_low_pfn;
+               zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
+       } else {
+               zones_size[ZONE_DMA] = max_dma;
+               zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
+               if (num_physpages > num_dma_physpages) {
+                       zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+                       zholes_size[ZONE_NORMAL] =
+                               ((max_low_pfn - max_dma) -
+                                (num_physpages - num_dma_physpages));
+               }
+       }
+
+       max_gap = 0;
+       efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
+       if (max_gap < LARGE_GAP) {
+               vmem_map = (struct page *) 0;
+               free_area_init_node(0, &contig_page_data, zones_size, 0,
+                                   zholes_size);
+       } else {
+               unsigned long map_size;
+
+               /* allocate virtual_mem_map */
+
+               map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+               vmalloc_end -= map_size;
+               vmem_map = (struct page *) vmalloc_end;
+               efi_memmap_walk(create_mem_map_page_table, NULL);
+
+               mem_map = contig_page_data.node_mem_map = vmem_map;
+               free_area_init_node(0, &contig_page_data, zones_size,
+                                   0, zholes_size);
+
+               printk("Virtual mem_map starts at 0x%p\n", mem_map);
+       }
+#else /* !CONFIG_VIRTUAL_MEM_MAP */
+       if (max_low_pfn < max_dma)
+               zones_size[ZONE_DMA] = max_low_pfn;
+       else {
+               zones_size[ZONE_DMA] = max_dma;
+               zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+       }
+       free_area_init(zones_size);
+#endif /* !CONFIG_VIRTUAL_MEM_MAP */
+       zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
+}
+#endif /* !XEN */
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/pal.S
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/pal.S     Wed Aug  3 00:25:11 2005
@@ -0,0 +1,310 @@
+/*
+ * PAL Firmware support
+ * IA-64 Processor Programmers Reference Vol 2
+ *
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co
+ *     David Mosberger <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *
+ * 05/22/2000 eranian Added support for stacked register calls
+ * 05/24/2000 eranian Added support for physical mode static calls
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+
+       .data
+pal_entry_point:
+       data8 ia64_pal_default_handler
+       .text
+
+/*
+ * Set the PAL entry point address.  This could be written in C code, but we do it here
+ * to keep it all in one module (besides, it's so trivial that it's
+ * not a big deal).
+ *
+ * in0         Address of the PAL entry point (text address, NOT a function descriptor).
+ */
+GLOBAL_ENTRY(ia64_pal_handler_init)
+       alloc r3=ar.pfs,1,0,0,0
+       movl r2=pal_entry_point
+       ;;
+       st8 [r2]=in0
+       br.ret.sptk.many rp
+END(ia64_pal_handler_init)
+
+/*
+ * Default PAL call handler.  This needs to be coded in assembly because it uses
+ * the static calling convention, i.e., the RSE may not be used and calls are
+ * done via "br.cond" (not "br.call").
+ */
+GLOBAL_ENTRY(ia64_pal_default_handler)
+       mov r8=-1
+       br.cond.sptk.many rp
+END(ia64_pal_default_handler)
+
+/*
+ * Make a PAL call using the static calling convention.
+ *
+ * in0         Index of PAL service
+ * in1 - in3   Remaining PAL arguments
+ * in4        1 ==> clear psr.ic,  0 ==> don't clear psr.ic
+ *
+ */
+GLOBAL_ENTRY(ia64_pal_call_static)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
+       alloc loc1 = ar.pfs,5,5,0,0
+       movl loc2 = pal_entry_point
+1:     {
+         mov r28 = in0
+         mov r29 = in1
+         mov r8 = ip
+       }
+       ;;
+       ld8 loc2 = [loc2]               // loc2 <- entry point
+       tbit.nz p6,p7 = in4, 0
+       adds r8 = 1f-1b,r8
+       mov loc4=ar.rsc                 // save RSE configuration
+       ;;
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       mov loc3 = psr
+       mov loc0 = rp
+       .body
+       mov r30 = in2
+
+(p6)   rsm psr.i | psr.ic
+       mov r31 = in3
+       mov b7 = loc2
+
+(p7)   rsm psr.i
+       ;;
+(p6)   srlz.i
+       mov rp = r8
+       br.cond.sptk.many b7
+1:     mov psr.l = loc3
+       mov ar.rsc = loc4               // restore RSE configuration
+       mov ar.pfs = loc1
+       mov rp = loc0
+       ;;
+       srlz.d                          // serialize restoration of psr.l
+       br.ret.sptk.many b0
+END(ia64_pal_call_static)
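
On the C side this routine is reached through a declaration along the lines
of the sketch below; the struct mirrors the r8-r11 return convention (the
authoritative declarations live in include/asm-ia64/pal.h, not in this
patch):

        struct ia64_pal_retval {
                long status;                    /* r8: PAL status, 0 on success */
                unsigned long v0, v1, v2;       /* r9-r11: returned values */
        };
        extern struct ia64_pal_retval
        ia64_pal_call_static(u64 index, u64 arg1, u64 arg2, u64 arg3, u64 clear_ic);
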
+
+/*
+ * Make a PAL call using the stacked registers calling convention.
+ *
+ * Inputs:
+ *     in0         Index of PAL service
+ *     in1 - in3   Remaining PAL arguments
+ */
+GLOBAL_ENTRY(ia64_pal_call_stacked)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
+       alloc loc1 = ar.pfs,4,4,4,0
+       movl loc2 = pal_entry_point
+
+       mov r28  = in0                  // Index MUST be copied to r28
+       mov out0 = in0                  // AND in0 of PAL function
+       mov loc0 = rp
+       .body
+       ;;
+       ld8 loc2 = [loc2]               // loc2 <- entry point
+       mov out1 = in1
+       mov out2 = in2
+       mov out3 = in3
+       mov loc3 = psr
+       ;;
+       rsm psr.i
+       mov b7 = loc2
+       ;;
+       br.call.sptk.many rp=b7         // now make the call
+.ret0: mov psr.l  = loc3
+       mov ar.pfs = loc1
+       mov rp = loc0
+       ;;
+       srlz.d                          // serialize restoration of psr.l
+       br.ret.sptk.many b0
+END(ia64_pal_call_stacked)
+
+/*
+ * Make a physical mode PAL call using the static registers calling convention.
+ *
+ * Inputs:
+ *     in0         Index of PAL service
+ *     in1 - in3   Remaining PAL arguments
+ *
+ * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
+ * So we don't need to clear them.
+ */
+#define PAL_PSR_BITS_TO_CLEAR                                                 \
+       (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT  | IA64_PSR_DB | IA64_PSR_RT | \
+        IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |               \
+        IA64_PSR_DFL | IA64_PSR_DFH)
+
+#define PAL_PSR_BITS_TO_SET                                                   \
+       (IA64_PSR_BN)
+
+
+GLOBAL_ENTRY(ia64_pal_call_phys_static)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
+       alloc loc1 = ar.pfs,4,7,0,0
+       movl loc2 = pal_entry_point
+1:     {
+         mov r28  = in0                // copy procedure index
+         mov r8   = ip                 // save ip to compute branch
+         mov loc0 = rp                 // save rp
+       }
+       .body
+       ;;
+       ld8 loc2 = [loc2]               // loc2 <- entry point
+       mov r29  = in1                  // first argument
+       mov r30  = in2                  // copy arg2
+       mov r31  = in3                  // copy arg3
+       ;;
+       mov loc3 = psr                  // save psr
+       adds r8  = 1f-1b,r8             // calculate return address for call
+       ;;
+       mov loc4=ar.rsc                 // save RSE configuration
+#ifdef XEN
+       dep.z loc2=loc2,0,60            // convert pal entry point to physical
+#else // XEN
+       dep.z loc2=loc2,0,61            // convert pal entry point to physical
+#endif // XEN
+       tpa r8=r8                       // convert rp to physical
+       ;;
+       mov b7 = loc2                   // install target to branch reg
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       movl r16=PAL_PSR_BITS_TO_CLEAR
+       movl r17=PAL_PSR_BITS_TO_SET
+       ;;
+       or loc3=loc3,r17                // add in psr the bits to set
+       ;;
+       andcm r16=loc3,r16              // removes bits to clear from psr
+       br.call.sptk.many rp=ia64_switch_mode_phys
+.ret1: mov rp = r8                     // install return address (physical)
+       mov loc5 = r19
+       mov loc6 = r20
+       br.cond.sptk.many b7
+1:
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       mov r16=loc3                    // r16= original psr
+       mov r19=loc5
+       mov r20=loc6
+       br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
+.ret2:
+       mov psr.l = loc3                // restore init PSR
+
+       mov ar.pfs = loc1
+       mov rp = loc0
+       ;;
+       mov ar.rsc=loc4                 // restore RSE configuration
+       srlz.d                          // serialize restoration of psr.l
+       br.ret.sptk.many b0
+END(ia64_pal_call_phys_static)
+
+/*
+ * Make a PAL call using the stacked registers in physical mode.
+ *
+ * Inputs:
+ *     in0         Index of PAL service
+ *     in1 - in3   Remaining PAL arguments
+ */
+GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
+       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
+       alloc   loc1 = ar.pfs,5,7,4,0
+       movl    loc2 = pal_entry_point
+1:     {
+         mov r28  = in0                // copy procedure index
+         mov loc0 = rp         // save rp
+       }
+       .body
+       ;;
+       ld8 loc2 = [loc2]               // loc2 <- entry point
+       mov out0 = in0          // first argument
+       mov out1 = in1          // copy arg2
+       mov out2 = in2          // copy arg3
+       mov out3 = in3          // copy arg4
+       ;;
+       mov loc3 = psr          // save psr
+       ;;
+       mov loc4=ar.rsc                 // save RSE configuration
+#ifdef XEN
+       dep.z loc2=loc2,0,60            // convert pal entry point to physical
+#else // XEN
+       dep.z loc2=loc2,0,61            // convert pal entry point to physical
+#endif // XEN
+       ;;
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       movl r16=PAL_PSR_BITS_TO_CLEAR
+       movl r17=PAL_PSR_BITS_TO_SET
+       ;;
+       or loc3=loc3,r17                // add in psr the bits to set
+       mov b7 = loc2                   // install target to branch reg
+       ;;
+       andcm r16=loc3,r16              // removes bits to clear from psr
+       br.call.sptk.many rp=ia64_switch_mode_phys
+.ret6:
+       mov loc5 = r19
+       mov loc6 = r20
+       br.call.sptk.many rp=b7         // now make the call
+.ret7:
+       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
+       mov r16=loc3                    // r16= original psr
+       mov r19=loc5
+       mov r20=loc6
+       br.call.sptk.many rp=ia64_switch_mode_virt      // return to virtual mode
+
+.ret8: mov psr.l  = loc3               // restore init PSR
+       mov ar.pfs = loc1
+       mov rp = loc0
+       ;;
+       mov ar.rsc=loc4                 // restore RSE configuration
+       srlz.d                          // serialize restoration of psr.l
+       br.ret.sptk.many b0
+END(ia64_pal_call_phys_stacked)
+
+/*
+ * Save scratch fp regs which aren't saved in pt_regs already (fp10-fp15).
+ *
+ * NOTE: We need to do this since firmware (SAL and PAL) may use any of the scratch
+ * regs fp-low partition.
+ *
+ * Inputs:
+ *      in0    Address of stack storage for fp regs
+ */
+GLOBAL_ENTRY(ia64_save_scratch_fpregs)
+       alloc r3=ar.pfs,1,0,0,0
+       add r2=16,in0
+       ;;
+       stf.spill [in0] = f10,32
+       stf.spill [r2]  = f11,32
+       ;;
+       stf.spill [in0] = f12,32
+       stf.spill [r2]  = f13,32
+       ;;
+       stf.spill [in0] = f14,32
+       stf.spill [r2]  = f15,32
+       br.ret.sptk.many rp
+END(ia64_save_scratch_fpregs)
+
+/*
+ * Load scratch fp scratch regs (fp10-fp15)
+ *
+ * Inputs:
+ *      in0    Address of stack storage for fp regs
+ */
+GLOBAL_ENTRY(ia64_load_scratch_fpregs)
+       alloc r3=ar.pfs,1,0,0,0
+       add r2=16,in0
+       ;;
+       ldf.fill  f10 = [in0],32
+       ldf.fill  f11 = [r2],32
+       ;;
+       ldf.fill  f12 = [in0],32
+       ldf.fill  f13 = [r2],32
+       ;;
+       ldf.fill  f14 = [in0],32
+       ldf.fill  f15 = [r2],32
+       br.ret.sptk.many rp
+END(ia64_load_scratch_fpregs)
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/setup.c
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/setup.c   Wed Aug  3 00:25:11 2005
@@ -0,0 +1,773 @@
+/*
+ * Architecture-specific setup.
+ *
+ * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 2000, Rohit Seth <rohit.seth@xxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ *
+ * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
+ * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
+ * 03/31/00 R.Seth     cpu_initialized and current->processor fixes
+ * 02/04/00 D.Mosberger        some more get_cpuinfo fixes...
+ * 02/01/00 R.Seth     fixed get_cpuinfo for SMP
+ * 01/07/99 S.Eranian  added the support for command line argument
+ * 06/24/99 W.Drummond added boot_cpu_data.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/acpi.h>
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/threads.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/efi.h>
+#include <linux/initrd.h>
+
+#include <asm/ia32.h>
+#include <asm/machvec.h>
+#include <asm/mca.h>
+#include <asm/meminit.h>
+#include <asm/page.h>
+#include <asm/patch.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/sal.h>
+#include <asm/sections.h>
+#include <asm/serial.h>
+#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/unistd.h>
+#ifdef CONFIG_VTI
+#include <asm/vmx.h>
+#endif // CONFIG_VTI
+#include <asm/io.h>
+
+#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
+# error "struct cpuinfo_ia64 too big!"
+#endif
+
+#ifdef CONFIG_SMP
+unsigned long __per_cpu_offset[NR_CPUS];
+EXPORT_SYMBOL(__per_cpu_offset);
+#endif
+
+DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
+DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
+unsigned long ia64_cycles_per_usec;
+struct ia64_boot_param *ia64_boot_param;
+struct screen_info screen_info;
+
+unsigned long ia64_max_cacheline_size;
+unsigned long ia64_iobase;     /* virtual address for I/O accesses */
+EXPORT_SYMBOL(ia64_iobase);
+struct io_space io_space[MAX_IO_SPACES];
+EXPORT_SYMBOL(io_space);
+unsigned int num_io_spaces;
+
+unsigned char aux_device_present = 0xaa;        /* XXX remove this when legacy I/O is gone */
+
+/*
+ * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
+ * mask specifies a mask of address bits that must be 0 in order for two buffers to be
+ * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
+ * address of the second buffer must be aligned to (merge_mask+1) in order to be
+ * mergeable).  By default, we assume there is no I/O MMU which can merge physically
+ * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
+ * page-size of 2^64.
+ */
+unsigned long ia64_max_iommu_merge_mask = ~0UL;
+EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
+
+/*
+ * We use a special marker for the end of memory and it uses the extra (+1) slot
+ */
+struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
+int num_rsvd_regions;
+
+
+/*
+ * Filter incoming memory segments based on the primitive map created from the boot
+ * parameters. Segments contained in the map are removed from the memory ranges. A
+ * caller-specified function is called with the memory ranges that remain after filtering.
+ * This routine does not assume the incoming segments are sorted.
+ */
+int
+filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
+{
+       unsigned long range_start, range_end, prev_start;
+       void (*func)(unsigned long, unsigned long, int);
+       int i;
+
+#if IGNORE_PFN0
+       if (start == PAGE_OFFSET) {
+               printk(KERN_WARNING "warning: skipping physical page 0\n");
+               start += PAGE_SIZE;
+               if (start >= end) return 0;
+       }
+#endif
+       /*
+        * lowest possible address (walker uses virtual)
+        */
+       prev_start = PAGE_OFFSET;
+       func = arg;
+
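+       /*
+        * Walk the gaps between consecutive (sorted) reserved regions: each
+        * iteration passes the part of [start,end) that precedes
+        * rsvd_region[i] to the callback, then skips past that region.
+        */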
+       for (i = 0; i < num_rsvd_regions; ++i) {
+               range_start = max(start, prev_start);
+               range_end   = min(end, rsvd_region[i].start);
+
+               if (range_start < range_end)
+#ifdef XEN
+               {
+               /* init_boot_pages requires "ps, pe" */
+                       printk("Init boot pages: 0x%lx -> 0x%lx.\n",
+                               __pa(range_start), __pa(range_end));
+                       (*func)(__pa(range_start), __pa(range_end), 0);
+               }
+#else
+                       call_pernode_memory(__pa(range_start), range_end - range_start, func);
+#endif
+
+               /* nothing more available in this segment */
+               if (range_end == end) return 0;
+
+               prev_start = rsvd_region[i].end;
+       }
+       /* end of memory marker allows full processing inside loop body */
+       return 0;
+}
+
+static void
+sort_regions (struct rsvd_region *rsvd_region, int max)
+{
+       int j;
+
+       /* simple bubble sorting */
+       while (max--) {
+               for (j = 0; j < max; ++j) {
+                       if (rsvd_region[j].start > rsvd_region[j+1].start) {
+                               struct rsvd_region tmp;
+                               tmp = rsvd_region[j];
+                               rsvd_region[j] = rsvd_region[j + 1];
+                               rsvd_region[j + 1] = tmp;
+                       }
+               }
+       }
+}
+
+/**
+ * reserve_memory - setup reserved memory areas
+ *
+ * Setup the reserved memory areas set aside for the boot parameters,
+ * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
+ * see include/asm-ia64/meminit.h if you need to define more.
+ */
+void
+reserve_memory (void)
+{
+       int n = 0;
+
+       /*
+        * none of the entries in this table overlap
+        */
+       rsvd_region[n].start = (unsigned long) ia64_boot_param;
+       rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
+       n++;
+
+       rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
+       rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
+       n++;
+
+       rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
+       rsvd_region[n].end   = (rsvd_region[n].start
+                               + strlen(__va(ia64_boot_param->command_line)) + 1);
+       n++;
+
+       rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
+#ifdef XEN
+       /* Reserve xen image/bitmap/xen-heap */
+       rsvd_region[n].end   = rsvd_region[n].start + xenheap_size;
+#else
+       rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
+#endif
+       n++;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (ia64_boot_param->initrd_start) {
+               rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
+               rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
+               n++;
+       }
+#endif
+
+       /* end of memory marker */
+       rsvd_region[n].start = ~0UL;
+       rsvd_region[n].end   = ~0UL;
+       n++;
+
+       num_rsvd_regions = n;
+
+       sort_regions(rsvd_region, num_rsvd_regions);
+}
+
+/**
+ * find_initrd - get initrd parameters from the boot parameter structure
+ *
+ * Grab the initrd start and end from the boot parameter struct given us by
+ * the boot loader.
+ */
+void
+find_initrd (void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (ia64_boot_param->initrd_start) {
+               initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
+               initrd_end   = initrd_start+ia64_boot_param->initrd_size;
+
+               printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
+                      initrd_start, ia64_boot_param->initrd_size);
+       }
+#endif
+}
+
+static void __init
+io_port_init (void)
+{
+       extern unsigned long ia64_iobase;
+       unsigned long phys_iobase;
+
+       /*
+        *  Set `iobase' to the appropriate address in region 6 (uncached access range).
+        *
+        *  The EFI memory map is the "preferred" location to get the I/O port space base,
+        *  rather than relying on AR.KR0. This should become more clear in future SAL
+        *  specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
+        *  found in the memory map.
+        */
+       phys_iobase = efi_get_iobase();
+       if (phys_iobase)
+               /* set AR.KR0 since this is all we use it for anyway */
+               ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
+       else {
+               phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
+               printk(KERN_INFO "No I/O port range found in EFI memory map, 
falling back "
+                      "to AR.KR0\n");
+               printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
+       }
+       ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
+
+       /* setup legacy IO port space */
+       io_space[0].mmio_base = ia64_iobase;
+       io_space[0].sparse = 1;
+       num_io_spaces = 1;
+}
+
+/**
+ * early_console_setup - setup debugging console
+ *
+ * Consoles started here require little enough setup that we can start using
+ * them very early in the boot process, either right after the machine
+ * vector initialization, or even before if the drivers can detect their hw.
+ *
+ * Returns non-zero if a console couldn't be setup.
+ */
+static inline int __init
+early_console_setup (char *cmdline)
+{
+#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
+       {
+               extern int sn_serial_console_early_setup(void);
+               if (!sn_serial_console_early_setup())
+                       return 0;
+       }
+#endif
+#ifdef CONFIG_EFI_PCDP
+       if (!efi_setup_pcdp_console(cmdline))
+               return 0;
+#endif
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+       if (!early_serial_console_init(cmdline))
+               return 0;
+#endif
+
+       return -1;
+}
+
+static inline void
+mark_bsp_online (void)
+{
+#ifdef CONFIG_SMP
+       /* If we register an early console, allow CPU 0 to printk */
+       cpu_set(smp_processor_id(), cpu_online_map);
+#endif
+}
+
+void __init
+#ifdef XEN
+early_setup_arch (char **cmdline_p)
+#else
+setup_arch (char **cmdline_p)
+#endif
+{
+       unw_init();
+
+       ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
+
+       *cmdline_p = __va(ia64_boot_param->command_line);
+#ifdef XEN
+       efi_init();
+#else
+       strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+
+       efi_init();
+       io_port_init();
+#endif
+
+#ifdef CONFIG_IA64_GENERIC
+       {
+               const char *mvec_name = strstr (*cmdline_p, "machvec=");
+               char str[64];
+
+               if (mvec_name) {
+                       const char *end;
+                       size_t len;
+
+                       mvec_name += 8;
+                       end = strchr (mvec_name, ' ');
+                       if (end)
+                               len = end - mvec_name;
+                       else
+                               len = strlen (mvec_name);
+                       len = min(len, sizeof (str) - 1);
+                       strncpy (str, mvec_name, len);
+                       str[len] = '\0';
+                       mvec_name = str;
+               } else
+                       mvec_name = acpi_get_sysname();
+               machvec_init(mvec_name);
+       }
+#endif
+
+#ifdef XEN
+       early_cmdline_parse(cmdline_p);
+       cmdline_parse(*cmdline_p);
+#undef CONFIG_ACPI_BOOT
+#endif
+       if (early_console_setup(*cmdline_p) == 0)
+               mark_bsp_online();
+
+#ifdef CONFIG_ACPI_BOOT
+       /* Initialize the ACPI boot-time table parser */
+       acpi_table_init();
+# ifdef CONFIG_ACPI_NUMA
+       acpi_numa_init();
+# endif
+#else
+# ifdef CONFIG_SMP
+       smp_build_cpu_map();    /* happens, e.g., with the Ski simulator */
+# endif
+#endif /* CONFIG_ACPI_BOOT */
+
+#ifndef XEN
+       find_memory();
+#else
+       io_port_init();
+}
+
+void __init
+late_setup_arch (char **cmdline_p)
+{
+#undef CONFIG_ACPI_BOOT
+       acpi_table_init();
+#endif
+       /* process SAL system table: */
+       ia64_sal_init(efi.sal_systab);
+
+#ifdef CONFIG_SMP
+       cpu_physical_id(0) = hard_smp_processor_id();
+#endif
+
+#ifdef CONFIG_VTI
+       identify_vmx_feature();
+#endif // CONFIG_VTI
+
+       cpu_init();     /* initialize the bootstrap CPU */
+
+#ifdef CONFIG_ACPI_BOOT
+       acpi_boot_init();
+#endif
+
+#ifdef CONFIG_VT
+       if (!conswitchp) {
+# if defined(CONFIG_DUMMY_CONSOLE)
+               conswitchp = &dummy_con;
+# endif
+# if defined(CONFIG_VGA_CONSOLE)
+               /*
+                * Non-legacy systems may route legacy VGA MMIO range to system
+                * memory.  vga_con probes the MMIO hole, so memory looks like
+                * a VGA device to it.  The EFI memory map can tell us if it's
+                * memory so we can avoid this problem.
+                */
+               if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
+                       conswitchp = &vga_con;
+# endif
+       }
+#endif
+
+       /* enable IA-64 Machine Check Abort Handling unless disabled */
+       if (!strstr(saved_command_line, "nomca"))
+               ia64_mca_init();
+
+       platform_setup(cmdline_p);
+       paging_init();
+}
+
+/*
+ * Display cpu info for all CPUs.
+ */
+static int
+show_cpuinfo (struct seq_file *m, void *v)
+{
+#ifdef CONFIG_SMP
+#      define lpj      c->loops_per_jiffy
+#      define cpunum   c->cpu
+#else
+#      define lpj      loops_per_jiffy
+#      define cpunum   0
+#endif
+       static struct {
+               unsigned long mask;
+               const char *feature_name;
+       } feature_bits[] = {
+               { 1UL << 0, "branchlong" },
+               { 1UL << 1, "spontaneous deferral"},
+               { 1UL << 2, "16-byte atomic ops" }
+       };
+       char family[32], features[128], *cp, sep;
+       struct cpuinfo_ia64 *c = v;
+       unsigned long mask;
+       int i;
+
+       mask = c->features;
+
+       switch (c->family) {
+             case 0x07:        memcpy(family, "Itanium", 8); break;
+             case 0x1f:        memcpy(family, "Itanium 2", 10); break;
+             default:          sprintf(family, "%u", c->family); break;
+       }
+
+       /* build the feature string: */
+       memcpy(features, " standard", 10);
+       cp = features;
+       sep = 0;
+       for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
+               if (mask & feature_bits[i].mask) {
+                       if (sep)
+                               *cp++ = sep;
+                       sep = ',';
+                       *cp++ = ' ';
+                       strcpy(cp, feature_bits[i].feature_name);
+                       cp += strlen(feature_bits[i].feature_name);
+                       mask &= ~feature_bits[i].mask;
+               }
+       }
+       if (mask) {
+               /* print unknown features as a hex value: */
+               if (sep)
+                       *cp++ = sep;
+               sprintf(cp, " 0x%lx", mask);
+       }
+
+       seq_printf(m,
+                  "processor  : %d\n"
+                  "vendor     : %s\n"
+                  "arch       : IA-64\n"
+                  "family     : %s\n"
+                  "model      : %u\n"
+                  "revision   : %u\n"
+                  "archrev    : %u\n"
+                  "features   :%s\n"   /* don't change this---it _is_ right! */
+                  "cpu number : %lu\n"
+                  "cpu regs   : %u\n"
+                  "cpu MHz    : %lu.%06lu\n"
+                  "itc MHz    : %lu.%06lu\n"
+                  "BogoMIPS   : %lu.%02lu\n\n",
+                  cpunum, c->vendor, family, c->model, c->revision, c->archrev,
+                  features, c->ppn, c->number,
+                  c->proc_freq / 1000000, c->proc_freq % 1000000,
+                  c->itc_freq / 1000000, c->itc_freq % 1000000,
+                  lpj*HZ/500000, (lpj*HZ/5000) % 100);
+       return 0;
+}
+
+static void *
+c_start (struct seq_file *m, loff_t *pos)
+{
+#ifdef CONFIG_SMP
+       while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
+               ++*pos;
+#endif
+       return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
+}
+
+static void *
+c_next (struct seq_file *m, void *v, loff_t *pos)
+{
+       ++*pos;
+       return c_start(m, pos);
+}
+
+static void
+c_stop (struct seq_file *m, void *v)
+{
+}
+
+#ifndef XEN
+struct seq_operations cpuinfo_op = {
+       .start =        c_start,
+       .next =         c_next,
+       .stop =         c_stop,
+       .show =         show_cpuinfo
+};
+#endif
+
+void
+identify_cpu (struct cpuinfo_ia64 *c)
+{
+       union {
+               unsigned long bits[5];
+               struct {
+                       /* id 0 & 1: */
+                       char vendor[16];
+
+                       /* id 2 */
+                       u64 ppn;                /* processor serial number */
+
+                       /* id 3: */
+                       unsigned number         :  8;
+                       unsigned revision       :  8;
+                       unsigned model          :  8;
+                       unsigned family         :  8;
+                       unsigned archrev        :  8;
+                       unsigned reserved       : 24;
+
+                       /* id 4: */
+                       u64 features;
+               } field;
+       } cpuid;
+       pal_vm_info_1_u_t vm1;
+       pal_vm_info_2_u_t vm2;
+       pal_status_t status;
+       unsigned long impl_va_msb = 50, phys_addr_size = 44;    /* Itanium defaults */
+       int i;
+
+       for (i = 0; i < 5; ++i)
+               cpuid.bits[i] = ia64_get_cpuid(i);
+
+       memcpy(c->vendor, cpuid.field.vendor, 16);
+#ifdef CONFIG_SMP
+       c->cpu = smp_processor_id();
+#endif
+       c->ppn = cpuid.field.ppn;
+       c->number = cpuid.field.number;
+       c->revision = cpuid.field.revision;
+       c->model = cpuid.field.model;
+       c->family = cpuid.field.family;
+       c->archrev = cpuid.field.archrev;
+       c->features = cpuid.field.features;
+
+       status = ia64_pal_vm_summary(&vm1, &vm2);
+       if (status == PAL_STATUS_SUCCESS) {
+               impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
+               phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
+       }
+       c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
+       c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
+
+#ifdef CONFIG_VTI
+       /* If vmx feature is on, do necessary initialization for vmx */
+       if (vmx_enabled)
+               vmx_init_env();
+#endif
+}
+
+void
+setup_per_cpu_areas (void)
+{
+       /* start_kernel() requires this... */
+}
+
+static void
+get_max_cacheline_size (void)
+{
+       unsigned long line_size, max = 1;
+       u64 l, levels, unique_caches;
+        pal_cache_config_info_t cci;
+        s64 status;
+
+        status = ia64_pal_cache_summary(&levels, &unique_caches);
+        if (status != 0) {
+                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed 
(status=%ld)\n",
+                       __FUNCTION__, status);
+                max = SMP_CACHE_BYTES;
+               goto out;
+        }
+
+       for (l = 0; l < levels; ++l) {
+               status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
+                                                   &cci);
+               if (status != 0) {
+                       printk(KERN_ERR
+                              "%s: ia64_pal_cache_config_info(l=%lu) failed 
(status=%ld)\n",
+                              __FUNCTION__, l, status);
+                       max = SMP_CACHE_BYTES;
+               }
+               line_size = 1 << cci.pcci_line_size;
+               if (line_size > max)
+                       max = line_size;
+        }
+  out:
+       if (max > ia64_max_cacheline_size)
+               ia64_max_cacheline_size = max;
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU.  This function acts
+ * as a 'CPU state barrier', nothing should get across.
+ */
+void
+cpu_init (void)
+{
+       extern void __devinit ia64_mmu_init (void *);
+       unsigned long num_phys_stacked;
+       pal_vm_info_2_u_t vmi;
+       unsigned int max_ctx;
+       struct cpuinfo_ia64 *cpu_info;
+       void *cpu_data;
+
+       cpu_data = per_cpu_init();
+
+       /*
+        * We set ar.k3 so that assembly code in MCA handler can compute
+        * physical addresses of per cpu variables with a simple:
+        *   phys = ar.k3 + &per_cpu_var
+        */
+       ia64_set_kr(IA64_KR_PER_CPU_DATA,
+                   ia64_tpa(cpu_data) - (long) __per_cpu_start);
+
+       get_max_cacheline_size();
+
+       /*
+        * We can't pass "local_cpu_data" to identify_cpu() because we haven't 
called
+        * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first 
because it
+        * depends on the data returned by identify_cpu().  We break the 
dependency by
+        * accessing cpu_data() through the canonical per-CPU address.
+        */
+       cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - 
__per_cpu_start);
+       identify_cpu(cpu_info);
+
+#ifdef CONFIG_MCKINLEY
+       {
+#              define FEATURE_SET 16
+               struct ia64_pal_retval iprv;
+
+               if (cpu_info->family == 0x1f) {
+                       PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
+                       if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
+                               PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
+                                             (iprv.v1 | 0x80), FEATURE_SET, 0);
+               }
+       }
+#endif
+
+       /* Clear the stack memory reserved for pt_regs: */
+       memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+
+       ia64_set_kr(IA64_KR_FPU_OWNER, 0);
+
+       /*
+        * Initialize default control register to defer all speculative faults.  The
+        * kernel MUST NOT depend on a particular setting of these bits (in other words,
+        * the kernel must have recovery code for all speculative accesses).  Turn on
+        * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
+        * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
+        * be fine).
+        */
+       ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
+                                       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
+       atomic_inc(&init_mm.mm_count);
+       current->active_mm = &init_mm;
+#ifdef XEN
+       if (current->domain->arch.mm)
+#else
+       if (current->mm)
+#endif
+               BUG();
+
+       ia64_mmu_init(ia64_imva(cpu_data));
+       ia64_mca_cpu_init(ia64_imva(cpu_data));
+
+#ifdef CONFIG_IA32_SUPPORT
+       ia32_cpu_init();
+#endif
+
+       /* Clear ITC to eliminate sched_clock() overflows in human time.  */
+       ia64_set_itc(0);
+
+       /* disable all local interrupt sources: */
+       ia64_set_itv(1 << 16);
+       ia64_set_lrr0(1 << 16);
+       ia64_set_lrr1(1 << 16);
+       ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
+       ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
+
+       /* clear TPR & XTP to enable all interrupt classes: */
+       ia64_setreg(_IA64_REG_CR_TPR, 0);
+#ifdef CONFIG_SMP
+       normal_xtp();
+#endif
+
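+       /* (PAL reports rid_size implemented RID bits; contexts are shifted
+        *  left by 3 to make room for the region number when RIDs are formed,
+        *  hence the "- 3" below) */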
+       /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
+       if (ia64_pal_vm_summary(NULL, &vmi) == 0)
+               max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
+       else {
+               printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 
18 RID bits\n");
+               max_ctx = (1U << 15) - 1;       /* use architected minimum */
+       }
+       while (max_ctx < ia64_ctx.max_ctx) {
+               unsigned int old = ia64_ctx.max_ctx;
+               if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
+                       break;
+       }
+
+       if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
+               printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 
physical "
+                      "stacked regs\n");
+               num_phys_stacked = 96;
+       }
+       /* size of physical stacked register partition plus 8 bytes: */
+       __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
+       platform_cpu_init();
+}
+
+void
+check_bugs (void)
+{
+       ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
+                              (unsigned long) __end___mckinley_e9_bundles);
+}
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/time.c
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/time.c    Wed Aug  3 00:25:11 2005
@@ -0,0 +1,264 @@
+/*
+ * linux/arch/ia64/kernel/time.c
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *     David Mosberger <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ * Copyright (C) 1999-2000 VA Linux Systems
+ * Copyright (C) 1999-2000 Walt Drummond <drummond@xxxxxxxxxxx>
+ */
+#include <linux/config.h>
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/profile.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/efi.h>
+#include <linux/profile.h>
+#include <linux/timex.h>
+
+#include <asm/machvec.h>
+#include <asm/delay.h>
+#include <asm/hw_irq.h>
+#include <asm/ptrace.h>
+#include <asm/sal.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#ifdef XEN
+#include <linux/jiffies.h>     // not included by xen/sched.h
+#endif
+
+extern unsigned long wall_jiffies;
+
+u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+#define TIME_KEEPER_ID 0       /* smp_processor_id() of time-keeper */
+
+#ifdef CONFIG_IA64_DEBUG_IRQ
+
+unsigned long last_cli_ip;
+EXPORT_SYMBOL(last_cli_ip);
+
+#endif
+
+#ifndef XEN
+static struct time_interpolator itc_interpolator = {
+       .shift = 16,
+       .mask = 0xffffffffffffffffLL,
+       .source = TIME_SOURCE_CPU
+};
+
+static irqreturn_t
+timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+       unsigned long new_itm;
+
+       if (unlikely(cpu_is_offline(smp_processor_id()))) {
+               return IRQ_HANDLED;
+       }
+
+       platform_timer_interrupt(irq, dev_id, regs);
+
+       new_itm = local_cpu_data->itm_next;
+
+       if (!time_after(ia64_get_itc(), new_itm))
+               printk(KERN_ERR "Oops: timer tick before it's due 
(itc=%lx,itm=%lx)\n",
+                      ia64_get_itc(), new_itm);
+
+       profile_tick(CPU_PROFILING, regs);
+
+       while (1) {
+               update_process_times(user_mode(regs));
+
+               new_itm += local_cpu_data->itm_delta;
+
+               if (smp_processor_id() == TIME_KEEPER_ID) {
+                       /*
+                        * Here we are in the timer irq handler. We have irqs locally
+                        * disabled, but we don't know if the timer_bh is running on
+                        * another CPU. We need to avoid an SMP race by acquiring the
+                        * xtime_lock.
+                        */
+                       write_seqlock(&xtime_lock);
+                       do_timer(regs);
+                       local_cpu_data->itm_next = new_itm;
+                       write_sequnlock(&xtime_lock);
+               } else
+                       local_cpu_data->itm_next = new_itm;
+
+               if (time_after(new_itm, ia64_get_itc()))
+                       break;
+       }
+
+       do {
+               /*
+                * If we're too close to the next clock tick for
+                * comfort, we increase the safety margin by
+                * intentionally dropping the next tick(s).  We do NOT
+                * update itm.next because that would force us to call
+                * do_timer() which in turn would let our clock run
+                * too fast (with the potentially devastating effect
+                * of losing monotonicity of time).
+                */
+               while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
+                       new_itm += local_cpu_data->itm_delta;
+               ia64_set_itm(new_itm);
+               /* double check, in case we got hit by a (slow) PMI: */
+       } while (time_after_eq(ia64_get_itc(), new_itm));
+       return IRQ_HANDLED;
+}
+#endif
+
+/*
+ * Encapsulate access to the itm structure for SMP.
+ */
+void
+ia64_cpu_local_tick (void)
+{
+       int cpu = smp_processor_id();
+       unsigned long shift = 0, delta;
+
+       /* arrange for the cycle counter to generate a timer interrupt: */
+       ia64_set_itv(IA64_TIMER_VECTOR);
+
+       delta = local_cpu_data->itm_delta;
+       /*
+        * Stagger the timer tick for each CPU so they don't occur all at (almost) the
+        * same time:
+        */
+       if (cpu) {
+               unsigned long hi = 1UL << ia64_fls(cpu);
+               shift = (2*(cpu - hi) + 1) * delta/hi/2;
+       }
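+       /*
+        * (shift is an odd multiple of delta/(2*hi), where hi is the largest
+        *  power of two not exceeding cpu, so ticks are spread evenly)
+        */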
+       local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
+       ia64_set_itm(local_cpu_data->itm_next);
+}
+
+static int nojitter;
+
+static int __init nojitter_setup(char *str)
+{
+       nojitter = 1;
+       printk("Jitter checking for ITC timers disabled\n");
+       return 1;
+}
+
+__setup("nojitter", nojitter_setup);
+
+
+void __devinit
+ia64_init_itm (void)
+{
+       unsigned long platform_base_freq, itc_freq;
+       struct pal_freq_ratio itc_ratio, proc_ratio;
+       long status, platform_base_drift, itc_drift;
+
+       /*
+        * According to SAL v2.6, we need to use a SAL call to determine the platform base
+        * frequency and then a PAL call to determine the frequency ratio between the ITC
+        * and the base frequency.
+        */
+       status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+                                   &platform_base_freq, &platform_base_drift);
+       if (status != 0) {
+               printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", 
ia64_sal_strerror(status));
+       } else {
+               status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
+               if (status != 0)
+                       printk(KERN_ERR "PAL_FREQ_RATIOS failed with 
status=%ld\n", status);
+       }
+       if (status != 0) {
+               /* invent "random" values */
+               printk(KERN_ERR
+                      "SAL/PAL failed to obtain frequency info---inventing 
reasonable values\n");
+               platform_base_freq = 100000000;
+               platform_base_drift = -1;       /* no drift info */
+               itc_ratio.num = 3;
+               itc_ratio.den = 1;
+       }
+       if (platform_base_freq < 40000000) {
+               printk(KERN_ERR "Platform base frequency %lu bogus---resetting 
to 75MHz!\n",
+                      platform_base_freq);
+               platform_base_freq = 75000000;
+               platform_base_drift = -1;
+       }
+       if (!proc_ratio.den)
+               proc_ratio.den = 1;     /* avoid division by zero */
+       if (!itc_ratio.den)
+               itc_ratio.den = 1;      /* avoid division by zero */
+
+       itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
+
+       local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
+       printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
+              "ITC freq=%lu.%03luMHz", smp_processor_id(),
+              platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
+              itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
+
+       if (platform_base_drift != -1) {
+               itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
+               printk("+/-%ldppm\n", itc_drift);
+       } else {
+               itc_drift = -1;
+               printk("\n");
+       }
+
+       local_cpu_data->proc_freq = 
(platform_base_freq*proc_ratio.num)/proc_ratio.den;
+       local_cpu_data->itc_freq = itc_freq;
+       local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / 
USEC_PER_SEC;
+       local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
+                                       + itc_freq/2)/itc_freq;
+
+       if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
+#ifndef XEN
+               itc_interpolator.frequency = local_cpu_data->itc_freq;
+               itc_interpolator.drift = itc_drift;
+#ifdef CONFIG_SMP
+               /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
+                * Jitter compensation requires a cmpxchg which may limit
+                * the scalability of the syscalls for retrieving time.
+                * The ITC synchronization is usually successful to within a few
+                * ITC ticks but this is not a sure thing. If you need to improve
+                * timer performance in SMP situations then boot the kernel with the
+                * "nojitter" option. However, doing so may result in time fluctuating (maybe
+                * even going backward) if the ITC offsets between the individual CPUs
+                * are too large.
+                */
+               if (!nojitter) itc_interpolator.jitter = 1;
+#endif
+               register_time_interpolator(&itc_interpolator);
+#endif
+       }
+
+       /* Setup the CPU local timer tick */
+       ia64_cpu_local_tick();
+}
+
+#ifndef XEN
+static struct irqaction timer_irqaction = {
+       .handler =      timer_interrupt,
+       .flags =        SA_INTERRUPT,
+       .name =         "timer"
+};
+
+void __init
+time_init (void)
+{
+       register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
+       efi_gettimeofday(&xtime);
+       ia64_init_itm();
+
+       /*
+        * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
+        * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
+        */
+       set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, 
-xtime.tv_nsec);
+}
+#endif
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/tlb.c
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/tlb.c     Wed Aug  3 00:25:11 2005
@@ -0,0 +1,199 @@
+/*
+ * TLB support routines.
+ *
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * 08/02/00 A. Mallick <asit.k.mallick@xxxxxxxxx>
+ *             Modified RID allocation for SMP
+ *          Goutham Rao <goutham.rao@xxxxxxxxx>
+ *              IPI based ptc implementation and A-step IPI implementation.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm/delay.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+#include <asm/pal.h>
+#include <asm/tlbflush.h>
+
+static struct {
+       unsigned long mask;     /* mask of supported purge page-sizes */
+       unsigned long max_bits; /* log2() of largest supported purge page-size */
+} purge;
+
+struct ia64_ctx ia64_ctx = {
+       .lock =         SPIN_LOCK_UNLOCKED,
+       .next =         1,
+       .limit =        (1 << 15) - 1,          /* start out with the safe 
(architected) limit */
+       .max_ctx =      ~0U
+};
+
+DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
+
+/*
+ * Acquire the ia64_ctx.lock before calling this function!
+ */
+void
+wrap_mmu_context (struct mm_struct *mm)
+{
+#ifdef XEN
+printf("wrap_mmu_context: called, not implemented\n");
+#else
+       unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
+       struct task_struct *tsk;
+       int i;
+
+       if (ia64_ctx.next > max_ctx)
+               ia64_ctx.next = 300;    /* skip daemons */
+       ia64_ctx.limit = max_ctx + 1;
+
+       /*
+        * Scan all the task's mm->context and set proper safe range
+        */
+
+       read_lock(&tasklist_lock);
+  repeat:
+       for_each_process(tsk) {
+               if (!tsk->mm)
+                       continue;
+               tsk_context = tsk->mm->context;
+               if (tsk_context == ia64_ctx.next) {
+                       if (++ia64_ctx.next >= ia64_ctx.limit) {
+                               /* empty range: reset the range limit and start over */
+                               if (ia64_ctx.next > max_ctx)
+                                       ia64_ctx.next = 300;
+                               ia64_ctx.limit = max_ctx + 1;
+                               goto repeat;
+                       }
+               }
+               if ((tsk_context > ia64_ctx.next) && (tsk_context < 
ia64_ctx.limit))
+                       ia64_ctx.limit = tsk_context;
+       }
+       read_unlock(&tasklist_lock);
+       /* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
+       {
+               int cpu = get_cpu(); /* prevent preemption/migration */
+               for (i = 0; i < NR_CPUS; ++i)
+                       if (cpu_online(i) && (i != cpu))
+                               per_cpu(ia64_need_tlb_flush, i) = 1;
+               put_cpu();
+       }
+       local_flush_tlb_all();
+#endif
+}
+
+void
+ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
+{
+       static DEFINE_SPINLOCK(ptcg_lock);
+
+       /* HW requires global serialization of ptc.ga.  */
+       spin_lock(&ptcg_lock);
+       {
+               do {
+                       /*
+                        * Flush ALAT entries also.
+                        */
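+                       /* (the purge page size is encoded as log2(bytes) in
+                        *  bits 7:2 of ptc.ga's second operand, hence nbits<<2) */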
+                       ia64_ptcga(start, (nbits<<2));
+                       ia64_srlz_i();
+                       start += (1UL << nbits);
+               } while (start < end);
+       }
+       spin_unlock(&ptcg_lock);
+}
+
+void
+local_flush_tlb_all (void)
+{
+       unsigned long i, j, flags, count0, count1, stride0, stride1, addr;
+
+       addr    = local_cpu_data->ptce_base;
+       count0  = local_cpu_data->ptce_count[0];
+       count1  = local_cpu_data->ptce_count[1];
+       stride0 = local_cpu_data->ptce_stride[0];
+       stride1 = local_cpu_data->ptce_stride[1];
+
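+       /*
+        * PAL_PTCE_INFO describes a purge "geometry": issuing ptc.e at each
+        * point of a count0 x count1 grid of addresses, stepped by
+        * stride0/stride1, flushes the entire local TLB.
+        */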
+       local_irq_save(flags);
+       for (i = 0; i < count0; ++i) {
+               for (j = 0; j < count1; ++j) {
+                       ia64_ptce(addr);
+                       addr += stride1;
+               }
+               addr += stride0;
+       }
+       local_irq_restore(flags);
+       ia64_srlz_i();                  /* srlz.i implies srlz.d */
+}
+EXPORT_SYMBOL(local_flush_tlb_all);
+
+void
+flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+#ifdef XEN
+printf("flush_tlb_range: called, not implemented\n");
+#else
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long size = end - start;
+       unsigned long nbits;
+
+       if (mm != current->active_mm) {
+               /* this does happen, but perhaps it's not worth optimizing for? */
+#ifdef CONFIG_SMP
+               flush_tlb_all();
+#else
+               mm->context = 0;
+#endif
+               return;
+       }
+
+       nbits = ia64_fls(size + 0xfff);
+       while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < 
purge.max_bits))
+               ++nbits;
+       if (nbits > purge.max_bits)
+               nbits = purge.max_bits;
+       start &= ~((1UL << nbits) - 1);
+
+# ifdef CONFIG_SMP
+       platform_global_tlb_purge(start, end, nbits);
+# else
+       do {
+               ia64_ptcl(start, (nbits<<2));
+               start += (1UL << nbits);
+       } while (start < end);
+# endif
+
+       ia64_srlz_i();                  /* srlz.i implies srlz.d */
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_range);
+
+void __devinit
+ia64_tlb_init (void)
+{
+       ia64_ptce_info_t ptce_info;
+       unsigned long tr_pgbits;
+       long status;
+
+       if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
+               printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld;"
+                      "defaulting to architected purge page-sizes.\n", status);
+               purge.mask = 0x115557000UL;
+       }
+       purge.max_bits = ia64_fls(purge.mask);
+
+       ia64_get_ptce(&ptce_info);
+       local_cpu_data->ptce_base = ptce_info.base;
+       local_cpu_data->ptce_count[0] = ptce_info.count[0];
+       local_cpu_data->ptce_count[1] = ptce_info.count[1];
+       local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
+       local_cpu_data->ptce_stride[1] = ptce_info.stride[1];
+
+       local_flush_tlb_all();          /* nuke left overs from bootstrapping... */
+}
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/linux-xen/unaligned.c
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/arch/ia64/linux-xen/unaligned.c       Wed Aug  3 00:25:11 2005
@@ -0,0 +1,1653 @@
+/*
+ * Architecture-specific unaligned trap handling.
+ *
+ * Copyright (C) 1999-2002, 2004 Hewlett-Packard Co
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * 2002/12/09   Fix rotating register handling (off-by-1 error, missing fr-rotation).  Fix
+ *             get_rse_reg() to not leak kernel bits to user-level (reading an out-of-frame
+ *             stacked register returns an undefined value; it does NOT trigger a
+ *             "rsvd register fault").
+ * 2001/10/11  Fix unaligned access to rotating registers in s/w pipelined loops.
+ * 2001/08/13  Correct size of extended floats (float_fsz) from 16 to 10 bytes.
+ * 2001/01/17  Add support for emulation of unaligned kernel accesses.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/tty.h>
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+#include <asm/rse.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
+
+#undef DEBUG_UNALIGNED_TRAP
+
+#ifdef DEBUG_UNALIGNED_TRAP
+# define DPRINT(a...)  do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0)
+# define DDUMP(str,vp,len)     dump(str, vp, len)
+
+static void
+dump (const char *str, void *vp, size_t len)
+{
+       unsigned char *cp = vp;
+       int i;
+
+       printk("%s", str);
+       for (i = 0; i < len; ++i)
+               printk (" %02x", *cp++);
+       printk("\n");
+}
+#else
+# define DPRINT(a...)
+# define DDUMP(str,vp,len)
+#endif
+
+#define IA64_FIRST_STACKED_GR  32
+#define IA64_FIRST_ROTATING_FR 32
+#define SIGN_EXT9              0xffffffffffffff00ul
+
+/*
+ * For M-unit:
+ *
+ *  opcode |   m  |   x6    |
+ * --------|------|---------|
+ * [40-37] | [36] | [35:30] |
+ * --------|------|---------|
+ *     4   |   1  |    6    | = 11 bits
+ * --------------------------
+ * However bits [31:30] are not directly useful to distinguish between
+ * load/store so we can use [35:32] instead, which gives the following
+ * mask ([40:32]) using 9 bits. The 'e' comes from the fact that we defer
+ * checking the m-bit until later in the load/store emulation.
+ */
+#define IA64_OPCODE_MASK       0x1ef
+#define IA64_OPCODE_SHIFT      32
+
+/*
+ * Table C-28 Integer Load/Store
+ *
+ * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
+ *
+ * ld8.fill, st8.fill  MUST be aligned because the RNATs are based on
+ * the address (bits [8:3]), so we must failed.
+ */
+#define LD_OP            0x080
+#define LDS_OP           0x081
+#define LDA_OP           0x082
+#define LDSA_OP          0x083
+#define LDBIAS_OP        0x084
+#define LDACQ_OP         0x085
+/* 0x086, 0x087 are not relevant */
+#define LDCCLR_OP        0x088
+#define LDCNC_OP         0x089
+#define LDCCLRACQ_OP     0x08a
+#define ST_OP            0x08c
+#define STREL_OP         0x08d
+/* 0x08e,0x8f are not relevant */
+
+/*
+ * Table C-29 Integer Load +Reg
+ *
+ * we use the ld->m (bit [36:36]) field to determine whether or not we have
+ * a load/store of this form.
+ */
+
+/*
+ * Table C-30 Integer Load/Store +Imm
+ *
+ * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
+ *
+ * ld8.fill, st8.fill  must be aligned because the NaT bits are based on
+ * the address, so we must fail and the program must be fixed.
+ */
+#define LD_IMM_OP            0x0a0
+#define LDS_IMM_OP           0x0a1
+#define LDA_IMM_OP           0x0a2
+#define LDSA_IMM_OP          0x0a3
+#define LDBIAS_IMM_OP        0x0a4
+#define LDACQ_IMM_OP         0x0a5
+/* 0x0a6, 0xa7 are not relevant */
+#define LDCCLR_IMM_OP        0x0a8
+#define LDCNC_IMM_OP         0x0a9
+#define LDCCLRACQ_IMM_OP     0x0aa
+#define ST_IMM_OP            0x0ac
+#define STREL_IMM_OP         0x0ad
+/* 0x0ae,0xaf are not relevant */
+
+/*
+ * Table C-32 Floating-point Load/Store
+ */
+#define LDF_OP           0x0c0
+#define LDFS_OP          0x0c1
+#define LDFA_OP          0x0c2
+#define LDFSA_OP         0x0c3
+/* 0x0c6 is irrelevant */
+#define LDFCCLR_OP       0x0c8
+#define LDFCNC_OP        0x0c9
+/* 0x0cb is irrelevant  */
+#define STF_OP           0x0cc
+
+/*
+ * Table C-33 Floating-point Load +Reg
+ *
+ * we use the ld->m (bit [36:36]) field to determine whether or not we have
+ * a load/store of this form.
+ */
+
+/*
+ * Table C-34 Floating-point Load/Store +Imm
+ */
+#define LDF_IMM_OP       0x0e0
+#define LDFS_IMM_OP      0x0e1
+#define LDFA_IMM_OP      0x0e2
+#define LDFSA_IMM_OP     0x0e3
+/* 0x0e6 is irrelevant */
+#define LDFCCLR_IMM_OP   0x0e8
+#define LDFCNC_IMM_OP    0x0e9
+#define STF_IMM_OP       0x0ec
+
+typedef struct {
+       unsigned long    qp:6;  /* [0:5]   */
+       unsigned long    r1:7;  /* [6:12]  */
+       unsigned long   imm:7;  /* [13:19] */
+       unsigned long    r3:7;  /* [20:26] */
+       unsigned long     x:1;  /* [27:27] */
+       unsigned long  hint:2;  /* [28:29] */
+       unsigned long x6_sz:2;  /* [30:31] */
+       unsigned long x6_op:4;  /* [32:35], x6 = x6_sz|x6_op */
+       unsigned long     m:1;  /* [36:36] */
+       unsigned long    op:4;  /* [37:40] */
+       unsigned long   pad:23; /* [41:63] */
+} load_store_t;
+
+
+typedef enum {
+       UPD_IMMEDIATE,  /* ldXZ r1=[r3],imm(9) */
+       UPD_REG         /* ldXZ r1=[r3],r2     */
+} update_t;
+
+/*
+ * We use tables to keep track of the offsets of registers in the saved state.
+ * This way we save having big switch/case statements.
+ *
+ * We use bit 0 to indicate switch_stack or pt_regs.
+ * The offset is simply shifted by 1 bit.
+ * A 2-byte value should be enough to hold any kind of offset
+ *
+ * In case the calling convention changes (and thus pt_regs/switch_stack)
+ * simply use RSW instead of RPT or vice-versa.
+ */
+
+#define RPO(x) ((size_t) &((struct pt_regs *)0)->x)
+#define RSO(x) ((size_t) &((struct switch_stack *)0)->x)
+
+#define RPT(x)         (RPO(x) << 1)
+#define RSW(x)         (1| RSO(x)<<1)
+
+#define GR_OFFS(x)     (gr_info[x]>>1)
+#define GR_IN_SW(x)    (gr_info[x] & 0x1)
+
+#define FR_OFFS(x)     (fr_info[x]>>1)
+#define FR_IN_SW(x)    (fr_info[x] & 0x1)
+
+static u16 gr_info[32]={
+       0,                      /* r0 is read-only : WE SHOULD NEVER GET THIS */
+
+       RPT(r1), RPT(r2), RPT(r3),
+
+#ifdef  CONFIG_VTI
+       RPT(r4), RPT(r5), RPT(r6), RPT(r7),
+#else   //CONFIG_VTI
+       RSW(r4), RSW(r5), RSW(r6), RSW(r7),
+#endif  //CONFIG_VTI
+
+       RPT(r8), RPT(r9), RPT(r10), RPT(r11),
+       RPT(r12), RPT(r13), RPT(r14), RPT(r15),
+
+       RPT(r16), RPT(r17), RPT(r18), RPT(r19),
+       RPT(r20), RPT(r21), RPT(r22), RPT(r23),
+       RPT(r24), RPT(r25), RPT(r26), RPT(r27),
+       RPT(r28), RPT(r29), RPT(r30), RPT(r31)
+};
+
+static u16 fr_info[32]={
+       0,                      /* constant : WE SHOULD NEVER GET THIS */
+       0,                      /* constant : WE SHOULD NEVER GET THIS */
+
+       RSW(f2), RSW(f3), RSW(f4), RSW(f5),
+
+       RPT(f6), RPT(f7), RPT(f8), RPT(f9),
+       RPT(f10), RPT(f11),
+
+       RSW(f12), RSW(f13), RSW(f14),
+       RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
+       RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
+       RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
+       RSW(f30), RSW(f31)
+};
+
+/* Invalidate ALAT entry for integer register REGNO.  */
+static void
+invala_gr (int regno)
+{
+#      define F(reg)   case reg: ia64_invala_gr(reg); break
+
+       switch (regno) {
+               F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
+               F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
+               F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
+               F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
+               F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
+               F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
+               F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
+               F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
+               F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
+               F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
+               F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
+               F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
+               F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
+               F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
+               F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
+               F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
+       }
+#      undef F
+}
+
+/* Invalidate ALAT entry for floating-point register REGNO.  */
+static void
+invala_fr (int regno)
+{
+#      define F(reg)   case reg: ia64_invala_fr(reg); break
+
+       switch (regno) {
+               F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
+               F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
+               F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
+               F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
+               F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
+               F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
+               F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
+               F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
+               F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
+               F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
+               F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
+               F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
+               F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
+               F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
+               F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
+               F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
+       }
+#      undef F
+}
+
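+/*
+ * Map an index into the rotating register region through the rotating
+ * register base (rrb): indices below sor are renamed modulo sor, mirroring
+ * the hardware's register rotation.
+ */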
+static inline unsigned long
+rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
+{
+       reg += rrb;
+       if (reg >= sor)
+               reg -= sor;
+       return reg;
+}
+
+#ifdef CONFIG_VTI
+static void
+set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
+       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
+       unsigned long rnats, nat_mask;
+       unsigned long old_rsc, new_rsc;
+       unsigned long on_kbs, rnat;
+       long sof = (regs->cr_ifs) & 0x7f;
+       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
+       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+       long ridx = r1 - 32;
+
+       if (ridx >= sof) {
+               /* this should never happen, as the "rsvd register fault" has higher priority */
+               DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
+               return;
+       }
+
+       if (ridx < sor)
+               ridx = rotate_reg(sor, rrb_gr, ridx);
+
+       old_rsc = ia64_get_rsc();
+       new_rsc = old_rsc & ~0x3;
+       ia64_set_rsc(new_rsc);
+
+       bspstore = ia64_get_bspstore();
+       bsp = kbs + (regs->loadrs >> 19);       /* 16 + 3 */
+
+       addr = ia64_rse_skip_regs(bsp, -sof + ridx);
+       nat_mask = 1UL << ia64_rse_slot_num(addr);
+       rnat_addr = ia64_rse_rnat_addr(addr);
+
+       if (addr >= bspstore) {
+               ia64_flushrs();
+               ia64_mf();
+               *addr = val;
+               bspstore = ia64_get_bspstore();
+               rnat = ia64_get_rnat();
+               if (bspstore < rnat_addr)
+                       rnat = rnat & ~nat_mask;
+               else
+                       *rnat_addr = (*rnat_addr) & ~nat_mask;
+               ia64_mf();
+               ia64_loadrs();
+               ia64_set_rnat(rnat);
+       } else {
+               rnat = ia64_get_rnat();
+               *addr = val;
+               if (bspstore < rnat_addr)
+                       rnat = rnat & ~nat_mask;
+               else
+                       *rnat_addr = (*rnat_addr) & ~nat_mask;
+               ia64_set_bspstore(bspstore);
+               ia64_set_rnat(rnat);
+       }
+       ia64_set_rsc(old_rsc);
+}
+
+
+static void
+get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
+       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
+       unsigned long rnats, nat_mask;
+       unsigned long on_kbs;
+       unsigned long old_rsc, new_rsc;
+       long sof = (regs->cr_ifs) & 0x7f;
+       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
+       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+       long ridx = r1 - 32;
+
+       if (ridx >= sof) {
+               /* read of out-of-frame register returns an undefined value; 0 in our case.  */
+               DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
+               panic("wrong stack register number");
+       }
+
+       if (ridx < sor)
+               ridx = rotate_reg(sor, rrb_gr, ridx);
+
+       old_rsc = ia64_get_rsc();
+       new_rsc = old_rsc & ~0x3;
+       ia64_set_rsc(new_rsc);
+
+       bspstore = ia64_get_bspstore();
+       bsp = kbs + (regs->loadrs >> 19);       /* 16 + 3 */
+
+       addr = ia64_rse_skip_regs(bsp, -sof + ridx);
+       nat_mask = 1UL << ia64_rse_slot_num(addr);
+       rnat_addr = ia64_rse_rnat_addr(addr);
+
+       if (addr >= bspstore) {
+               ia64_flushrs();
+               ia64_mf();
+               bspstore = ia64_get_bspstore();
+       }
+       *val = *addr;
+       if (bspstore < rnat_addr)
+               *nat = !!(ia64_get_rnat() & nat_mask);
+       else
+               *nat = !!((*rnat_addr) & nat_mask);
+       ia64_set_rsc(old_rsc);
+}
+#else // CONFIG_VTI
+static void
+set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
+       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
+       unsigned long rnats, nat_mask;
+       unsigned long on_kbs;
+       long sof = (regs->cr_ifs) & 0x7f;
+       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
+       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+       long ridx = r1 - 32;
+
+       if (ridx >= sof) {
+		/* this should never happen, as the "rsvd register fault" has higher priority */
+		DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
+               return;
+       }
+
+       if (ridx < sor)
+               ridx = rotate_reg(sor, rrb_gr, ridx);
+
+	DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
+	       r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
+
+       on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
+	addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
+       if (addr >= kbs) {
+               /* the register is on the kernel backing store: easy... */
+               rnat_addr = ia64_rse_rnat_addr(addr);
+               if ((unsigned long) rnat_addr >= sw->ar_bspstore)
+                       rnat_addr = &sw->ar_rnat;
+               nat_mask = 1UL << ia64_rse_slot_num(addr);
+
+               *addr = val;
+               if (nat)
+                       *rnat_addr |=  nat_mask;
+               else
+                       *rnat_addr &= ~nat_mask;
+               return;
+       }
+
+       if (!user_stack(current, regs)) {
+		DPRINT("ignoring kernel write to r%lu; register isn't on the kernel RBS!", r1);
+               return;
+       }
+
+       bspstore = (unsigned long *)regs->ar_bspstore;
+       ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
+       bsp     = ia64_rse_skip_regs(ubs_end, -sof);
+       addr    = ia64_rse_skip_regs(bsp, ridx);
+
+	DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
+
+	ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
+
+       rnat_addr = ia64_rse_rnat_addr(addr);
+
+	ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
+	DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n",
+	       (void *) rnat_addr, rnats, nat, (rnats >> ia64_rse_slot_num(addr)) & 1);
+
+       nat_mask = 1UL << ia64_rse_slot_num(addr);
+       if (nat)
+               rnats |=  nat_mask;
+       else
+               rnats &= ~nat_mask;
+	ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);
+
+       DPRINT("rnat changed to @%p = 0x%lx\n", (void *) rnat_addr, rnats);
+}
+
+
+static void
+get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
+       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
+       unsigned long rnats, nat_mask;
+       unsigned long on_kbs;
+       long sof = (regs->cr_ifs) & 0x7f;
+       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
+       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+       long ridx = r1 - 32;
+
+       if (ridx >= sof) {
+		/* read of out-of-frame register returns an undefined value; 0 in our case.  */
+		DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
+               goto fail;
+       }
+
+       if (ridx < sor)
+               ridx = rotate_reg(sor, rrb_gr, ridx);
+
+	DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
+	       r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
+
+       on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
+	addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
+       if (addr >= kbs) {
+               /* the register is on the kernel backing store: easy... */
+               *val = *addr;
+               if (nat) {
+                       rnat_addr = ia64_rse_rnat_addr(addr);
+                       if ((unsigned long) rnat_addr >= sw->ar_bspstore)
+                               rnat_addr = &sw->ar_rnat;
+                       nat_mask = 1UL << ia64_rse_slot_num(addr);
+                       *nat = (*rnat_addr & nat_mask) != 0;
+               }
+               return;
+       }
+
+       if (!user_stack(current, regs)) {
+		DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1);
+               goto fail;
+       }
+
+       bspstore = (unsigned long *)regs->ar_bspstore;
+       ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
+       bsp     = ia64_rse_skip_regs(ubs_end, -sof);
+       addr    = ia64_rse_skip_regs(bsp, ridx);
+
+	DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
+
+	ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
+
+       if (nat) {
+               rnat_addr = ia64_rse_rnat_addr(addr);
+               nat_mask = 1UL << ia64_rse_slot_num(addr);
+
+		ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
+		DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);
+               *nat = (rnats & nat_mask) != 0;
+       }
+       return;
+
+  fail:
+       *val = 0;
+       if (nat)
+               *nat = 0;
+       return;
+}
+#endif // CONFIG_VTI
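
Both variants above lean on the RSE address helpers from Linux's
include/asm-ia64/rse.h: every 64th slot of the backing store (slot 63 of each
0x40-slot group) holds the RNAT collection word rather than a register. For
reference, a sketch of what those helpers compute; treat the exact
expressions as indicative of the rse.h definitions rather than authoritative:

	/* sketch of the rse.h helpers used by set_rse_reg()/get_rse_reg() */
	static inline unsigned long
	ia64_rse_slot_num (unsigned long *addr)
	{
		/* which of the 64 slots in the current RNAT group this address is */
		return (((unsigned long) addr) >> 3) & 0x3f;
	}

	static inline unsigned long *
	ia64_rse_rnat_addr (unsigned long *slot_addr)
	{
		/* the RNAT collection word sits in slot 63 of the group */
		return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
	}

	static inline unsigned long *
	ia64_rse_skip_regs (unsigned long *addr, long num_regs)
	{
		/* skip num_regs registers, stepping over one RNAT slot per 63 registers */
		long delta = ia64_rse_slot_num(addr) + num_regs;

		if (num_regs < 0)
			delta -= 0x3e;
		return addr + num_regs + delta/0x3f;
	}
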
+
+
+#ifdef XEN
+void
+#else
+static void
+#endif
+setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long addr;
+       unsigned long bitmask;
+       unsigned long *unat;
+
+       /*
+        * First takes care of stacked registers
+        */
+       if (regnum >= IA64_FIRST_STACKED_GR) {
+               set_rse_reg(regs, regnum, val, nat);
+               return;
+       }
+
+       /*
+	 * Using r0 as a target raises a General Exception fault which has higher priority
+        * than the Unaligned Reference fault.
+        */
+
+       /*
+        * Now look at registers in [0-31] range and init correct UNAT
+        */
+       if (GR_IN_SW(regnum)) {
+               addr = (unsigned long)sw;
+               unat = &sw->ar_unat;
+       } else {
+               addr = (unsigned long)regs;
+#ifdef CONFIG_VTI
+               unat = &regs->eml_unat;
+#else //CONFIG_VTI
+               unat = &sw->caller_unat;
+#endif  //CONFIG_VTI
+       }
+       DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
+              addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
+       /*
+        * add offset from base of struct
+        * and do it !
+        */
+       addr += GR_OFFS(regnum);
+
+       *(unsigned long *)addr = val;
+
+       /*
+        * We need to clear the corresponding UNAT bit to fully emulate the load
+	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
+        */
+       bitmask   = 1UL << (addr >> 3 & 0x3f);
+	DPRINT("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, (void *) unat, *unat);
+       if (nat) {
+               *unat |= bitmask;
+       } else {
+               *unat &= ~bitmask;
+       }
+	DPRINT("*0x%lx=0x%lx NaT=%d new unat: %p=%lx\n", addr, val, nat, (void *) unat, *unat);
+}
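
The UNAT bookkeeping in setreg() exploits the spill/fill convention: the NaT
bit belonging to a value spilled at address addr lives at bit addr{8:3} of
the matching UNAT word. A worked example of the bitmask computation, using a
hypothetical save address:

	/* illustration only: register saved at this (hypothetical) address */
	unsigned long addr = 0xe000000000000458UL;
	unsigned long bitmask = 1UL << (addr >> 3 & 0x3f);	/* 0x458 >> 3 = 0x8b, & 0x3f = 0x0b */
	/* bitmask == 1UL << 11: bit 11 of the UNAT word tracks this slot */
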
+
+/*
+ * Return the (rotated) index for floating point register REGNUM (REGNUM must be in the
+ * range from 32-127, result is in the range from 0-95.
+ */
+static inline unsigned long
+fph_index (struct pt_regs *regs, long regnum)
+{
+       unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
+       return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
+}
+
+static void
+setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
+{
+       struct switch_stack *sw = (struct switch_stack *)regs - 1;
+       unsigned long addr;
+
+       /*
+        * From EAS-2.5: FPDisableFault has higher priority than Unaligned
+        * Fault. Thus, when we get here, we know the partition is enabled.
+        * To update f32-f127, there are three choices:
+        *
+        *      (1) save f32-f127 to thread.fph and update the values there
+	 *	(2) use a gigantic switch statement to directly access the registers
+        *      (3) generate code on the fly to update the desired register
+        *
+        * For now, we are using approach (1).
+        */
+       if (regnum >= IA64_FIRST_ROTATING_FR) {
+               ia64_sync_fph(current);
+#ifdef XEN
+               current->arch._thread.fph[fph_index(regs, regnum)] = *fpval;
+#else
+               current->thread.fph[fph_index(regs, regnum)] = *fpval;
+#endif
+       } else {
+               /*
+                * pt_regs or switch_stack ?
+                */
+               if (FR_IN_SW(regnum)) {
+                       addr = (unsigned long)sw;
+               } else {
+                       addr = (unsigned long)regs;
+               }
+
+               DPRINT("tmp_base=%lx offset=%d\n", addr, FR_OFFS(regnum));
+
+               addr += FR_OFFS(regnum);
+               *(struct ia64_fpreg *)addr = *fpval;
+
+               /*
+                * mark the low partition as being used now
+                *
+                * It is highly unlikely that this bit is not already set, but
+                * let's do it for safety.
+                */
+               regs->cr_ipsr |= IA64_PSR_MFL;
+       }
+}
+
+/*
+ * Those 2 inline functions generate the spilled versions of the constant floating point
+ * registers which can be used with stfX
+ */
+static inline void
+float_spill_f0 (struct ia64_fpreg *final)
+{
+       ia64_stf_spill(final, 0);
+}
+
+static inline void
+float_spill_f1 (struct ia64_fpreg *final)
+{
+       ia64_stf_spill(final, 1);
+}
+
+static void
+getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long addr;
+
+       /*
+        * From EAS-2.5: FPDisableFault has higher priority than
+        * Unaligned Fault. Thus, when we get here, we know the partition is
+        * enabled.
+        *
+	 * When regnum > 31, the register is still live and we need to force a save
+	 * to current->thread.fph to get access to it.  See discussion in setfpreg()
+        * for reasons and other ways of doing this.
+        */
+       if (regnum >= IA64_FIRST_ROTATING_FR) {
+               ia64_flush_fph(current);
+#ifdef XEN
+               *fpval = current->arch._thread.fph[fph_index(regs, regnum)];
+#else
+               *fpval = current->thread.fph[fph_index(regs, regnum)];
+#endif
+       } else {
+               /*
+                * f0 = 0.0, f1= 1.0. Those registers are constant and are thus
+                * not saved, we must generate their spilled form on the fly
+                */
+               switch(regnum) {
+               case 0:
+                       float_spill_f0(fpval);
+                       break;
+               case 1:
+                       float_spill_f1(fpval);
+                       break;
+               default:
+                       /*
+                        * pt_regs or switch_stack ?
+                        */
+                       addr =  FR_IN_SW(regnum) ? (unsigned long)sw
+                                                : (unsigned long)regs;
+
+                       DPRINT("is_sw=%d tmp_base=%lx offset=0x%x\n",
+                              FR_IN_SW(regnum), addr, FR_OFFS(regnum));
+
+                       addr  += FR_OFFS(regnum);
+                       *fpval = *(struct ia64_fpreg *)addr;
+               }
+       }
+}
+
+
+#ifdef XEN
+void
+#else
+static void
+#endif
+getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
+{
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       unsigned long addr, *unat;
+
+       if (regnum >= IA64_FIRST_STACKED_GR) {
+               get_rse_reg(regs, regnum, val, nat);
+               return;
+       }
+
+       /*
+        * take care of r0 (read-only always evaluate to 0)
+        */
+       if (regnum == 0) {
+               *val = 0;
+               if (nat)
+                       *nat = 0;
+               return;
+       }
+
+       /*
+        * Now look at registers in [0-31] range and init correct UNAT
+        */
+       if (GR_IN_SW(regnum)) {
+               addr = (unsigned long)sw;
+               unat = &sw->ar_unat;
+       } else {
+               addr = (unsigned long)regs;
+#ifdef  CONFIG_VTI
+		unat = &regs->eml_unat;
+#else   //CONFIG_VTI
+               unat = &sw->caller_unat;
+#endif  //CONFIG_VTI
+       }
+
+       DPRINT("addr_base=%lx offset=0x%x\n", addr,  GR_OFFS(regnum));
+
+       addr += GR_OFFS(regnum);
+
+       *val  = *(unsigned long *)addr;
+
+       /*
+        * do it only when requested
+        */
+       if (nat)
+               *nat  = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
+}
+
+static void
+emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
+{
+       /*
+        * IMPORTANT:
+        * Given the way we handle unaligned speculative loads, we should
+        * not get to this point in the code but we keep this sanity check,
+        * just in case.
+        */
+       if (ld.x6_op == 1 || ld.x6_op == 3) {
+		printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
+		die_if_kernel("unaligned reference on speculative load with register update\n",
+			      regs, 30);
+       }
+
+
+       /*
+	 * at this point, we know that the base register to update is valid i.e.,
+        * it's not r0
+        */
+       if (type == UPD_IMMEDIATE) {
+               unsigned long imm;
+
+               /*
+                * Load +Imm: ldXZ r1=[r3],imm(9)
+                *
+                *
+                * form imm9: [13:19] contain the first 7 bits
+                */
+               imm = ld.x << 7 | ld.imm;
+
+               /*
+                * sign extend (1+8bits) if m set
+                */
+               if (ld.m) imm |= SIGN_EXT9;
+
+               /*
+                * ifa == r3 and we know that the NaT bit on r3 was clear so
+                * we can directly use ifa.
+                */
+               ifa += imm;
+
+               setreg(ld.r3, ifa, 0, regs);
+
+		DPRINT("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld.x, ld.m, imm, ifa);
+
+       } else if (ld.m) {
+               unsigned long r2;
+               int nat_r2;
+
+               /*
+                * Load +Reg Opcode: ldXZ r1=[r3],r2
+                *
+		 * Note that we update r3 even in the case of ldfX.a
+                * (where the load does not happen)
+                *
+                * The way the load algorithm works, we know that r3 does not
+                * have its NaT bit set (would have gotten NaT consumption
+                * before getting the unaligned fault). So we can use ifa
+                * which equals r3 at this point.
+                *
+                * IMPORTANT:
+                * The above statement holds ONLY because we know that we
+                * never reach this code when trying to do a ldX.s.
+		 * If we ever make it to here on an ldfX.s, that assumption
+		 * no longer holds.
+		 */
+               getreg(ld.imm, &r2, &nat_r2, regs);
+
+               ifa += r2;
+
+               /*
+                * propagate Nat r2 -> r3
+                */
+               setreg(ld.r3, ifa, nat_r2, regs);
+
+		DPRINT("imm=%d r2=%ld r3=0x%lx nat_r2=%d\n", ld.imm, r2, ifa, nat_r2);
+       }
+}
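
The imm9 immediate used by the post-increment forms is pieced together from
two instruction fields plus the sign bit m. Assuming SIGN_EXT9 is the
all-ones mask above bit 7 (0xffffffffffffff00, as in the Linux version of
this file), a worked example of the assembly and sign extension:

	/* illustration only: hypothetical field values for a -123 update */
	unsigned long x = 1, imm7 = 0x05, m = 1;
	unsigned long imm = x << 7 | imm7;	/* 0x85: the 8-bit magnitude */
	if (m)
		imm |= 0xffffffffffffff00UL;	/* SIGN_EXT9: imm becomes -123 */
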
+
+
+static int
+emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+{
+       unsigned int len = 1 << ld.x6_sz;
+       unsigned long val = 0;
+
+       /*
+        * r0, as target, doesn't need to be checked because Illegal Instruction
+        * faults have higher priority than unaligned faults.
+        *
+        * r0 cannot be found as the base as it would never generate an
+        * unaligned reference.
+        */
+
+       /*
+        * ldX.a we will emulate load and also invalidate the ALAT entry.
+        * See comment below for explanation on how we handle ldX.a
+        */
+
+       if (len != 2 && len != 4 && len != 8) {
+               DPRINT("unknown size: x6=%d\n", ld.x6_sz);
+               return -1;
+       }
+       /* this assumes little-endian byte-order: */
+       if (copy_from_user(&val, (void __user *) ifa, len))
+               return -1;
+       setreg(ld.r1, val, 0, regs);
+
+       /*
+        * check for updates on any kind of loads
+        */
+       if (ld.op == 0x5 || ld.m)
+		emulate_load_updates(ld.op == 0x5 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
+
+	/*
+	 * handling of various loads (based on EAS2.4):
+	 *
+	 * ldX.acq (ordered load):
+	 *	- acquire semantics would have been used, so force fence instead.
+	 *
+	 * ldX.c.clr (check load and clear):
+	 *	- if we get to this handler, it's because the entry was not in the ALAT.
+	 *	  Therefore the operation reverts to a normal load
+	 *
+	 * ldX.c.nc (check load no clear):
+	 *	- same as previous one
+	 *
+	 * ldX.c.clr.acq (ordered check load and clear):
+	 *	- same as above for c.clr part. The load needs to have acquire semantics. So
+	 *	  we use the fence semantics which is stronger and thus ensures correctness.
+	 *
+	 * ldX.a (advanced load):
+	 *	- suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the
+	 *	  address doesn't match requested size alignment. This means that we would
+	 *	  possibly need more than one load to get the result.
+	 *
+	 *	  The load part can be handled just like a normal load, however the difficult
+	 *	  part is to get the right thing into the ALAT. The critical piece of information
+	 *	  is the base address of the load & size. To do that, a ld.a must be executed;
+	 *	  clearly any address can be pushed into the table by using ld1.a r1=[r3]. Now
+	 *	  if we use the same target register, we will be okay for the check.a instruction.
+	 *	  If we look at the store, basically a stX [r3]=r1 checks the ALAT for any entry
+	 *	  which would overlap within [r3,r3+X] (the size of the load was stored in the
+	 *	  ALAT). If such an entry is found the entry is invalidated. But this is not good
+	 *	  enough, take the following example:
+	 *		r3=3
+	 *		ld4.a r1=[r3]
+	 *
+	 *	  Could be emulated by doing:
+	 *		ld1.a r1=[r3],1
+	 *		store to temporary;
+	 *		ld1.a r1=[r3],1
+	 *		store & shift to temporary;
+	 *		ld1.a r1=[r3],1
+	 *		store & shift to temporary;
+	 *		ld1.a r1=[r3]
+	 *		store & shift to temporary;
+	 *		r1=temporary
+	 *
+	 *	  So in this case, you would get the right value in r1 but the wrong info in
+	 *	  the ALAT.  Notice that you could do it in reverse to finish with address 3
+	 *	  but you would still get the size wrong.  To get the size right, one needs to
+	 *	  execute exactly the same kind of load. You could do it from an aligned
+	 *	  temporary location, but you would get the address wrong.
+	 *
+	 *	  So no matter what, it is not possible to emulate an advanced load
+	 *	  correctly. But is that really critical ?
+	 *
+	 *	  We will always convert ld.a into a normal load with ALAT invalidated.  This
+	 *	  will enable compiler to do optimization where certain code path after ld.a
+	 *	  is not required to have ld.c/chk.a, e.g., code path with no intervening stores.
+	 *
+	 *	  If there is a store after the advanced load, one must either do a ld.c.* or
+	 *	  chk.a.* to reuse the value stored in the ALAT. Both can "fail" (meaning no
+	 *	  entry found in ALAT), and that's perfectly ok because:
+	 *
+	 *		- ld.c.*, if the entry is not present a normal load is executed
+	 *		- chk.a.*, if the entry is not present, execution jumps to recovery code
+	 *
+	 *	  In either case, the load can be potentially retried in another form.
+	 *
+	 *	  ALAT must be invalidated for the register (so that chk.a or ld.c don't pick
+	 *	  up a stale entry later). The register base update MUST also be performed.
+	 */
+
+       /*
+        * when the load has the .acq completer then
+        * use ordering fence.
+        */
+       if (ld.x6_op == 0x5 || ld.x6_op == 0xa)
+               mb();
+
+       /*
+        * invalidate ALAT entry in case of advanced load
+        */
+       if (ld.x6_op == 0x2)
+               invala_gr(ld.r1);
+
+       return 0;
+}
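
The little-endian assumption flagged before the copy_from_user() call is what
makes the zero-initialized val work: copying len bytes into the low end of a
zeroed 8-byte word directly yields the zero-extended result. A small
illustration with hypothetical bytes:

	unsigned long val = 0;
	unsigned char mem[4] = { 0x78, 0x56, 0x34, 0x12 };	/* unaligned source */
	memcpy(&val, mem, 4);	/* little-endian: val == 0x0000000012345678 */
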
+
+static int
+emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+{
+       unsigned long r2;
+       unsigned int len = 1 << ld.x6_sz;
+
+       /*
+        * if we get to this handler, Nat bits on both r3 and r2 have already
+        * been checked. so we don't need to do it
+        *
+        * extract the value to be stored
+        */
+       getreg(ld.imm, &r2, NULL, regs);
+
+       /*
+        * we rely on the macros in unaligned.h for now i.e.,
+        * we let the compiler figure out how to read memory gracefully.
+        *
+        * We need this switch/case because the way the inline function
+        * works. The code is optimized by the compiler and looks like
+        * a single switch/case.
+        */
+       DPRINT("st%d [%lx]=%lx\n", len, ifa, r2);
+
+       if (len != 2 && len != 4 && len != 8) {
+               DPRINT("unknown size: x6=%d\n", ld.x6_sz);
+               return -1;
+       }
+
+       /* this assumes little-endian byte-order: */
+       if (copy_to_user((void __user *) ifa, &r2, len))
+               return -1;
+
+       /*
+        * stX [r3]=r2,imm(9)
+        *
+        * NOTE:
+        * ld.r3 can never be r0, because r0 would not generate an
+        * unaligned access.
+        */
+       if (ld.op == 0x5) {
+               unsigned long imm;
+
+               /*
+		 * form imm9: [12:6] contain the first 7 bits
+                */
+               imm = ld.x << 7 | ld.r1;
+               /*
+                * sign extend (8bits) if m set
+                */
+               if (ld.m) imm |= SIGN_EXT9;
+               /*
+                * ifa == r3 (NaT is necessarily cleared)
+                */
+               ifa += imm;
+
+               DPRINT("imm=%lx r3=%lx\n", imm, ifa);
+
+               setreg(ld.r3, ifa, 0, regs);
+       }
+       /*
+        * we don't have alat_invalidate_multiple() so we need
+        * to do the complete flush :-<<
+        */
+       ia64_invala();
+
+       /*
+        * stX.rel: use fence instead of release
+        */
+       if (ld.x6_op == 0xd)
+               mb();
+
+       return 0;
+}
+
+/*
+ * floating point operations sizes in bytes
+ */
+static const unsigned char float_fsz[4]={
+       10, /* extended precision (e) */
+       8,  /* integer (8)            */
+       4,  /* single precision (s)   */
+       8   /* double precision (d)   */
+};
+
+static inline void
+mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldfe(6, init);
+       ia64_stop();
+       ia64_stf_spill(final, 6);
+}
+
+static inline void
+mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldf8(6, init);
+       ia64_stop();
+       ia64_stf_spill(final, 6);
+}
+
+static inline void
+mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldfs(6, init);
+       ia64_stop();
+       ia64_stf_spill(final, 6);
+}
+
+static inline void
+mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldfd(6, init);
+       ia64_stop();
+       ia64_stf_spill(final, 6);
+}
+
+static inline void
+float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldf_fill(6, init);
+       ia64_stop();
+       ia64_stfe(final, 6);
+}
+
+static inline void
+float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldf_fill(6, init);
+       ia64_stop();
+       ia64_stf8(final, 6);
+}
+
+static inline void
+float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldf_fill(6, init);
+       ia64_stop();
+       ia64_stfs(final, 6);
+}
+
+static inline void
+float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
+{
+       ia64_ldf_fill(6, init);
+       ia64_stop();
+       ia64_stfd(final, 6);
+}
+
+static int
+emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+{
+       struct ia64_fpreg fpr_init[2];
+       struct ia64_fpreg fpr_final[2];
+       unsigned long len = float_fsz[ld.x6_sz];
+
+       /*
+	 * fr0 & fr1 don't need to be checked because Illegal Instruction faults have
+	 * higher priority than unaligned faults.
+	 *
+	 * r0 cannot be found as the base as it would never generate an unaligned
+	 * reference.
+        */
+
+       /*
+        * make sure we get clean buffers
+        */
+       memset(&fpr_init, 0, sizeof(fpr_init));
+       memset(&fpr_final, 0, sizeof(fpr_final));
+
+       /*
+        * ldfpX.a: we don't try to emulate anything but we must
+        * invalidate the ALAT entry and execute updates, if any.
+        */
+       if (ld.x6_op != 0x2) {
+               /*
+		 * This assumes little-endian byte-order.  Note that there is no "ldfpe"
+                * instruction:
+                */
+		if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
+		    || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
+			return -1;
+
+		DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
+		DDUMP("fpr_init =", &fpr_init, 2*len);
+               /*
+                * XXX fixme
+                * Could optimize inlines by using ldfpX & 2 spills
+                */
+               switch( ld.x6_sz ) {
+                       case 0:
+                               mem2float_extended(&fpr_init[0], &fpr_final[0]);
+                               mem2float_extended(&fpr_init[1], &fpr_final[1]);
+                               break;
+                       case 1:
+                               mem2float_integer(&fpr_init[0], &fpr_final[0]);
+                               mem2float_integer(&fpr_init[1], &fpr_final[1]);
+                               break;
+                       case 2:
+                               mem2float_single(&fpr_init[0], &fpr_final[0]);
+                               mem2float_single(&fpr_init[1], &fpr_final[1]);
+                               break;
+                       case 3:
+                               mem2float_double(&fpr_init[0], &fpr_final[0]);
+                               mem2float_double(&fpr_init[1], &fpr_final[1]);
+                               break;
+               }
+               DDUMP("fpr_final =", &fpr_final, 2*len);
+               /*
+                * XXX fixme
+                *
+		 * A possible optimization would be to drop fpr_final and directly
+                * use the storage from the saved context i.e., the actual final
+                * destination (pt_regs, switch_stack or thread structure).
+                */
+               setfpreg(ld.r1, &fpr_final[0], regs);
+               setfpreg(ld.imm, &fpr_final[1], regs);
+       }
+
+       /*
+        * Check for updates: only immediate updates are available for this
+        * instruction.
+        */
+       if (ld.m) {
+               /*
+                * the immediate is implicit given the ldsz of the operation:
+                * single: 8 (2x4) and for  all others it's 16 (2x8)
+                */
+               ifa += len<<1;
+
+               /*
+                * IMPORTANT:
+                * the fact that we force the NaT of r3 to zero is ONLY valid
+                * as long as we don't come here with a ldfpX.s.
+                * For this reason we keep this sanity check
+                */
+               if (ld.x6_op == 1 || ld.x6_op == 3)
+			printk(KERN_ERR "%s: register update on speculative load pair, error\n",
+			       __FUNCTION__);
+
+               setreg(ld.r3, ifa, 0, regs);
+       }
+
+       /*
+        * Invalidate ALAT entries, if any, for both registers.
+        */
+       if (ld.x6_op == 0x2) {
+               invala_fr(ld.r1);
+               invala_fr(ld.imm);
+       }
+       return 0;
+}
+
+
+static int
+emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+{
+       struct ia64_fpreg fpr_init;
+       struct ia64_fpreg fpr_final;
+       unsigned long len = float_fsz[ld.x6_sz];
+
+       /*
+        * fr0 & fr1 don't need to be checked because Illegal Instruction
+        * faults have higher priority than unaligned faults.
+        *
+        * r0 cannot be found as the base as it would never generate an
+        * unaligned reference.
+        */
+
+       /*
+        * make sure we get clean buffers
+        */
+	memset(&fpr_init, 0, sizeof(fpr_init));
+	memset(&fpr_final, 0, sizeof(fpr_final));
+
+       /*
+        * ldfX.a we don't try to emulate anything but we must
+        * invalidate the ALAT entry.
+	 * See comments in ldX for descriptions on how the various loads are handled.
+        */
+       if (ld.x6_op != 0x2) {
+               if (copy_from_user(&fpr_init, (void __user *) ifa, len))
+                       return -1;
+
+               DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
+               DDUMP("fpr_init =", &fpr_init, len);
+               /*
+                * we only do something for x6_op={0,8,9}
+                */
+               switch( ld.x6_sz ) {
+                       case 0:
+                               mem2float_extended(&fpr_init, &fpr_final);
+                               break;
+                       case 1:
+                               mem2float_integer(&fpr_init, &fpr_final);
+                               break;
+                       case 2:
+                               mem2float_single(&fpr_init, &fpr_final);
+                               break;
+                       case 3:
+                               mem2float_double(&fpr_init, &fpr_final);
+                               break;
+               }
+               DDUMP("fpr_final =", &fpr_final, len);
+               /*
+                * XXX fixme
+                *
+		 * A possible optimization would be to drop fpr_final and directly
+                * use the storage from the saved context i.e., the actual final
+                * destination (pt_regs, switch_stack or thread structure).
+                */
+               setfpreg(ld.r1, &fpr_final, regs);
+       }
+
+       /*
+        * check for updates on any loads
+        */
+       if (ld.op == 0x7 || ld.m)
+		emulate_load_updates(ld.op == 0x7 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
+
+       /*
+        * invalidate ALAT entry in case of advanced floating point loads
+        */
+       if (ld.x6_op == 0x2)
+               invala_fr(ld.r1);
+
+       return 0;
+}
+
+
+static int
+emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+{
+       struct ia64_fpreg fpr_init;
+       struct ia64_fpreg fpr_final;
+       unsigned long len = float_fsz[ld.x6_sz];
+
+       /*
+        * make sure we get clean buffers
+        */
+	memset(&fpr_init, 0, sizeof(fpr_init));
+	memset(&fpr_final, 0, sizeof(fpr_final));
+
+       /*
+        * if we get to this handler, Nat bits on both r3 and r2 have already
+        * been checked. so we don't need to do it
+        *
+        * extract the value to be stored
+        */
+       getfpreg(ld.imm, &fpr_init, regs);
+       /*
+        * during this step, we extract the spilled registers from the saved
+        * context i.e., we refill. Then we store (no spill) to temporary
+        * aligned location
+        */
+       switch( ld.x6_sz ) {
+               case 0:
+                       float2mem_extended(&fpr_init, &fpr_final);
+                       break;
+               case 1:
+                       float2mem_integer(&fpr_init, &fpr_final);
+                       break;
+               case 2:
+                       float2mem_single(&fpr_init, &fpr_final);
+                       break;
+               case 3:
+                       float2mem_double(&fpr_init, &fpr_final);
+                       break;
+       }
+       DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
+       DDUMP("fpr_init =", &fpr_init, len);
+       DDUMP("fpr_final =", &fpr_final, len);
+
+       if (copy_to_user((void __user *) ifa, &fpr_final, len))
+               return -1;
+
+       /*
+        * stfX [r3]=r2,imm(9)
+        *
+        * NOTE:
+        * ld.r3 can never be r0, because r0 would not generate an
+        * unaligned access.
+        */
+       if (ld.op == 0x7) {
+               unsigned long imm;
+
+               /*
+                * form imm9: [12:6] contain first 7bits
+                */
+               imm = ld.x << 7 | ld.r1;
+               /*
+                * sign extend (8bits) if m set
+                */
+               if (ld.m)
+                       imm |= SIGN_EXT9;
+               /*
+                * ifa == r3 (NaT is necessarily cleared)
+                */
+               ifa += imm;
+
+               DPRINT("imm=%lx r3=%lx\n", imm, ifa);
+
+               setreg(ld.r3, ifa, 0, regs);
+       }
+       /*
+        * we don't have alat_invalidate_multiple() so we need
+        * to do the complete flush :-<<
+        */
+       ia64_invala();
+
+       return 0;
+}
+
+/*
+ * Make sure we log the unaligned access, so that user/sysadmin can notice it and
+ * eventually fix the program.  However, we don't want to do that for every access so we
+ * pace it with jiffies.  This isn't really MP-safe, but it doesn't really have to be
+ * either...
+ */
+static int
+within_logging_rate_limit (void)
+{
+       static unsigned long count, last_time;
+
+       if (jiffies - last_time > 5*HZ)
+               count = 0;
+       if (++count < 5) {
+               last_time = jiffies;
+               return 1;
+       }
+       return 0;
+
+}
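
The limiter lets a burst of four messages through and then stays quiet until
5*HZ ticks have elapsed since the last logged message. A self-contained model
with a fake tick counter, purely to illustrate the pacing (all names here are
hypothetical):

	#include <stdio.h>

	#define HZ 100
	static unsigned long jiffies;	/* stand-in for the kernel tick counter */

	static int
	rate_limit_model (void)
	{
		static unsigned long count, last_time;

		if (jiffies - last_time > 5*HZ)
			count = 0;
		if (++count < 5) {
			last_time = jiffies;
			return 1;
		}
		return 0;
	}

	int main (void)
	{
		int i, logged = 0;

		for (i = 0; i < 20; i++, jiffies++)	/* 20 faults in 20 ticks */
			logged += rate_limit_model();
		printf("%d of 20 logged\n", logged);	/* prints: 4 of 20 logged */
		return 0;
	}
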
+
+void
+ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
+{
+#ifdef XEN
+printk("ia64_handle_unaligned: called, not working yet\n");
+#else
+       struct ia64_psr *ipsr = ia64_psr(regs);
+       mm_segment_t old_fs = get_fs();
+       unsigned long bundle[2];
+       unsigned long opcode;
+       struct siginfo si;
+       const struct exception_table_entry *eh = NULL;
+       union {
+               unsigned long l;
+               load_store_t insn;
+       } u;
+       int ret = -1;
+
+       if (ia64_psr(regs)->be) {
+               /* we don't support big-endian accesses */
+		die_if_kernel("big-endian unaligned accesses are not supported", regs, 0);
+               goto force_sigbus;
+       }
+
+       /*
+	 * Treat kernel accesses for which there is an exception handler entry the same as
+	 * user-level unaligned accesses.  Otherwise, a clever program could trick this
+	 * handler into reading arbitrary kernel addresses...
+        */
+       if (!user_mode(regs))
+               eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
+       if (user_mode(regs) || eh) {
+               if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
+                       goto force_sigbus;
+
+               if (!(current->thread.flags & IA64_THREAD_UAC_NOPRINT)
+                   && within_logging_rate_limit())
+               {
+                       char buf[200];  /* comm[] is at most 16 bytes... */
+                       size_t len;
+
+			len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
+				      "ip=0x%016lx\n\r", current->comm, current->pid,
+				      ifa, regs->cr_iip + ipsr->ri);
+                       /*
+			 * Don't call tty_write_message() if we're in the kernel; we might
+                        * be holding locks...
+                        */
+                       if (user_mode(regs))
+                               tty_write_message(current->signal->tty, buf);
+                       buf[len-1] = '\0';      /* drop '\r' */
+			printk(KERN_WARNING "%s", buf);	/* watch for command names containing %s */
+               }
+       } else {
+               if (within_logging_rate_limit())
+			printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
+			       ifa, regs->cr_iip + ipsr->ri);
+               set_fs(KERNEL_DS);
+       }
+
+       DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
+              regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
+
+       if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
+               goto failure;
+
+       /*
+        * extract the instruction from the bundle given the slot number
+        */
+       switch (ipsr->ri) {
+             case 0: u.l = (bundle[0] >>  5); break;
+             case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break;
+             case 2: u.l = (bundle[1] >> 23); break;
+       }
+       opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK;
+
+	DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d "
+	       "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm,
+	       u.insn.r3, u.insn.x, u.insn.hint, u.insn.x6_sz, u.insn.m, u.insn.op);
+
+       /*
+        * IMPORTANT:
+	 * Notice that the switch statement DOES not cover all possible instructions
+	 * that DO generate unaligned references. This is made on purpose because for some
+	 * instructions it DOES NOT make sense to try and emulate the access. Sometimes it
+	 * is WRONG to try and emulate. Here is a list of instructions we don't emulate i.e.,
+	 * the program will get a signal and die:
+	 *
+	 *	load/store:
+	 *		- ldX.spill
+	 *		- stX.spill
+	 *	Reason: RNATs are based on addresses
+	 *
+	 *	synchronization:
+	 *		- cmpxchg
+	 *		- fetchadd
+	 *		- xchg
+	 *	Reason: ATOMIC operations cannot be emulated properly using multiple
+	 *		instructions.
+	 *
+	 *	speculative loads:
+	 *		- ldX.sZ
+	 *	Reason: side effects, code must be ready to deal with failure so simpler
+	 *		to let the load fail.
+	 * ---------------------------------------------------------------------------------
+        * XXX fixme
+        *
+        * I would like to get rid of this switch case and do something
+        * more elegant.
+        */
+       switch (opcode) {
+             case LDS_OP:
+             case LDSA_OP:
+             case LDS_IMM_OP:
+             case LDSA_IMM_OP:
+             case LDFS_OP:
+             case LDFSA_OP:
+             case LDFS_IMM_OP:
+               /*
+		 * The instruction will be retried with deferred exceptions turned on, and
+		 * we should get the Nat bit installed
+		 *
+		 * IMPORTANT: When PSR_ED is set, the register & immediate update forms
+		 * are actually executed even though the operation failed. So we don't
+		 * need to take care of this.
+                */
+               DPRINT("forcing PSR_ED\n");
+               regs->cr_ipsr |= IA64_PSR_ED;
+               goto done;
+
+             case LD_OP:
+             case LDA_OP:
+             case LDBIAS_OP:
+             case LDACQ_OP:
+             case LDCCLR_OP:
+             case LDCNC_OP:
+             case LDCCLRACQ_OP:
+             case LD_IMM_OP:
+             case LDA_IMM_OP:
+             case LDBIAS_IMM_OP:
+             case LDACQ_IMM_OP:
+             case LDCCLR_IMM_OP:
+             case LDCNC_IMM_OP:
+             case LDCCLRACQ_IMM_OP:
+               ret = emulate_load_int(ifa, u.insn, regs);
+               break;
+
+             case ST_OP:
+             case STREL_OP:
+             case ST_IMM_OP:
+             case STREL_IMM_OP:
+               ret = emulate_store_int(ifa, u.insn, regs);
+               break;
+
+             case LDF_OP:
+             case LDFA_OP:
+             case LDFCCLR_OP:
+             case LDFCNC_OP:
+             case LDF_IMM_OP:
+             case LDFA_IMM_OP:
+             case LDFCCLR_IMM_OP:
+             case LDFCNC_IMM_OP:
+               if (u.insn.x)
+                       ret = emulate_load_floatpair(ifa, u.insn, regs);
+               else
+                       ret = emulate_load_float(ifa, u.insn, regs);
+               break;
+
+             case STF_OP:
+             case STF_IMM_OP:
+               ret = emulate_store_float(ifa, u.insn, regs);
+               break;
+
+             default:
+               goto failure;
+       }
+       DPRINT("ret=%d\n", ret);
+       if (ret)
+               goto failure;
+
+       if (ipsr->ri == 2)
+               /*
+		 * given today's architecture this case is not likely to happen because a
+		 * memory access instruction (M) can never be in the last slot of a
+		 * bundle. But let's keep it for now.
+                */
+               regs->cr_iip += 16;
+       ipsr->ri = (ipsr->ri + 1) & 0x3;
+
+       DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
+  done:
+       set_fs(old_fs);         /* restore original address limit */
+       return;
+
+  failure:
+       /* something went wrong... */
+       if (!user_mode(regs)) {
+               if (eh) {
+                       ia64_handle_exception(regs, eh);
+                       goto done;
+               }
+		die_if_kernel("error during unaligned kernel access\n", regs, ret);
+               /* NOT_REACHED */
+       }
+  force_sigbus:
+       si.si_signo = SIGBUS;
+       si.si_errno = 0;
+       si.si_code = BUS_ADRALN;
+       si.si_addr = (void __user *) ifa;
+       si.si_flags = 0;
+       si.si_isr = 0;
+       si.si_imm = 0;
+       force_sig_info(SIGBUS, &si, current);
+       goto done;
+#endif
+}
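
The slot-extraction switch in ia64_handle_unaligned() above follows the IA-64
bundle layout: 128 bits holding a 5-bit template followed by three 41-bit
instruction slots. A standalone sketch of the same computation, with an
explicit 41-bit mask (the original can omit the mask because u.insn's
bitfields discard the high bits):

	/* illustration only: pull instruction slot ri out of a 128-bit bundle */
	static unsigned long
	extract_slot (const unsigned long bundle[2], unsigned int ri)
	{
		const unsigned long mask = (1UL << 41) - 1;

		switch (ri) {
		case 0: return (bundle[0] >>  5) & mask;			/* bits  5..45  */
		case 1: return ((bundle[0] >> 46) | (bundle[1] << 18)) & mask;	/* bits 46..86  */
		case 2: return (bundle[1] >> 23) & mask;			/* bits 87..127 */
		}
		return 0;
	}
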
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h   Wed Aug  3 00:25:11 2005
@@ -0,0 +1,657 @@
+#ifndef _ASM_IA64_GCC_INTRIN_H
+#define _ASM_IA64_GCC_INTRIN_H
+/*
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@xxxxxxxxx>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ */
+
+#include <linux/compiler.h>
+
+/* define this macro to get some asm stmts included in 'c' files */
+#define ASM_SUPPORTED
+
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#define ia64_barrier() asm volatile ("":::"memory")
+
+#define ia64_stop()    asm volatile (";;"::)
+
+#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
+
+#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
+
+extern void ia64_bad_param_for_setreg (void);
+extern void ia64_bad_param_for_getreg (void);
+
+register unsigned long ia64_r13 asm ("r13") __attribute_used__;
+
+#define ia64_setreg(regnum, val)						\
+({										\
+	switch (regnum) {							\
+	    case _IA64_REG_PSR_L:						\
+		    asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");	\
+		    break;							\
+	    case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
+		    asm volatile ("mov ar%0=%1" ::				\
+					  "i" (regnum - _IA64_REG_AR_KR0),	\
+					  "r"(val): "memory");			\
+		    break;							\
+	    case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:			\
+		    asm volatile ("mov cr%0=%1" ::				\
+					  "i" (regnum - _IA64_REG_CR_DCR),	\
+					  "r"(val): "memory" );			\
+		    break;							\
+	    case _IA64_REG_SP:							\
+		    asm volatile ("mov r12=%0" ::				\
+					  "r"(val): "memory");			\
+		    break;							\
+	    case _IA64_REG_GP:							\
+		    asm volatile ("mov gp=%0" :: "r"(val) : "memory");		\
+		break;								\
+	    default:								\
+		    ia64_bad_param_for_setreg();				\
+		    break;							\
+	}									\
+})
+
+#define ia64_getreg(regnum)							\
+({										\
+	__u64 ia64_intri_res;							\
+										\
+	switch (regnum) {							\
+	case _IA64_REG_GP:							\
+		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));		\
+		break;								\
+	case _IA64_REG_IP:							\
+		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));		\
+		break;								\
+	case _IA64_REG_PSR:							\
+		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));		\
+		break;								\
+	case _IA64_REG_TP:	/* for current() */				\
+		ia64_intri_res = ia64_r13;					\
+		break;								\
+	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
+		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)		\
+				      : "i"(regnum - _IA64_REG_AR_KR0));	\
+		break;								\
+	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
+		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)		\
+				      : "i" (regnum - _IA64_REG_CR_DCR));	\
+		break;								\
+	case _IA64_REG_SP:							\
+		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));		\
+		break;								\
+	default:								\
+		ia64_bad_param_for_getreg();					\
+		break;								\
+	}									\
+	ia64_intri_res;								\
+})
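
Because every operand that selects a register number uses an "i" (immediate)
constraint, regnum must be a compile-time constant: for a constant the switch
collapses to a single mov, while a non-constant value leaves behind a call to
the deliberately undefined ia64_bad_param_for_setreg()/_getreg() and fails at
link time. Hypothetical usage:

	/* illustration only: regnum arguments are compile-time constants */
	unsigned long sp = ia64_getreg(_IA64_REG_SP);
	ia64_setreg(_IA64_REG_AR_KR0, 0xdeadbeefUL);
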
+
+#define ia64_hint_pause 0
+
+#define ia64_hint(mode)                                                \
+({                                                             \
+       switch (mode) {                                         \
+       case ia64_hint_pause:                                   \
+               asm volatile ("hint @pause" ::: "memory");      \
+               break;                                          \
+       }                                                       \
+})
+
+
+/* Integer values for mux1 instruction */
+#define ia64_mux1_brcst 0
+#define ia64_mux1_mix   8
+#define ia64_mux1_shuf  9
+#define ia64_mux1_alt  10
+#define ia64_mux1_rev  11
+
+#define ia64_mux1(x, mode)							\
+({										\
+	__u64 ia64_intri_res;							\
+										\
+	switch (mode) {								\
+	case ia64_mux1_brcst:							\
+		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
+		break;								\
+	case ia64_mux1_mix:							\
+		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
+		break;								\
+	case ia64_mux1_shuf:							\
+		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
+		break;								\
+	case ia64_mux1_alt:							\
+		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
+		break;								\
+	case ia64_mux1_rev:							\
+		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
+		break;								\
+	}									\
+	ia64_intri_res;								\
+})
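
Of the mux1 variants, @brcst is the easiest to pin down in portable C: it
replicates the least-significant byte of the source across all eight byte
lanes. A model of that one case (illustration only; the other modes permute
bytes and have no one-line C equivalent):

	static inline unsigned long
	mux1_brcst_model (unsigned long x)
	{
		/* copy byte 0 into all 8 bytes, e.g. 0xab -> 0xabababababababab */
		return (x & 0xff) * 0x0101010101010101UL;
	}
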
+
+#define ia64_popcnt(x)                                         \
+({                                                             \
+       __u64 ia64_intri_res;                                   \
+       asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
+                                                               \
+       ia64_intri_res;                                         \
+})
+
+#define ia64_getf_exp(x)                                       \
+({                                                             \
+       long ia64_intri_res;                                    \
+                                                               \
+       asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
+                                                               \
+       ia64_intri_res;                                         \
+})
+
+#define ia64_shrp(a, b, count)								\
+({											\
+	__u64 ia64_intri_res;								\
+	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
+	ia64_intri_res;									\
+})
+
+#define ia64_ldfs(regnum, x)                                   \
+({                                                             \
+       register double __f__ asm ("f"#regnum);                 \
+       asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));     \
+})
+
+#define ia64_ldfd(regnum, x)                                   \
+({                                                             \
+       register double __f__ asm ("f"#regnum);                 \
+       asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));     \
+})
+
+#define ia64_ldfe(regnum, x)                                   \
+({                                                             \
+       register double __f__ asm ("f"#regnum);                 \
+       asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));     \
+})
+
+#define ia64_ldf8(regnum, x)                                   \
+({                                                             \
+       register double __f__ asm ("f"#regnum);                 \
+       asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));     \
+})
+
+#define ia64_ldf_fill(regnum, x)                               \
+({                                                             \
+       register double __f__ asm ("f"#regnum);                 \
+       asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_stfs(x, regnum)                                           \
+({                                                                     \
+       register double __f__ asm ("f"#regnum);                         \
+       asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stfd(x, regnum)                                           \
+({                                                                     \
+       register double __f__ asm ("f"#regnum);                         \
+       asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stfe(x, regnum)                                           \
+({                                                                     \
+       register double __f__ asm ("f"#regnum);                         \
+       asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stf8(x, regnum)                                           \
+({                                                                     \
+       register double __f__ asm ("f"#regnum);                         \
+       asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stf_spill(x, regnum)						\
+({										\
+	register double __f__ asm ("f"#regnum);					\
+	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
+})
+
+#define ia64_fetchadd4_acq(p, inc)						\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("fetchadd4.acq %0=[%1],%2"				\
+				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
+				: "memory");					\
+										\
+	ia64_intri_res;								\
+})
+
+#define ia64_fetchadd4_rel(p, inc)						\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("fetchadd4.rel %0=[%1],%2"				\
+				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
+				: "memory");					\
+										\
+	ia64_intri_res;								\
+})
+
+#define ia64_fetchadd8_acq(p, inc)						\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("fetchadd8.acq %0=[%1],%2"				\
+				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
+				: "memory");					\
+										\
+	ia64_intri_res;								\
+})
+
+#define ia64_fetchadd8_rel(p, inc)						\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("fetchadd8.rel %0=[%1],%2"				\
+				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
+				: "memory");					\
+										\
+	ia64_intri_res;								\
+})
+
+#define ia64_xchg1(ptr,x)                                                      
\
+({                                                                             
\
+       __u64 ia64_intri_res;                                                   
\
+       asm volatile ("xchg1 %0=[%1],%2"                                        
\
+                     : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); 
\
+       ia64_intri_res;                                                         
\
+})
+
+#define ia64_xchg2(ptr,x)                                              \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)        \
+                     : "r" (ptr), "r" (x) : "memory");                 \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_xchg4(ptr,x)                                              \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)        \
+                     : "r" (ptr), "r" (x) : "memory");                 \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_xchg8(ptr,x)                                              \
+({                                                                     \
+       __u64 ia64_intri_res;                                           \
+       asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)        \
+                     : "r" (ptr), "r" (x) : "memory");                 \
+       ia64_intri_res;                                                 \
+})
+
+#define ia64_cmpxchg1_acq(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg1_rel(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg2_acq(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg2_rel(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+											\
+	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg4_acq(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg4_rel(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg8_acq(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_cmpxchg8_rel(ptr, new, old)						\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
+											\
+	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":					\
+			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
+	ia64_intri_res;									\
+})
+
+#define ia64_mf()      asm volatile ("mf" ::: "memory")
+#define ia64_mfa()     asm volatile ("mf.a" ::: "memory")
+
+#ifdef CONFIG_VTI
+/*
+ * Flushrs instruction stream.
+ */
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
+#define ia64_get_rsc()                          \
+({                                  \
+    unsigned long val;                     \
+    asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory");  \
+    val;                               \
+})
+
+#define ia64_set_rsc(val)                       \
+    asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
+
+#define ia64_get_bspstore()     \
+({                                  \
+    unsigned long val;                     \
+    asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory");  \
+    val;                               \
+})
+
+#define ia64_set_bspstore(val)                       \
+    asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
+
+#define ia64_get_rnat()     \
+({                                  \
+    unsigned long val;                     \
+    asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory");  \
+    val;                               \
+})
+
+#define ia64_set_rnat(val)                       \
+    asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
+
+#define ia64_ttag(addr)								\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_get_dcr()                          \
+({                                      \
+    __u64 result;                               \
+    asm volatile ("mov %0=cr.dcr" : "=r"(result) : );           \
+    result;                                 \
+})
+
+#define ia64_set_dcr(val)                           \
+({                                      \
+    asm volatile ("mov cr.dcr=%0" :: "r"(val) );            \
+})
+
+#endif // CONFIG_VTI
+
+
+#define ia64_invala() asm volatile ("invala" ::: "memory")
+
+#define ia64_thash(addr)							\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_srlz_i()  asm volatile (";; srlz.i ;;" ::: "memory")
+#define ia64_srlz_d()  asm volatile (";; srlz.d" ::: "memory");
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define ia64_dv_serialize_data()		asm volatile (".serialize.data");
+# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction");
+#else
+# define ia64_dv_serialize_data()
+# define ia64_dv_serialize_instruction()
+#endif
+
+#define ia64_nop(x)    asm volatile ("nop %0"::"i"(x));
+
+#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+
+#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+
+
+#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"		\
+					     :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"		\
+					     :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_tpa(addr)								\
+({										\
+	__u64 ia64_pa;								\
+	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
+	ia64_pa;								\
+})
+
+#define __ia64_set_dbr(index, val)						\
+	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_ibr(index, val)						\
+	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pkr(index, val)						\
+	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmc(index, val)						\
+	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmd(index, val)						\
+	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_rr(index, val)							\
+	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
+
+#define ia64_get_cpuid(index)								\
+({											\
+	__u64 ia64_intri_res;								\
+	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
+	ia64_intri_res;									\
+})
+
+#define __ia64_get_dbr(index)							\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_get_ibr(index)							\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_get_pkr(index)							\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_get_pmc(index)							\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+	ia64_intri_res;								\
+})
+
+
+#define ia64_get_pmd(index)							\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_get_rr(index)							\
+({										\
+	__u64 ia64_intri_res;							\
+	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
+	ia64_intri_res;								\
+})
+
+#define ia64_fc(addr)  asm volatile ("fc %0" :: "r"(addr) : "memory")
+
+
+#define ia64_sync_i()  asm volatile (";; sync.i" ::: "memory")
+
+#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
+#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
+#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
+#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
+
+#define ia64_ptce(addr)        asm volatile ("ptc.e %0" :: "r"(addr))
+
+#define ia64_ptcga(addr, size)							\
+do {										\
+	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
+	ia64_dv_serialize_data();						\
+} while (0)
+
+#define ia64_ptcl(addr, size)							\
+do {										\
+	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
+	ia64_dv_serialize_data();						\
+} while (0)
+
+#define ia64_ptri(addr, size)                                          \
+       asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ptrd(addr, size)                                          \
+       asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
+
+#define ia64_lfhint_none   0
+#define ia64_lfhint_nt1    1
+#define ia64_lfhint_nt2    2
+#define ia64_lfhint_nta    3
+
+#define ia64_lfetch(lfhint, y)                                 \
+({                                                             \
+        switch (lfhint) {                                      \
+        case ia64_lfhint_none:                                 \
+                asm volatile ("lfetch [%0]" : : "r"(y));       \
+                break;                                         \
+        case ia64_lfhint_nt1:                                  \
+                asm volatile ("lfetch.nt1 [%0]" : : "r"(y));   \
+                break;                                         \
+        case ia64_lfhint_nt2:                                  \
+                asm volatile ("lfetch.nt2 [%0]" : : "r"(y));   \
+                break;                                         \
+        case ia64_lfhint_nta:                                  \
+                asm volatile ("lfetch.nta [%0]" : : "r"(y));   \
+                break;                                         \
+        }                                                      \
+})
+
+#define ia64_lfetch_excl(lfhint, y)                                    \
+({                                                                     \
+        switch (lfhint) {                                              \
+        case ia64_lfhint_none:                                         \
+                asm volatile ("lfetch.excl [%0]" :: "r"(y));           \
+                break;                                                 \
+        case ia64_lfhint_nt1:                                          \
+                asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));       \
+                break;                                                 \
+        case ia64_lfhint_nt2:                                          \
+                asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));       \
+                break;                                                 \
+        case ia64_lfhint_nta:                                          \
+                asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));       \
+                break;                                                 \
+        }                                                              \
+})
+
+#define ia64_lfetch_fault(lfhint, y)                                   \
+({                                                                     \
+        switch (lfhint) {                                              \
+        case ia64_lfhint_none:                                         \
+                asm volatile ("lfetch.fault [%0]" : : "r"(y));         \
+                break;                                                 \
+        case ia64_lfhint_nt1:                                          \
+                asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));     \
+                break;                                                 \
+        case ia64_lfhint_nt2:                                          \
+                asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));     \
+                break;                                                 \
+        case ia64_lfhint_nta:                                          \
+                asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));     \
+                break;                                                 \
+        }                                                              \
+})
+
+#define ia64_lfetch_fault_excl(lfhint, y)                              \
+({                                                                     \
+        switch (lfhint) {                                              \
+        case ia64_lfhint_none:                                         \
+                asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));     \
+                break;                                                 \
+        case ia64_lfhint_nt1:                                          \
+                asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
+                break;                                                 \
+        case ia64_lfhint_nt2:                                          \
+                asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
+                break;                                                 \
+        case ia64_lfhint_nta:                                          \
+                asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
+                break;                                                 \
+        }                                                              \
+})
+
+#define ia64_intrin_local_irq_restore(x)                       \
+do {                                                           \
+       asm volatile (";;   cmp.ne p6,p7=%0,r0;;"               \
+                     "(p6) ssm psr.i;"                         \
+                     "(p7) rsm psr.i;;"                        \
+                     "(p6) srlz.d"                             \
+                     :: "r"((x)) : "p6", "p7", "memory");      \
+} while (0)
+
+#endif /* _ASM_IA64_GCC_INTRIN_H */
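For reference, a minimal sketch of how the atomic intrinsics above are
typically consumed, assuming an ia64 build with this header on the include
path; the counter helpers below are hypothetical, not part of the patch:

/* Hypothetical lock-free counter built on the intrinsics above. */
static __u32 counter;

/* fetchadd4.acq returns the value the word held *before* the add; the
 * increment must be one of the immediates the instruction accepts
 * (+/-1, 4, 8, 16). */
static inline __u32 counter_inc(void)
{
	return ia64_fetchadd4_acq(&counter, 1);
}

/* Compare-and-swap: the expected value goes into ar.ccv first, then
 * cmpxchg4.acq stores `new` only if the word still equals `old`. */
static inline int counter_cas(__u32 old, __u32 new)
{
	return ia64_cmpxchg4_acq(&counter, new, old) == old;
}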
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h    Wed Aug  3 00:25:11 2005
@@ -0,0 +1,55 @@
+/*
+ * Platform dependent support for HP simulator.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Vijay Chander <vijay@xxxxxxxxxxxx>
+ */
+#ifndef _IA64_PLATFORM_HPSIM_SSC_H
+#define _IA64_PLATFORM_HPSIM_SSC_H
+
+/* Simulator system calls: */
+
+#define SSC_CONSOLE_INIT               20
+#define SSC_GETCHAR                    21
+#define SSC_PUTCHAR                    31
+#define SSC_CONNECT_INTERRUPT          58
+#define SSC_GENERATE_INTERRUPT         59
+#define SSC_SET_PERIODIC_INTERRUPT     60
+#define SSC_GET_RTC                    65
+#define SSC_EXIT                       66
+#define SSC_LOAD_SYMBOLS               69
+#define SSC_GET_TOD                    74
+#define SSC_CTL_TRACE                  76
+
+#define SSC_NETDEV_PROBE               100
+#define SSC_NETDEV_SEND                        101
+#define SSC_NETDEV_RECV                        102
+#define SSC_NETDEV_ATTACH              103
+#define SSC_NETDEV_DETACH              104
+
+/*
+ * Simulator system call.
+ */
+extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
+
+#ifdef XEN
+/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
+ * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
+#define SSC_OPEN                       50
+#define SSC_CLOSE                      51
+#define SSC_READ                       52
+#define SSC_WRITE                      53
+#define SSC_GET_COMPLETION             54
+#define SSC_WAIT_COMPLETION            55
+
+#define SSC_WRITE_ACCESS               2
+#define SSC_READ_ACCESS                        1
+
+struct ssc_disk_req {
+       unsigned long addr;
+       unsigned long len;
+};
+#endif
+
+#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
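A short usage sketch for the SSC interface, assuming the HP simulator
platform code provides ia64_ssc() as declared above; the wrapper names
here are hypothetical:

/* Console I/O via simulator system calls. */
static void simcons_putc(int c)
{
	ia64_ssc(c, 0, 0, 0, SSC_PUTCHAR);		/* emit one character */
}

static int simcons_getc(void)
{
	return ia64_ssc(0, 0, 0, 0, SSC_GETCHAR);	/* poll for input */
}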
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/ia64regs.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/ia64regs.h     Wed Aug  3 00:25:11 2005
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2002,2003 Intel Corp.
+ *      Jun Nakajima <jun.nakajima@xxxxxxxxx>
+ *      Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ */
+
+#ifndef _ASM_IA64_IA64REGS_H
+#define _ASM_IA64_IA64REGS_H
+
+/*
+ * Register Names for getreg() and setreg().
+ *
+ * The "magic" numbers happen to match the values used by the Intel compiler's
+ * getreg()/setreg() intrinsics.
+ */
+
+/* Special Registers */
+
+#define _IA64_REG_IP           1016    /* getreg only */
+#define _IA64_REG_PSR          1019
+#define _IA64_REG_PSR_L                1019
+
+/* General Integer Registers */
+
+#define _IA64_REG_GP           1025    /* R1 */
+#define _IA64_REG_R8           1032    /* R8 */
+#define _IA64_REG_R9           1033    /* R9 */
+#define _IA64_REG_SP           1036    /* R12 */
+#define _IA64_REG_TP           1037    /* R13 */
+
+/* Application Registers */
+
+#define _IA64_REG_AR_KR0       3072
+#define _IA64_REG_AR_KR1       3073
+#define _IA64_REG_AR_KR2       3074
+#define _IA64_REG_AR_KR3       3075
+#define _IA64_REG_AR_KR4       3076
+#define _IA64_REG_AR_KR5       3077
+#define _IA64_REG_AR_KR6       3078
+#define _IA64_REG_AR_KR7       3079
+#define _IA64_REG_AR_RSC       3088
+#define _IA64_REG_AR_BSP       3089
+#define _IA64_REG_AR_BSPSTORE  3090
+#define _IA64_REG_AR_RNAT      3091
+#define _IA64_REG_AR_FCR       3093
+#define _IA64_REG_AR_EFLAG     3096
+#define _IA64_REG_AR_CSD       3097
+#define _IA64_REG_AR_SSD       3098
+#define _IA64_REG_AR_CFLAG     3099
+#define _IA64_REG_AR_FSR       3100
+#define _IA64_REG_AR_FIR       3101
+#define _IA64_REG_AR_FDR       3102
+#define _IA64_REG_AR_CCV       3104
+#define _IA64_REG_AR_UNAT      3108
+#define _IA64_REG_AR_FPSR      3112
+#define _IA64_REG_AR_ITC       3116
+#define _IA64_REG_AR_PFS       3136
+#define _IA64_REG_AR_LC                3137
+#define _IA64_REG_AR_EC                3138
+
+/* Control Registers */
+
+#define _IA64_REG_CR_DCR       4096
+#define _IA64_REG_CR_ITM       4097
+#define _IA64_REG_CR_IVA       4098
+#define _IA64_REG_CR_PTA       4104
+#define _IA64_REG_CR_IPSR      4112
+#define _IA64_REG_CR_ISR       4113
+#define _IA64_REG_CR_IIP       4115
+#define _IA64_REG_CR_IFA       4116
+#define _IA64_REG_CR_ITIR      4117
+#define _IA64_REG_CR_IIPA      4118
+#define _IA64_REG_CR_IFS       4119
+#define _IA64_REG_CR_IIM       4120
+#define _IA64_REG_CR_IHA       4121
+#define _IA64_REG_CR_LID       4160
+#define _IA64_REG_CR_IVR       4161    /* getreg only */
+#define _IA64_REG_CR_TPR       4162
+#define _IA64_REG_CR_EOI       4163
+#define _IA64_REG_CR_IRR0      4164    /* getreg only */
+#define _IA64_REG_CR_IRR1      4165    /* getreg only */
+#define _IA64_REG_CR_IRR2      4166    /* getreg only */
+#define _IA64_REG_CR_IRR3      4167    /* getreg only */
+#define _IA64_REG_CR_ITV       4168
+#define _IA64_REG_CR_PMV       4169
+#define _IA64_REG_CR_CMCV      4170
+#define _IA64_REG_CR_LRR0      4176
+#define _IA64_REG_CR_LRR1      4177
+
+#ifdef  CONFIG_VTI
+#define IA64_REG_CR_DCR   0
+#define IA64_REG_CR_ITM   1
+#define IA64_REG_CR_IVA   2
+#define IA64_REG_CR_PTA   8
+#define IA64_REG_CR_IPSR  16
+#define IA64_REG_CR_ISR   17
+#define IA64_REG_CR_IIP   19
+#define IA64_REG_CR_IFA   20
+#define IA64_REG_CR_ITIR  21
+#define IA64_REG_CR_IIPA  22
+#define IA64_REG_CR_IFS   23
+#define IA64_REG_CR_IIM   24
+#define IA64_REG_CR_IHA   25
+#define IA64_REG_CR_LID   64
+#define IA64_REG_CR_IVR   65
+#define IA64_REG_CR_TPR   66
+#define IA64_REG_CR_EOI   67
+#define IA64_REG_CR_IRR0  68
+#define IA64_REG_CR_IRR1  69
+#define IA64_REG_CR_IRR2  70
+#define IA64_REG_CR_IRR3  71
+#define IA64_REG_CR_ITV   72
+#define IA64_REG_CR_PMV   73
+#define IA64_REG_CR_CMCV  74
+#define IA64_REG_CR_LRR0  80
+#define IA64_REG_CR_LRR1  81
+#endif  //  CONFIG_VTI
+
+/* Indirect Registers for getindreg() and setindreg() */
+
+#define _IA64_REG_INDR_CPUID   9000    /* getindreg only */
+#define _IA64_REG_INDR_DBR     9001
+#define _IA64_REG_INDR_IBR     9002
+#define _IA64_REG_INDR_PKR     9003
+#define _IA64_REG_INDR_PMC     9004
+#define _IA64_REG_INDR_PMD     9005
+#define _IA64_REG_INDR_RR      9006
+
+#endif /* _ASM_IA64_IA64REGS_H */
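As a sketch of how these "magic" numbers are consumed: the
ia64_getreg()/ia64_setreg() intrinsics (from asm/intrinsics.h) take them
as compile-time register selectors.  The wrappers below are hypothetical:

static inline unsigned long read_itc(void)
{
	return ia64_getreg(_IA64_REG_AR_ITC);	/* interval time counter */
}

static inline void set_tpr(unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_TPR, val);	/* task priority register */
}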
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/io.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/io.h   Wed Aug  3 00:25:11 2005
@@ -0,0 +1,488 @@
+#ifndef _ASM_IA64_IO_H
+#define _ASM_IA64_IO_H
+
+/*
+ * This file contains the definitions for the emulated IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated to
+ * (a) handle it all in a way that makes gcc able to optimize it as
+ * well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ */
+
+/* We don't use IO slowdowns on the ia64, but.. */
+#define __SLOW_DOWN_IO do { } while (0)
+#define SLOW_DOWN_IO   do { } while (0)
+
+#ifdef XEN
+#define __IA64_UNCACHED_OFFSET 0xe800000000000000UL
+#else
+#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL    /* region 6 */
+#endif
+
+/*
+ * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
+ * large machines may have multiple other I/O spaces so we can't place any a priori limit
+ * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
+ */
+#define IO_SPACE_LIMIT         0xffffffffffffffffUL
+
+#define MAX_IO_SPACES_BITS             4
+#define MAX_IO_SPACES                  (1UL << MAX_IO_SPACES_BITS)
+#define IO_SPACE_BITS                  24
+#define IO_SPACE_SIZE                  (1UL << IO_SPACE_BITS)
+
+#define IO_SPACE_NR(port)              ((port) >> IO_SPACE_BITS)
+#define IO_SPACE_BASE(space)           ((space) << IO_SPACE_BITS)
+#define IO_SPACE_PORT(port)            ((port) & (IO_SPACE_SIZE - 1))
+
+#define IO_SPACE_SPARSE_ENCODING(p)    ((((p) >> 2) << 12) | (p & 0xfff))
+
+struct io_space {
+       unsigned long mmio_base;        /* base in MMIO space */
+       int sparse;
+};
+
+extern struct io_space io_space[];
+extern unsigned int num_io_spaces;
+
+# ifdef __KERNEL__
+
+/*
+ * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
+ *     0xCxxxxxxxxxxxxxxx      MMIO cookie (return from ioremap)
+ *     0x000000001SPPPPPP      PIO cookie (S=space number, P..P=port)
+ *
+ * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
+ * code that uses bare port numbers without the prerequisite pci_iomap().
+ */
+#define PIO_OFFSET             (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
+#define PIO_MASK               (PIO_OFFSET - 1)
+#define PIO_RESERVED           __IA64_UNCACHED_OFFSET
+#define HAVE_ARCH_PIO_SIZE
+
+#include <asm/intrinsics.h>
+#include <asm/machvec.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm-generic/iomap.h>
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ */
+static inline unsigned long
+virt_to_phys (volatile void *address)
+{
+       return (unsigned long) address - PAGE_OFFSET;
+}
+
+static inline void*
+phys_to_virt (unsigned long address)
+{
+       return (void *) (address + PAGE_OFFSET);
+}
+
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
+
+/*
+ * The following two macros are deprecated and scheduled for removal.
+ * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
+ */
+#define bus_to_virt    phys_to_virt
+#define virt_to_bus    virt_to_phys
+#define page_to_bus    page_to_phys
+
+# endif /* KERNEL */
+
+/*
+ * Memory fence w/accept.  This should never be used in code that is
+ * not IA-64 specific.
+ */
+#define __ia64_mf_a()  ia64_mfa()
+
+/**
+ * ___ia64_mmiowb - I/O write barrier
+ *
+ * Ensure ordering of I/O space writes.  This will make sure that writes
+ * following the barrier will arrive after all previous writes.  For most
+ * ia64 platforms, this is a simple 'mf.a' instruction.
+ *
+ * See Documentation/DocBook/deviceiobook.tmpl for more information.
+ */
+static inline void ___ia64_mmiowb(void)
+{
+       ia64_mfa();
+}
+
+static inline const unsigned long
+__ia64_get_io_port_base (void)
+{
+       extern unsigned long ia64_iobase;
+
+       return ia64_iobase;
+}
+
+static inline void*
+__ia64_mk_io_addr (unsigned long port)
+{
+       struct io_space *space;
+       unsigned long offset;
+
+       space = &io_space[IO_SPACE_NR(port)];
+       port = IO_SPACE_PORT(port);
+       if (space->sparse)
+               offset = IO_SPACE_SPARSE_ENCODING(port);
+       else
+               offset = port;
+
+       return (void *) (space->mmio_base | offset);
+}
+
+#define __ia64_inb     ___ia64_inb
+#define __ia64_inw     ___ia64_inw
+#define __ia64_inl     ___ia64_inl
+#define __ia64_outb    ___ia64_outb
+#define __ia64_outw    ___ia64_outw
+#define __ia64_outl    ___ia64_outl
+#define __ia64_readb   ___ia64_readb
+#define __ia64_readw   ___ia64_readw
+#define __ia64_readl   ___ia64_readl
+#define __ia64_readq   ___ia64_readq
+#define __ia64_readb_relaxed   ___ia64_readb
+#define __ia64_readw_relaxed   ___ia64_readw
+#define __ia64_readl_relaxed   ___ia64_readl
+#define __ia64_readq_relaxed   ___ia64_readq
+#define __ia64_writeb  ___ia64_writeb
+#define __ia64_writew  ___ia64_writew
+#define __ia64_writel  ___ia64_writel
+#define __ia64_writeq  ___ia64_writeq
+#define __ia64_mmiowb  ___ia64_mmiowb
+
+/*
+ * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
+ * that the access has completed before executing other I/O accesses.  Since we're doing
+ * the accesses through an uncachable (UC) translation, the CPU will execute them in
+ * program order.  However, we still need to tell the compiler not to shuffle them around
+ * during optimization, which is why we use "volatile" pointers.
+ */
+
+static inline unsigned int
+___ia64_inb (unsigned long port)
+{
+       volatile unsigned char *addr = __ia64_mk_io_addr(port);
+       unsigned char ret;
+
+       ret = *addr;
+       __ia64_mf_a();
+       return ret;
+}
+
+static inline unsigned int
+___ia64_inw (unsigned long port)
+{
+       volatile unsigned short *addr = __ia64_mk_io_addr(port);
+       unsigned short ret;
+
+       ret = *addr;
+       __ia64_mf_a();
+       return ret;
+}
+
+static inline unsigned int
+___ia64_inl (unsigned long port)
+{
+       volatile unsigned int *addr = __ia64_mk_io_addr(port);
+       unsigned int ret;
+
+       ret = *addr;
+       __ia64_mf_a();
+       return ret;
+}
+
+static inline void
+___ia64_outb (unsigned char val, unsigned long port)
+{
+       volatile unsigned char *addr = __ia64_mk_io_addr(port);
+
+       *addr = val;
+       __ia64_mf_a();
+}
+
+static inline void
+___ia64_outw (unsigned short val, unsigned long port)
+{
+       volatile unsigned short *addr = __ia64_mk_io_addr(port);
+
+       *addr = val;
+       __ia64_mf_a();
+}
+
+static inline void
+___ia64_outl (unsigned int val, unsigned long port)
+{
+       volatile unsigned int *addr = __ia64_mk_io_addr(port);
+
+       *addr = val;
+       __ia64_mf_a();
+}
+
+static inline void
+__insb (unsigned long port, void *dst, unsigned long count)
+{
+       unsigned char *dp = dst;
+
+       while (count--)
+               *dp++ = platform_inb(port);
+}
+
+static inline void
+__insw (unsigned long port, void *dst, unsigned long count)
+{
+       unsigned short *dp = dst;
+
+       while (count--)
+               *dp++ = platform_inw(port);
+}
+
+static inline void
+__insl (unsigned long port, void *dst, unsigned long count)
+{
+       unsigned int *dp = dst;
+
+       while (count--)
+               *dp++ = platform_inl(port);
+}
+
+static inline void
+__outsb (unsigned long port, const void *src, unsigned long count)
+{
+       const unsigned char *sp = src;
+
+       while (count--)
+               platform_outb(*sp++, port);
+}
+
+static inline void
+__outsw (unsigned long port, const void *src, unsigned long count)
+{
+       const unsigned short *sp = src;
+
+       while (count--)
+               platform_outw(*sp++, port);
+}
+
+static inline void
+__outsl (unsigned long port, const void *src, unsigned long count)
+{
+       const unsigned int *sp = src;
+
+       while (count--)
+               platform_outl(*sp++, port);
+}
+
+/*
+ * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
+ * specification regarding legacy I/O support.  Thus, we have to make these operations
+ * platform dependent...
+ */
+#define __inb          platform_inb
+#define __inw          platform_inw
+#define __inl          platform_inl
+#define __outb         platform_outb
+#define __outw         platform_outw
+#define __outl         platform_outl
+#define __mmiowb       platform_mmiowb
+
+#define inb(p)         __inb(p)
+#define inw(p)         __inw(p)
+#define inl(p)         __inl(p)
+#define insb(p,d,c)    __insb(p,d,c)
+#define insw(p,d,c)    __insw(p,d,c)
+#define insl(p,d,c)    __insl(p,d,c)
+#define outb(v,p)      __outb(v,p)
+#define outw(v,p)      __outw(v,p)
+#define outl(v,p)      __outl(v,p)
+#define outsb(p,s,c)   __outsb(p,s,c)
+#define outsw(p,s,c)   __outsw(p,s,c)
+#define outsl(p,s,c)   __outsl(p,s,c)
+#define mmiowb()       __mmiowb()
+
+/*
+ * The addresses passed to these functions are ioremap()ed already.
+ *
+ * We need these to be machine vectors since some platforms don't provide
+ * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
+ * a good idea).  Writes are ok though for all existing ia64 platforms (and
+ * hopefully it'll stay that way).
+ */
+static inline unsigned char
+___ia64_readb (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned char __force *)addr;
+}
+
+static inline unsigned short
+___ia64_readw (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned short __force *)addr;
+}
+
+static inline unsigned int
+___ia64_readl (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned int __force *) addr;
+}
+
+static inline unsigned long
+___ia64_readq (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned long __force *) addr;
+}
+
+static inline void
+__writeb (unsigned char val, volatile void __iomem *addr)
+{
+       *(volatile unsigned char __force *) addr = val;
+}
+
+static inline void
+__writew (unsigned short val, volatile void __iomem *addr)
+{
+       *(volatile unsigned short __force *) addr = val;
+}
+
+static inline void
+__writel (unsigned int val, volatile void __iomem *addr)
+{
+       *(volatile unsigned int __force *) addr = val;
+}
+
+static inline void
+__writeq (unsigned long val, volatile void __iomem *addr)
+{
+       *(volatile unsigned long __force *) addr = val;
+}
+
+#define __readb                platform_readb
+#define __readw                platform_readw
+#define __readl                platform_readl
+#define __readq                platform_readq
+#define __readb_relaxed        platform_readb_relaxed
+#define __readw_relaxed        platform_readw_relaxed
+#define __readl_relaxed        platform_readl_relaxed
+#define __readq_relaxed        platform_readq_relaxed
+
+#define readb(a)       __readb((a))
+#define readw(a)       __readw((a))
+#define readl(a)       __readl((a))
+#define readq(a)       __readq((a))
+#define readb_relaxed(a)       __readb_relaxed((a))
+#define readw_relaxed(a)       __readw_relaxed((a))
+#define readl_relaxed(a)       __readl_relaxed((a))
+#define readq_relaxed(a)       __readq_relaxed((a))
+#define __raw_readb    readb
+#define __raw_readw    readw
+#define __raw_readl    readl
+#define __raw_readq    readq
+#define __raw_readb_relaxed    readb_relaxed
+#define __raw_readw_relaxed    readw_relaxed
+#define __raw_readl_relaxed    readl_relaxed
+#define __raw_readq_relaxed    readq_relaxed
+#define writeb(v,a)    __writeb((v), (a))
+#define writew(v,a)    __writew((v), (a))
+#define writel(v,a)    __writel((v), (a))
+#define writeq(v,a)    __writeq((v), (a))
+#define __raw_writeb   writeb
+#define __raw_writew   writew
+#define __raw_writel   writel
+#define __raw_writeq   writeq
+
+#ifndef inb_p
+# define inb_p         inb
+#endif
+#ifndef inw_p
+# define inw_p         inw
+#endif
+#ifndef inl_p
+# define inl_p         inl
+#endif
+
+#ifndef outb_p
+# define outb_p                outb
+#endif
+#ifndef outw_p
+# define outw_p                outw
+#endif
+#ifndef outl_p
+# define outl_p                outl
+#endif
+
+/*
+ * An "address" in IO memory space is not clearly either an integer or a 
pointer. We will
+ * accept both, thus the casts.
+ *
+ * On ia-64, we access the physical I/O memory space through the uncached 
kernel region.
+ */
+static inline void __iomem *
+ioremap (unsigned long offset, unsigned long size)
+{
+       return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
+}
+
+static inline void
+iounmap (volatile void __iomem *addr)
+{
+}
+
+#define ioremap_nocache(o,s)   ioremap(o,s)
+
+# ifdef __KERNEL__
+
+/*
+ * String version of IO memory access ops:
+ */
+extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
+extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
+extern void memset_io(volatile void __iomem *s, int c, long n);
+
+#define dma_cache_inv(_start,_size)             do { } while (0)
+#define dma_cache_wback(_start,_size)           do { } while (0)
+#define dma_cache_wback_inv(_start,_size)       do { } while (0)
+
+# endif /* __KERNEL__ */
+
+/*
+ * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
+ * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
+ * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
+ * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
+ * over BIO-level virtual merging.
+ */
+extern unsigned long ia64_max_iommu_merge_mask;
+#if 1
+#define BIO_VMERGE_BOUNDARY    0
+#else
+/*
+ * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
+ * replaced by dma_merge_mask() or something of that sort.  Note: the only way
+ * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
+ * expanded into:
+ *
+ *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask)
+ *
+ * which is precisely what we want.
+ */
+#define BIO_VMERGE_BOUNDARY    (ia64_max_iommu_merge_mask + 1)
+#endif
+
+#endif /* _ASM_IA64_IO_H */
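A worked example of the I/O space encoding defined above, using only the
macros from this header; the values and the pio_demo() helper are
illustrative, not part of the patch:

static void pio_demo(void)
{
	/* Port 0x3f8 in I/O space 1. */
	unsigned long port = IO_SPACE_BASE(1) | 0x3f8;

	unsigned int  space = IO_SPACE_NR(port);	/* -> 1 */
	unsigned long off   = IO_SPACE_PORT(port);	/* -> 0x3f8 */

	/* Sparse encoding keeps the low 12 bits and also deposits
	 * (port >> 2) at bit 12, so 0x3f8 encodes as 0xfe3f8. */
	unsigned long sparse = IO_SPACE_SPARSE_ENCODING(off);

	outb(0x80, port);	/* dispatched through platform_outb() */
	(void)space; (void)sparse;
}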
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/kregs.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/kregs.h        Wed Aug  3 00:25:11 2005
@@ -0,0 +1,199 @@
+#ifndef _ASM_IA64_KREGS_H
+#define _ASM_IA64_KREGS_H
+
+/*
+ * Copyright (C) 2001-2002 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+/*
+ * This file defines the kernel register usage convention used by Linux/ia64.
+ */
+
+/*
+ * Kernel registers:
+ */
+#define IA64_KR_IO_BASE		0	/* ar.k0: legacy I/O base address */
+#define IA64_KR_TSSD		1	/* ar.k1: IVE uses this as the TSSD */
+#define IA64_KR_PER_CPU_DATA	3	/* ar.k3: physical per-CPU base */
+#define IA64_KR_CURRENT_STACK	4	/* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
+#define IA64_KR_FPU_OWNER	5	/* ar.k5: fpu-owner (UP only, at the moment) */
+#define IA64_KR_CURRENT		6	/* ar.k6: "current" task pointer */
+#define IA64_KR_PT_BASE		7	/* ar.k7: page table base address (physical) */
+
+#define _IA64_KR_PASTE(x,y)    x##y
+#define _IA64_KR_PREFIX(n)     _IA64_KR_PASTE(ar.k, n)
+#define IA64_KR(n)             _IA64_KR_PREFIX(IA64_KR_##n)
+
+/*
+ * Translation registers:
+ */
+#define IA64_TR_KERNEL		0	/* itr0, dtr0: maps kernel image (code & data) */
+#define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
+#ifdef CONFIG_VTI
+#define IA64_TR_XEN_IN_DOM	6	/* itr6, dtr6: Double mapping for xen image in domain space */
+#endif // CONFIG_VTI
+#define IA64_TR_PERCPU_DATA	1	/* dtr1: percpu data */
+#define IA64_TR_CURRENT_STACK	2	/* dtr2: maps kernel's memory- & register-stacks */
+#ifdef XEN
+#define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
+#define	IA64_TR_VHPT		4	/* dtr4: vhpt */
+#define IA64_TR_ARCH_INFO	5
+#ifdef CONFIG_VTI
+#define IA64_TR_VHPT_IN_DOM	5	/* dtr5: Double mapping for vhpt table in domain space */
+#define IA64_TR_RR7_SWITCH_STUB	7	/* dtr7: mapping for rr7 switch stub */
+#define IA64_TEMP_PHYSICAL	8	/* itr8, dtr8: temp mapping for guest physical memory 256M */
+#endif // CONFIG_VTI
+#endif
+#endif
+
+/* Processor status register bits: */
+#define IA64_PSR_BE_BIT                1
+#define IA64_PSR_UP_BIT                2
+#define IA64_PSR_AC_BIT                3
+#define IA64_PSR_MFL_BIT       4
+#define IA64_PSR_MFH_BIT       5
+#define IA64_PSR_IC_BIT                13
+#define IA64_PSR_I_BIT         14
+#define IA64_PSR_PK_BIT                15
+#define IA64_PSR_DT_BIT                17
+#define IA64_PSR_DFL_BIT       18
+#define IA64_PSR_DFH_BIT       19
+#define IA64_PSR_SP_BIT                20
+#define IA64_PSR_PP_BIT                21
+#define IA64_PSR_DI_BIT                22
+#define IA64_PSR_SI_BIT                23
+#define IA64_PSR_DB_BIT                24
+#define IA64_PSR_LP_BIT                25
+#define IA64_PSR_TB_BIT                26
+#define IA64_PSR_RT_BIT                27
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL0_BIT      32
+#define IA64_PSR_CPL1_BIT      33
+#define IA64_PSR_IS_BIT                34
+#define IA64_PSR_MC_BIT                35
+#define IA64_PSR_IT_BIT                36
+#define IA64_PSR_ID_BIT                37
+#define IA64_PSR_DA_BIT                38
+#define IA64_PSR_DD_BIT                39
+#define IA64_PSR_SS_BIT                40
+#define IA64_PSR_RI_BIT                41
+#define IA64_PSR_ED_BIT                43
+#define IA64_PSR_BN_BIT                44
+#define IA64_PSR_IA_BIT                45
+#ifdef CONFIG_VTI
+#define IA64_PSR_VM_BIT                46
+#endif // CONFIG_VTI
+
+/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
+   execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
+   execve().  */
+#define IA64_PSR_BITS_TO_CLEAR	(IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
+				 IA64_PSR_TB  | IA64_PSR_ID  | IA64_PSR_DA | IA64_PSR_DD | \
+				 IA64_PSR_SS  | IA64_PSR_ED  | IA64_PSR_IA)
+#define IA64_PSR_BITS_TO_SET   (IA64_PSR_DFH | IA64_PSR_SP)
+
+#define IA64_PSR_BE    (__IA64_UL(1) << IA64_PSR_BE_BIT)
+#define IA64_PSR_UP    (__IA64_UL(1) << IA64_PSR_UP_BIT)
+#define IA64_PSR_AC    (__IA64_UL(1) << IA64_PSR_AC_BIT)
+#define IA64_PSR_MFL   (__IA64_UL(1) << IA64_PSR_MFL_BIT)
+#define IA64_PSR_MFH   (__IA64_UL(1) << IA64_PSR_MFH_BIT)
+#define IA64_PSR_IC    (__IA64_UL(1) << IA64_PSR_IC_BIT)
+#define IA64_PSR_I     (__IA64_UL(1) << IA64_PSR_I_BIT)
+#define IA64_PSR_PK    (__IA64_UL(1) << IA64_PSR_PK_BIT)
+#define IA64_PSR_DT    (__IA64_UL(1) << IA64_PSR_DT_BIT)
+#define IA64_PSR_DFL   (__IA64_UL(1) << IA64_PSR_DFL_BIT)
+#define IA64_PSR_DFH   (__IA64_UL(1) << IA64_PSR_DFH_BIT)
+#define IA64_PSR_SP    (__IA64_UL(1) << IA64_PSR_SP_BIT)
+#define IA64_PSR_PP    (__IA64_UL(1) << IA64_PSR_PP_BIT)
+#define IA64_PSR_DI    (__IA64_UL(1) << IA64_PSR_DI_BIT)
+#define IA64_PSR_SI    (__IA64_UL(1) << IA64_PSR_SI_BIT)
+#define IA64_PSR_DB    (__IA64_UL(1) << IA64_PSR_DB_BIT)
+#define IA64_PSR_LP    (__IA64_UL(1) << IA64_PSR_LP_BIT)
+#define IA64_PSR_TB    (__IA64_UL(1) << IA64_PSR_TB_BIT)
+#define IA64_PSR_RT    (__IA64_UL(1) << IA64_PSR_RT_BIT)
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL   (__IA64_UL(3) << IA64_PSR_CPL0_BIT)
+#define IA64_PSR_IS    (__IA64_UL(1) << IA64_PSR_IS_BIT)
+#define IA64_PSR_MC    (__IA64_UL(1) << IA64_PSR_MC_BIT)
+#define IA64_PSR_IT    (__IA64_UL(1) << IA64_PSR_IT_BIT)
+#define IA64_PSR_ID    (__IA64_UL(1) << IA64_PSR_ID_BIT)
+#define IA64_PSR_DA    (__IA64_UL(1) << IA64_PSR_DA_BIT)
+#define IA64_PSR_DD    (__IA64_UL(1) << IA64_PSR_DD_BIT)
+#define IA64_PSR_SS    (__IA64_UL(1) << IA64_PSR_SS_BIT)
+#define IA64_PSR_RI    (__IA64_UL(3) << IA64_PSR_RI_BIT)
+#define IA64_PSR_ED    (__IA64_UL(1) << IA64_PSR_ED_BIT)
+#define IA64_PSR_BN    (__IA64_UL(1) << IA64_PSR_BN_BIT)
+#define IA64_PSR_IA    (__IA64_UL(1) << IA64_PSR_IA_BIT)
+#ifdef CONFIG_VTI
+#define IA64_PSR_VM    (__IA64_UL(1) << IA64_PSR_VM_BIT)
+#endif // CONFIG_VTI
+
+/* User mask bits: */
+#define IA64_PSR_UM	(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
+
+/* Default Control Register */
+#define IA64_DCR_PP_BIT		 0	/* privileged performance monitor default */
+#define IA64_DCR_BE_BIT		 1	/* big-endian default */
+#define IA64_DCR_LC_BIT		 2	/* ia32 lock-check enable */
+#define IA64_DCR_DM_BIT		 8	/* defer TLB miss faults */
+#define IA64_DCR_DP_BIT		 9	/* defer page-not-present faults */
+#define IA64_DCR_DK_BIT		10	/* defer key miss faults */
+#define IA64_DCR_DX_BIT		11	/* defer key permission faults */
+#define IA64_DCR_DR_BIT		12	/* defer access right faults */
+#define IA64_DCR_DA_BIT		13	/* defer access bit faults */
+#define IA64_DCR_DD_BIT		14	/* defer debug faults */
+
+#define IA64_DCR_PP    (__IA64_UL(1) << IA64_DCR_PP_BIT)
+#define IA64_DCR_BE    (__IA64_UL(1) << IA64_DCR_BE_BIT)
+#define IA64_DCR_LC    (__IA64_UL(1) << IA64_DCR_LC_BIT)
+#define IA64_DCR_DM    (__IA64_UL(1) << IA64_DCR_DM_BIT)
+#define IA64_DCR_DP    (__IA64_UL(1) << IA64_DCR_DP_BIT)
+#define IA64_DCR_DK    (__IA64_UL(1) << IA64_DCR_DK_BIT)
+#define IA64_DCR_DX    (__IA64_UL(1) << IA64_DCR_DX_BIT)
+#define IA64_DCR_DR    (__IA64_UL(1) << IA64_DCR_DR_BIT)
+#define IA64_DCR_DA    (__IA64_UL(1) << IA64_DCR_DA_BIT)
+#define IA64_DCR_DD    (__IA64_UL(1) << IA64_DCR_DD_BIT)
+
+/* Interrupt Status Register */
+#define IA64_ISR_X_BIT         32      /* execute access */
+#define IA64_ISR_W_BIT         33      /* write access */
+#define IA64_ISR_R_BIT         34      /* read access */
+#define IA64_ISR_NA_BIT                35      /* non-access */
+#define IA64_ISR_SP_BIT                36      /* speculative load exception */
+#define IA64_ISR_RS_BIT		37	/* mandatory register-stack exception */
+#define IA64_ISR_IR_BIT		38	/* invalid register frame exception */
+#define IA64_ISR_CODE_MASK     0xf
+
+#define IA64_ISR_X     (__IA64_UL(1) << IA64_ISR_X_BIT)
+#define IA64_ISR_W     (__IA64_UL(1) << IA64_ISR_W_BIT)
+#define IA64_ISR_R     (__IA64_UL(1) << IA64_ISR_R_BIT)
+#define IA64_ISR_NA    (__IA64_UL(1) << IA64_ISR_NA_BIT)
+#define IA64_ISR_SP    (__IA64_UL(1) << IA64_ISR_SP_BIT)
+#define IA64_ISR_RS    (__IA64_UL(1) << IA64_ISR_RS_BIT)
+#define IA64_ISR_IR    (__IA64_UL(1) << IA64_ISR_IR_BIT)
+
+/* ISR code field for non-access instructions */
+#define IA64_ISR_CODE_TPA      0
+#define IA64_ISR_CODE_FC       1
+#define IA64_ISR_CODE_PROBE    2
+#define IA64_ISR_CODE_TAK      3
+#define IA64_ISR_CODE_LFETCH   4
+#define IA64_ISR_CODE_PROBEF   5
+
+#ifdef XEN
+/* Interruption Function State */
+#define IA64_IFS_V_BIT         63
+#define IA64_IFS_V     (__IA64_UL(1) << IA64_IFS_V_BIT)
+
+/* Page Table Address */
+#define IA64_PTA_VE_BIT 0
+#define IA64_PTA_SIZE_BIT 2
+#define IA64_PTA_VF_BIT 8
+#define IA64_PTA_BASE_BIT 15
+
+#define IA64_PTA_VE     (__IA64_UL(1) << IA64_PTA_VE_BIT)
+#define IA64_PTA_SIZE   (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
+#define IA64_PTA_VF     (__IA64_UL(1) << IA64_PTA_VF_BIT)
+#define IA64_PTA_BASE   (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
+#endif
+
+#endif /* _ASM_IA64_KREGS_H */
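A sketch of how the PSR/ISR masks above get used in fault handling, given
cr.isr and cr.ipsr values; the function names are hypothetical and this is
illustrative only:

static int fault_was_write(unsigned long isr)
{
	return (isr & IA64_ISR_W) != 0;		/* write access triggered it */
}

static int faulted_at_user_level(unsigned long ipsr)
{
	/* psr.cpl is a two-bit field; 3 == least privileged. */
	return ((ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT) == 3;
}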
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/mca_asm.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/mca_asm.h      Wed Aug  3 00:25:11 2005
@@ -0,0 +1,323 @@
+/*
+ * File:       mca_asm.h
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander (vijay@xxxxxxxxxxxx)
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@xxxxxxxxxxxx>
+ * Copyright (C) 2000 Hewlett-Packard Co.
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 2002 Intel Corp.
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@xxxxxxxxx>
+ */
+#ifndef _ASM_IA64_MCA_ASM_H
+#define _ASM_IA64_MCA_ASM_H
+
+#define PSR_IC         13
+#define PSR_I          14
+#define        PSR_DT          17
+#define PSR_RT         27
+#define PSR_MC         35
+#define PSR_IT         36
+#define PSR_BN         44
+
+/*
+ * This macro converts an instruction virtual address to a physical address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ *     1. Lop off bits 61 thru 63 in the virtual address
+ */
+#ifdef XEN
+#define INST_VA_TO_PA(addr)                                                    
\
+       dep     addr    = 0, addr, 60, 4
+#else // XEN
+#define INST_VA_TO_PA(addr)                                                    
\
+       dep     addr    = 0, addr, 61, 3
+#endif // XEN
+/*
+ * This macro converts a data virtual address to a physical address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ *     1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define DATA_VA_TO_PA(addr)                                                    \
+       tpa     addr    = addr
+/*
+ * This macro converts a data physical address to a virtual address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ *     1. Put 0x7 in bits 61 thru 63 (0xf in bits 60 thru 63 under XEN).
+ */
+#ifdef XEN
+#define DATA_PA_TO_VA(addr,temp)                                               \
+       mov     temp    = 0xf   ;;                                              \
+       dep     addr    = temp, addr, 60, 4
+#else // XEN
+#define DATA_PA_TO_VA(addr,temp)                                               \
+       mov     temp    = 0x7   ;;                                              \
+       dep     addr    = temp, addr, 61, 3
+#endif // XEN
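
Both conversions rely on the identity mapping noted above, so in C terms they
reduce to clearing or setting the region bits. A sketch of what the XEN
variants compute (illustration only; the real conversions must run in
assembly, and DATA_VA_TO_PA uses the tpa instruction rather than masking):

    /* Assumes the XEN identity map, region field in bits 60-63. */
    #define SKETCH_VA_TO_PA(va)  ((va) & ((1UL << 60) - 1))  /* clear 60-63 */
    #define SKETCH_PA_TO_VA(pa)  ((pa) | (0xfUL << 60))      /* set 60-63   */
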
+
+#define GET_THIS_PADDR(reg, var)               \
+       mov     reg = IA64_KR(PER_CPU_DATA);;   \
+        addl   reg = THIS_CPU(var), reg
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in physical mode with all the address
+ * translations turned off.
+ *     1.      Save the current psr
+ *     2.      Make sure that all the upper 32 bits are off
+ *
+ *     3.      Clear the interrupt enable and interrupt state collection bits
+ *             in the psr before updating the ipsr and iip.
+ *
+ *     4.      Turn off the instruction, data and rse translation bits of the psr
+ *             and store the new value into ipsr.
+ *             Also make sure that the interrupts are disabled.
+ *             Ensure that we are in little endian mode.
+ *             [psr.{rt, it, dt, i, be} = 0]
+ *
+ *     5.      Get the physical address corresponding to the virtual address
+ *             of the next instruction bundle and put it in iip.
+ *             (Using magic numbers 24 and 40 in the deposit instruction since
+ *              the IA64_SDK code directly maps the lower 24 bits as the physical
+ *              address from a virtual address).
+ *
+ *     6.      Do an rfi to move the values from ipsr to psr and iip to ip.
+ */
+#define  PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)                \
+       mov     old_psr = psr;                                                  \
+       ;;                                                                      \
+       dep     old_psr = 0, old_psr, 32, 32;                                   \
+                                                                               \
+       mov     ar.rsc = 0 ;                                                    \
+       ;;                                                                      \
+       srlz.d;                                                                 \
+       mov     temp2 = ar.bspstore;                                            \
+       ;;                                                                      \
+       DATA_VA_TO_PA(temp2);                                                   \
+       ;;                                                                      \
+       mov     temp1 = ar.rnat;                                                \
+       ;;                                                                      \
+       mov     ar.bspstore = temp2;                                            \
+       ;;                                                                      \
+       mov     ar.rnat = temp1;                                                \
+       mov     temp1 = psr;                                                    \
+       mov     temp2 = psr;                                                    \
+       ;;                                                                      \
+                                                                               \
+       dep     temp2 = 0, temp2, PSR_IC, 2;                                    \
+       ;;                                                                      \
+       mov     psr.l = temp2;                                                  \
+       ;;                                                                      \
+       srlz.d;                                                                 \
+       dep     temp1 = 0, temp1, 32, 32;                                       \
+       ;;                                                                      \
+       dep     temp1 = 0, temp1, PSR_IT, 1;                                    \
+       ;;                                                                      \
+       dep     temp1 = 0, temp1, PSR_DT, 1;                                    \
+       ;;                                                                      \
+       dep     temp1 = 0, temp1, PSR_RT, 1;                                    \
+       ;;                                                                      \
+       dep     temp1 = 0, temp1, PSR_I, 1;                                     \
+       ;;                                                                      \
+       dep     temp1 = 0, temp1, PSR_IC, 1;                                    \
+       ;;                                                                      \
+       dep     temp1 = -1, temp1, PSR_MC, 1;                                   \
+       ;;                                                                      \
+       mov     cr.ipsr = temp1;                                                \
+       ;;                                                                      \
+       LOAD_PHYSICAL(p0, temp2, start_addr);                                   \
+       ;;                                                                      \
+       mov     cr.iip = temp2;                                                 \
+       mov     cr.ifs = r0;                                                    \
+       DATA_VA_TO_PA(sp);                                                      \
+       DATA_VA_TO_PA(gp);                                                      \
+       ;;                                                                      \
+       srlz.i;                                                                 \
+       ;;                                                                      \
+       nop     1;                                                              \
+       nop     2;                                                              \
+       nop     1;                                                              \
+       nop     2;                                                              \
+       rfi;                                                                    \
+       ;;
+
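In C terms, steps 2-4 above amount to the following ipsr computation (a
sketch only; psr_value stands for the psr read in assembly, and psr can only
be written from assembly):

    unsigned long ipsr = psr_value & 0xffffffffUL;  /* step 2: upper 32 off */
    ipsr &= ~((1UL << PSR_IT) | (1UL << PSR_DT) |   /* translations off     */
              (1UL << PSR_RT) |
              (1UL << PSR_I)  | (1UL << PSR_IC));   /* interrupts, ic off   */
    ipsr |= 1UL << PSR_MC;                          /* mask machine checks  */
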
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in virtual mode with all the address
+ * translations turned on.
+ *     1.      Get the old saved psr
+ *
+ *     2.      Clear the interrupt state collection bit in the current psr.
+ *
+ *     3.      Set the instruction translation bit back in the old psr
+ *             Note we have to do this since we are right now saving only the
+ *             lower 32-bits of old psr. (Also the old psr has the data and
+ *             rse translation bits on)
+ *
+ *     4.      Set ipsr to this old_psr with "it" bit set and "bn" = 1.
+ *
+ *     5.      Reset the current thread pointer (r13).
+ *
+ *     6.      Set iip to the virtual address of the next instruction bundle.
+ *
+ *     7.      Do an rfi to move ipsr to psr and iip to ip.
+ */
+
+#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)  \
+       mov     temp2 = psr;                                    \
+       ;;                                                      \
+       mov     old_psr = temp2;                                \
+       ;;                                                      \
+       dep     temp2 = 0, temp2, PSR_IC, 2;                    \
+       ;;                                                      \
+       mov     psr.l = temp2;                                  \
+       mov     ar.rsc = 0;                                     \
+       ;;                                                      \
+       srlz.d;                                                 \
+       mov     r13 = ar.k6;                                    \
+       mov     temp2 = ar.bspstore;                            \
+       ;;                                                      \
+       DATA_PA_TO_VA(temp2,temp1);                             \
+       ;;                                                      \
+       mov     temp1 = ar.rnat;                                \
+       ;;                                                      \
+       mov     ar.bspstore = temp2;                            \
+       ;;                                                      \
+       mov     ar.rnat = temp1;                                \
+       ;;                                                      \
+       mov     temp1 = old_psr;                                \
+       ;;                                                      \
+       mov     temp2 = 1;                                      \
+       ;;                                                      \
+       dep     temp1 = temp2, temp1, PSR_IC, 1;                \
+       ;;                                                      \
+       dep     temp1 = temp2, temp1, PSR_IT, 1;                \
+       ;;                                                      \
+       dep     temp1 = temp2, temp1, PSR_DT, 1;                \
+       ;;                                                      \
+       dep     temp1 = temp2, temp1, PSR_RT, 1;                \
+       ;;                                                      \
+       dep     temp1 = temp2, temp1, PSR_BN, 1;                \
+       ;;                                                      \
+                                                               \
+       mov     cr.ipsr = temp1;                                \
+       movl    temp2 = start_addr;                             \
+       ;;                                                      \
+       mov     cr.iip = temp2;                                 \
+       ;;                                                      \
+       DATA_PA_TO_VA(sp, temp1);                               \
+       DATA_PA_TO_VA(gp, temp2);                               \
+       srlz.i;                                                 \
+       ;;                                                      \
+       nop     1;                                              \
+       nop     2;                                              \
+       nop     1;                                              \
+       rfi                                                     \
+       ;;
+
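The ipsr built for the return to virtual mode, per steps 2-4 above, looks
like this in C terms (again a sketch; saved_psr is the saved lower 32 bits
of the old psr):

    unsigned long ipsr = saved_psr;
    ipsr |= (1UL << PSR_IC) | (1UL << PSR_IT) |     /* ic, translations on  */
            (1UL << PSR_DT) | (1UL << PSR_RT) |
            (1UL << PSR_BN);                        /* register bank 1      */
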
+/*
+ * The following offsets capture the order in which the
+ * RSE related registers from the old context are
+ * saved onto the new stack frame.
+ *
+ *     +-----------------------+
+ *     |NDIRTY [BSP - BSPSTORE]|
+ *     +-----------------------+
+ *     |       RNAT            |
+ *     +-----------------------+
+ *     |       BSPSTORE        |
+ *     +-----------------------+
+ *     |       IFS             |
+ *     +-----------------------+
+ *     |       PFS             |
+ *     +-----------------------+
+ *     |       RSC             |
+ *     +-----------------------+ <-------- Bottom of new stack frame
+ */
+#define  rse_rsc_offset                0
+#define  rse_pfs_offset                (rse_rsc_offset+0x08)
+#define  rse_ifs_offset                (rse_pfs_offset+0x08)
+#define  rse_bspstore_offset   (rse_ifs_offset+0x08)
+#define  rse_rnat_offset       (rse_bspstore_offset+0x08)
+#define  rse_ndirty_offset     (rse_rnat_offset+0x08)
+
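As a reading aid, the same save area expressed as a C struct (hypothetical;
the macros below only ever address it through the byte offsets):

    struct rse_save_frame {             /* bottom of new stack frame        */
        unsigned long rsc;              /* rse_rsc_offset      == 0x00      */
        unsigned long pfs;              /* rse_pfs_offset      == 0x08      */
        unsigned long ifs;              /* rse_ifs_offset      == 0x10      */
        unsigned long bspstore;         /* rse_bspstore_offset == 0x18      */
        unsigned long rnat;             /* rse_rnat_offset     == 0x20      */
        unsigned long ndirty;           /* rse_ndirty_offset   == 0x28,
                                         * BSP - BSPSTORE in bytes          */
    };
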
+/*
+ * rse_switch_context
+ *
+ *     1. Save old RSC onto the new stack frame
+ *     2. Save PFS onto new stack frame
+ *     3. Cover the old frame and start a new frame.
+ *     4. Save IFS onto new stack frame
+ *     5. Save the old BSPSTORE on the new stack frame
+ *     6. Save the old RNAT on the new stack frame
+ *     7. Write BSPSTORE with the new backing store pointer
+ *     8. Read and save the new BSP to calculate the #dirty registers
+ * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
+ */
+#define rse_switch_context(temp,p_stackframe,p_bspstore)                       \
+       ;;                                                                      \
+       mov     temp=ar.rsc;;                                                   \
+       st8     [p_stackframe]=temp,8;;                                         \
+       mov     temp=ar.pfs;;                                                   \
+       st8     [p_stackframe]=temp,8;                                          \
+       cover ;;                                                                \
+       mov     temp=cr.ifs;;                                                   \
+       st8     [p_stackframe]=temp,8;;                                         \
+       mov     temp=ar.bspstore;;                                              \
+       st8     [p_stackframe]=temp,8;;                                         \
+       mov     temp=ar.rnat;;                                                  \
+       st8     [p_stackframe]=temp,8;                                          \
+       mov     ar.bspstore=p_bspstore;;                                        \
+       mov     temp=ar.bsp;;                                                   \
+       sub     temp=temp,p_bspstore;;                                          \
+       st8     [p_stackframe]=temp,8;;
+
+/*
+ * rse_return_context
+ *     1. Allocate a zero-sized frame
+ *     2. Store the number of dirty registers in the RSC.loadrs field
+ *     3. Issue a loadrs to ensure that any registers from the interrupted
+ *        context which were saved on the new stack frame have been loaded
+ *        back into the stacked registers
+ *     4. Restore BSPSTORE
+ *     5. Restore RNAT
+ *     6. Restore PFS
+ *     7. Restore IFS
+ *     8. Restore RSC
+ *     9. Issue an RFI
+ */
+#define rse_return_context(psr_mask_reg,temp,p_stackframe)                     \
+       ;;                                                                      \
+       alloc   temp=ar.pfs,0,0,0,0;                                            \
+       add     p_stackframe=rse_ndirty_offset,p_stackframe;;                   \
+       ld8     temp=[p_stackframe];;                                           \
+       shl     temp=temp,16;;                                                  \
+       mov     ar.rsc=temp;;                                                   \
+       loadrs;;                                                                \
+       add     p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
+       ld8     temp=[p_stackframe];;                                           \
+       mov     ar.bspstore=temp;;                                              \
+       add     p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
+       ld8     temp=[p_stackframe];;                                           \
+       mov     ar.rnat=temp;;                                                  \
+       add     p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;     \
+       ld8     temp=[p_stackframe];;                                           \
+       mov     ar.pfs=temp;;                                                   \
+       add     p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;      \
+       ld8     temp=[p_stackframe];;                                           \
+       mov     cr.ifs=temp;;                                                   \
+       add     p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;      \
+       ld8     temp=[p_stackframe];;                                           \
+       mov     ar.rsc=temp ;                                                   \
+       mov     temp=psr;;                                                      \
+       or      temp=temp,psr_mask_reg;;                                        \
+       mov     cr.ipsr=temp;;                                                  \
+       mov     temp=ip;;                                                       \
+       add     temp=0x30,temp;;                                                \
+       mov     cr.iip=temp;;                                                   \
+       srlz.i;;                                                                \
+       rfi;;
+
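Step 2 hides a detail worth spelling out: the loadrs field occupies bits
16-29 of ar.rsc, so the saved dirty-register byte count is shifted left by
16 before the loadrs. In C terms (a sketch, reusing the hypothetical struct
from above):

    struct rse_save_frame *frame;           /* bottom of the save area      */
    unsigned long ndirty = frame->ndirty;   /* saved ar.bsp - ar.bspstore   */
    unsigned long rsc    = ndirty << 16;    /* RSC.loadrs; mode/pl/be all 0 */
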
+#endif /* _ASM_IA64_MCA_ASM_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/page.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/page.h Wed Aug  3 00:25:11 2005
@@ -0,0 +1,238 @@
+#ifndef _ASM_IA64_PAGE_H
+#define _ASM_IA64_PAGE_H
+/*
+ * Pagetable related stuff.
+ *
+ * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <asm/intrinsics.h>
+#include <asm/types.h>
+
+/*
+ * PAGE_SHIFT determines the actual kernel page size.
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define PAGE_SHIFT    12
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define PAGE_SHIFT    13
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define PAGE_SHIFT    14
+#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
+# define PAGE_SHIFT    16
+#else
+# error Unsupported page size!
+#endif
+
+#define PAGE_SIZE              (__IA64_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK              (~(PAGE_SIZE - 1))
+#define PAGE_ALIGN(addr)       (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
+
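A quick worked example of the alignment arithmetic, assuming 16KB pages
(PAGE_SHIFT == 14, so PAGE_SIZE == 0x4000 and PAGE_MASK == ~0x3fff):

    unsigned long a = PAGE_ALIGN(0x4001);   /* (0x4001+0x3fff) & ~0x3fff
                                             * == 0x8000                    */
    unsigned long b = PAGE_ALIGN(0x4000);   /* already aligned: 0x4000      */
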
+#define PERCPU_PAGE_SHIFT      16      /* log2() of max. size of per-CPU area */
+
+#define PERCPU_PAGE_SIZE       (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
+
+#define RGN_MAP_LIMIT  ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)      /* per region addr limit */
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define REGION_HPAGE          (4UL)   /* note: this is hardcoded in reload_context()!*/
+# define REGION_SHIFT          61
+# define HPAGE_REGION_BASE     (REGION_HPAGE << REGION_SHIFT)
+# define HPAGE_SHIFT           hpage_shift
+# define HPAGE_SHIFT_DEFAULT   28      /* check ia64 SDM for architecture supported size */
+# define HPAGE_SIZE            (__IA64_UL_CONST(1) << HPAGE_SHIFT)
+# define HPAGE_MASK            (~(HPAGE_SIZE - 1))
+
+# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#ifdef __ASSEMBLY__
+# define __pa(x)               ((x) - PAGE_OFFSET)
+# define __va(x)               ((x) + PAGE_OFFSET)
+#else /* !__ASSEMBLY */
+# ifdef __KERNEL__
+#  define STRICT_MM_TYPECHECKS
+
+extern void clear_page (void *page);
+extern void copy_page (void *to, void *from);
+
+/*
+ * clear_user_page() and copy_user_page() can't be inline functions because
+ * flush_dcache_page() can't be defined until later...
+ */
+#define clear_user_page(addr, vaddr, page)     \
+do {                                           \
+       clear_page(addr);                       \
+       flush_dcache_page(page);                \
+} while (0)
+
+#define copy_user_page(to, from, vaddr, page)  \
+do {                                           \
+       copy_page((to), (from));                \
+       flush_dcache_page(page);                \
+} while (0)
+
+
+#define alloc_zeroed_user_highpage(vma, vaddr) \
+({                                             \
+       struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \
+       if (page)                               \
+               flush_dcache_page(page);        \
+       page;                                   \
+})
+
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+extern int ia64_pfn_valid (unsigned long pfn);
+#else
+# define ia64_pfn_valid(pfn) 1
+#endif
+
+#ifndef CONFIG_DISCONTIGMEM
+#ifdef XEN
+# define pfn_valid(pfn)                (0)
+# define page_to_pfn(_page)    ((unsigned long)((_page) - frame_table))
+# define pfn_to_page(_pfn)     (frame_table + (_pfn))
+#else
+# define pfn_valid(pfn)                (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+# define page_to_pfn(page)     ((unsigned long) (page - mem_map))
+# define pfn_to_page(pfn)      (mem_map + (pfn))
+#endif
+#else
+extern struct page *vmem_map;
+extern unsigned long max_low_pfn;
+# define pfn_valid(pfn)                (((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
+# define page_to_pfn(page)     ((unsigned long) (page - vmem_map))
+# define pfn_to_page(pfn)      (vmem_map + (pfn))
+#endif
+
+#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#ifdef XEN
+#define page_to_virt(_page)    phys_to_virt(page_to_phys(_page))
+#define phys_to_page(kaddr)    pfn_to_page(((kaddr) >> PAGE_SHIFT))
+#endif
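
Under XEN these conversions pivot around frame_table rather than mem_map,
but the round trip has the same shape. A sketch of the macros chaining
together (the pfn value is arbitrary):

    unsigned long pfn = 42;                 /* arbitrary example frame      */
    struct page *pg   = pfn_to_page(pfn);   /* frame_table + 42             */
    unsigned long pa  = page_to_phys(pg);   /* 42 << PAGE_SHIFT             */
    void *va          = page_to_virt(pg);   /* phys_to_virt(pa)             */
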
+
+typedef union ia64_va {
+       struct {
+               unsigned long off : 61;         /* intra-region offset */
+               unsigned long reg :  3;         /* region number */
+       } f;
+       unsigned long l;
+       void *p;
+} ia64_va;
+
+/*
+ * Note: These macros depend on the fact that PAGE_OFFSET has all
+ * region bits set to 1 and all other bits set to zero.  They are
+ * expressed in this way to ensure they result in a single "dep"
+ * instruction.
+ */
+#ifdef XEN
+typedef union xen_va {
+       struct {
+               unsigned long off : 60;
+               unsigned long reg : 4;
+       } f;
+       unsigned long l;
+       void *p;
+} xen_va;
+
+// xen/drivers/console.c uses __va in a declaration (should be fixed!)
+#define __pa(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __va(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+#else
+#define __pa(x)                ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __va(x)                ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+#endif
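
Numerically, the Xen variants write all-ones (0xf) or zero into the 4-bit
region field, matching the Xen PAGE_OFFSET of 0xf000000000000000 defined
further down. A worked example:

    void *va         = __va(0x100000UL);    /* (void *) 0xf000000000100000  */
    unsigned long pa = __pa(va);            /* 0x0000000000100000           */
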
+
+#define REGION_NUMBER(x)       ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
+#define REGION_OFFSET(x)       ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
+
+#define REGION_SIZE            REGION_NUMBER(1)
+#define REGION_KERNEL          7
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define htlbpage_to_page(x)   (((unsigned long) REGION_NUMBER(x) << 61)       \
+                                | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
+# define HUGETLB_PAGE_ORDER    (HPAGE_SHIFT - PAGE_SHIFT)
+# define is_hugepage_only_range(addr, len)             \
+        (REGION_NUMBER(addr) == REGION_HPAGE &&        \
+         REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
+extern unsigned int hpage_shift;
+#endif
+
+static __inline__ int
+get_order (unsigned long size)
+{
+       long double d = size - 1;
+       long order;
+
+       order = ia64_getf_exp(d);
+       order = order - PAGE_SHIFT - 0xffff + 1;
+       if (order < 0)
+               order = 0;
+       return order;
+}
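
The getf.exp trick reads the exponent field of (size - 1) viewed as a long
double (exponent bias 0xffff), i.e. floor(log2(size - 1)), without a loop.
A portable loop-based sketch of the same result, for illustration only:

    static inline int
    get_order_sketch (unsigned long size)
    {
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {                      /* count bits above PAGE_SHIFT  */
            order++;
            size >>= 1;
        }
        return order;
    }
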
+
+# endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+
+#ifdef STRICT_MM_TYPECHECKS
+  /*
+   * These are used to make use of C type-checking..
+   */
+  typedef struct { unsigned long pte; } pte_t;
+  typedef struct { unsigned long pmd; } pmd_t;
+  typedef struct { unsigned long pgd; } pgd_t;
+  typedef struct { unsigned long pgprot; } pgprot_t;
+
+# define pte_val(x)    ((x).pte)
+# define pmd_val(x)    ((x).pmd)
+# define pgd_val(x)    ((x).pgd)
+# define pgprot_val(x) ((x).pgprot)
+
+# define __pte(x)      ((pte_t) { (x) } )
+# define __pgprot(x)   ((pgprot_t) { (x) } )
+
+#else /* !STRICT_MM_TYPECHECKS */
+  /*
+   * .. while these make it easier on the compiler
+   */
+# ifndef __ASSEMBLY__
+    typedef unsigned long pte_t;
+    typedef unsigned long pmd_t;
+    typedef unsigned long pgd_t;
+    typedef unsigned long pgprot_t;
+# endif
+
+# define pte_val(x)    (x)
+# define pmd_val(x)    (x)
+# define pgd_val(x)    (x)
+# define pgprot_val(x) (x)
+
+# define __pte(x)      (x)
+# define __pgd(x)      (x)
+# define __pgprot(x)   (x)
+#endif /* !STRICT_MM_TYPECHECKS */
+
+#ifdef XEN
+#define PAGE_OFFSET                    __IA64_UL_CONST(0xf000000000000000)
+#else
+#define PAGE_OFFSET                    __IA64_UL_CONST(0xe000000000000000)
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS          (VM_READ | VM_WRITE |                   \
+                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |\
+                                        (((current->personality & READ_IMPLIES_EXEC) != 0)     \
+                                         ? VM_EXEC : 0))
+
+#endif /* _ASM_IA64_PAGE_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/pal.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/pal.h  Wed Aug  3 00:25:11 2005
@@ -0,0 +1,1567 @@
+#ifndef _ASM_IA64_PAL_H
+#define _ASM_IA64_PAL_H
+
+/*
+ * Processor Abstraction Layer definitions.
+ *
+ * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0
+ * chapter 11 IA-64 Processor Abstraction Layer
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@xxxxxxxxxxxxxxxxxxxx>
+ *
+ * 99/10/01    davidm  Make sure we pass zero for reserved parameters.
+ * 00/03/07    davidm  Updated pal_cache_flush() to be in sync with PAL v2.6.
+ * 00/03/23     cfleck  Modified processor min-state save area to match updated PAL & SAL info
+ * 00/05/24     eranian Updated to latest PAL spec, fix structures bugs, added
+ * 00/05/25    eranian Support for stack calls, and static physical calls
+ * 00/06/18    eranian Support for stacked physical calls
+ */
+
+/*
+ * Note that some of these calls use a static-register only calling
+ * convention which has nothing to do with the regular calling
+ * convention.
+ */
+#define PAL_CACHE_FLUSH                1       /* flush i/d cache */
+#define PAL_CACHE_INFO         2       /* get detailed i/d cache info */
+#define PAL_CACHE_INIT         3       /* initialize i/d cache */
+#define PAL_CACHE_SUMMARY      4       /* get summary of cache hierarchy */
+#define PAL_MEM_ATTRIB         5       /* list supported memory attributes */
+#define PAL_PTCE_INFO          6       /* purge TLB info */
+#define PAL_VM_INFO            7       /* return supported virtual memory features */
+#define PAL_VM_SUMMARY         8       /* return summary on supported vm features */
+#define PAL_BUS_GET_FEATURES   9       /* return processor bus interface features settings */
+#define PAL_BUS_SET_FEATURES   10      /* set processor bus features */
+#define PAL_DEBUG_INFO         11      /* get number of debug registers */
+#define PAL_FIXED_ADDR         12      /* get fixed component of processor's directed address */
+#define PAL_FREQ_BASE          13      /* base frequency of the platform */
+#define PAL_FREQ_RATIOS                14      /* ratio of processor, bus and ITC frequency */
+#define PAL_PERF_MON_INFO      15      /* return performance monitor info */
+#define PAL_PLATFORM_ADDR      16      /* set processor interrupt block and IO port space addr */
+#define PAL_PROC_GET_FEATURES  17      /* get configurable processor features & settings */
+#define PAL_PROC_SET_FEATURES  18      /* enable/disable configurable processor features */
+#define PAL_RSE_INFO           19      /* return rse information */
+#define PAL_VERSION            20      /* return version of PAL code */
+#define PAL_MC_CLEAR_LOG       21      /* clear all processor log info */
+#define PAL_MC_DRAIN           22      /* drain operations which could result in an MCA */
+#define PAL_MC_EXPECTED                23      /* set/reset expected MCA indicator */
+#define PAL_MC_DYNAMIC_STATE   24      /* get processor dynamic state */
+#define PAL_MC_ERROR_INFO      25      /* get processor MCA info and static state */
+#define PAL_MC_RESUME          26      /* Return to interrupted process */
+#define PAL_MC_REGISTER_MEM    27      /* Register memory for PAL to use during MCAs and inits */
+#define PAL_HALT               28      /* enter the low power HALT state */
+#define PAL_HALT_LIGHT         29      /* enter the low power light halt state*/
+#define PAL_COPY_INFO          30      /* returns info needed to relocate PAL */
+#define PAL_CACHE_LINE_INIT    31      /* init tags & data of cache line */
+#define PAL_PMI_ENTRYPOINT     32      /* register PMI memory entry points with the processor */
+#define PAL_ENTER_IA_32_ENV    33      /* enter IA-32 system environment */
+#define PAL_VM_PAGE_SIZE       34      /* return vm TC and page walker page sizes */
+
+#define PAL_MEM_FOR_TEST       37      /* get amount of memory needed for late processor test */
+#define PAL_CACHE_PROT_INFO    38      /* get i/d cache protection info */
+#define PAL_REGISTER_INFO      39      /* return AR and CR register information*/
+#define PAL_SHUTDOWN           40      /* enter processor shutdown state */
+#define PAL_PREFETCH_VISIBILITY        41      /* Make Processor Prefetches Visible */
+
+#define PAL_COPY_PAL           256     /* relocate PAL procedures and PAL PMI */
+#define PAL_HALT_INFO          257     /* return the low power capabilities of processor */
+#define PAL_TEST_PROC          258     /* perform late processor self-test */
+#define PAL_CACHE_READ         259     /* read tag & data of cacheline for diagnostic testing */
+#define PAL_CACHE_WRITE                260     /* write tag & data of cacheline for diagnostic testing */
+#define PAL_VM_TR_READ         261     /* read contents of translation register */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/fpu.h>
+
+/*
+ * Data types needed to pass information into PAL procedures and
+ * interpret information returned by them.
+ */
+
+/* Return status from the PAL procedure */
+typedef s64                            pal_status_t;
+
+#define PAL_STATUS_SUCCESS             0       /* No error */
+#define PAL_STATUS_UNIMPLEMENTED       (-1)    /* Unimplemented procedure */
+#define PAL_STATUS_EINVAL              (-2)    /* Invalid argument */
+#define PAL_STATUS_ERROR               (-3)    /* Error */
+#define PAL_STATUS_CACHE_INIT_FAIL     (-4)    /* Could not initialize the
+                                                * specified level and type of
+                                                * cache without side effects
+                                                * and "restrict" was 1
+                                                */
+
+/* Processor cache level in the hierarchy */
+typedef u64                            pal_cache_level_t;
+#define PAL_CACHE_LEVEL_L0             0       /* L0 */
+#define PAL_CACHE_LEVEL_L1             1       /* L1 */
+#define PAL_CACHE_LEVEL_L2             2       /* L2 */
+
+
+/* Processor cache type at a particular level in the hierarchy */
+
+typedef u64                            pal_cache_type_t;
+#define PAL_CACHE_TYPE_INSTRUCTION     1       /* Instruction cache */
+#define PAL_CACHE_TYPE_DATA            2       /* Data or unified cache */
+#define PAL_CACHE_TYPE_INSTRUCTION_DATA        3       /* Both Data & Instruction */
+
+
+#define PAL_CACHE_FLUSH_INVALIDATE     1       /* Invalidate clean lines */
+#define PAL_CACHE_FLUSH_CHK_INTRS      2       /* check for interrupts/mc while flushing */
+
+/* Processor cache line size in bytes  */
+typedef int                            pal_cache_line_size_t;
+
+/* Processor cache line state */
+typedef u64                            pal_cache_line_state_t;
+#define PAL_CACHE_LINE_STATE_INVALID   0       /* Invalid */
+#define PAL_CACHE_LINE_STATE_SHARED    1       /* Shared */
+#define PAL_CACHE_LINE_STATE_EXCLUSIVE 2       /* Exclusive */
+#define PAL_CACHE_LINE_STATE_MODIFIED  3       /* Modified */
+
+typedef struct pal_freq_ratio {
+       u64 den : 32, num : 32; /* numerator & denominator */
+} itc_ratio, proc_ratio;
+
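These ratios are consumed by scaling the platform base frequency from
PAL_FREQ_BASE, e.g. ITC frequency = base * num / den. A sketch with a
hypothetical helper:

    static inline u64
    pal_scaled_freq (u64 base_hz, struct pal_freq_ratio r)
    {
        return base_hz * r.num / r.den;     /* e.g. ITC Hz from itc_ratio   */
    }
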
+typedef        union  pal_cache_config_info_1_s {
+       struct {
+               u64             u               : 1,    /* 0 Unified cache ? */
+                               at              : 2,    /* 2-1 Cache mem attr*/
+                               reserved        : 5,    /* 7-3 Reserved */
+                               associativity   : 8,    /* 16-8 Associativity*/
+                               line_size       : 8,    /* 23-17 Line size */
+                               stride          : 8,    /* 31-24 Stride */
+                               store_latency   : 8,    /*39-32 Store latency*/
+                               load_latency    : 8,    /* 47-40 Load latency*/
+                               store_hints     : 8,    /* 55-48 Store hints*/
+                               load_hints      : 8;    /* 63-56 Load hints */
+       } pcci1_bits;
+       u64                     pcci1_data;
+} pal_cache_config_info_1_t;
+
+typedef        union  pal_cache_config_info_2_s {
+       struct {
+               u64             cache_size      : 32,   /*cache size in bytes*/
+
+
+                               alias_boundary  : 8,    /* 39-32 aliased addr
+                                                        * separation for max
+                                                        * performance.
+                                                        */
+                               tag_ls_bit      : 8,    /* 47-40 LSb of addr*/
+                               tag_ms_bit      : 8,    /* 55-48 MSb of addr*/
+                               reserved        : 8;    /* 63-56 Reserved */
+       } pcci2_bits;
+       u64                     pcci2_data;
+} pal_cache_config_info_2_t;
+
+
+typedef struct pal_cache_config_info_s {
+       pal_status_t                    pcci_status;
+       pal_cache_config_info_1_t       pcci_info_1;
+       pal_cache_config_info_2_t       pcci_info_2;
+       u64                             pcci_reserved;
+} pal_cache_config_info_t;
+
+#define pcci_ld_hints          pcci_info_1.pcci1_bits.load_hints
+#define pcci_st_hints          pcci_info_1.pcci1_bits.store_hints
+#define pcci_ld_latency                pcci_info_1.pcci1_bits.load_latency
+#define pcci_st_latency                pcci_info_1.pcci1_bits.store_latency
+#define pcci_stride            pcci_info_1.pcci1_bits.stride
+#define pcci_line_size         pcci_info_1.pcci1_bits.line_size
+#define pcci_assoc             pcci_info_1.pcci1_bits.associativity
+#define pcci_cache_attr                pcci_info_1.pcci1_bits.at
+#define pcci_unified           pcci_info_1.pcci1_bits.u
+#define pcci_tag_msb           pcci_info_2.pcci2_bits.tag_ms_bit
+#define pcci_tag_lsb           pcci_info_2.pcci2_bits.tag_ls_bit
+#define pcci_alias_boundary    pcci_info_2.pcci2_bits.alias_boundary
+#define pcci_cache_size                pcci_info_2.pcci2_bits.cache_size
+
+
+
+/* Possible values for cache attributes */
+
+#define PAL_CACHE_ATTR_WT              0       /* Write through cache */
+#define PAL_CACHE_ATTR_WB              1       /* Write back cache */
+#define PAL_CACHE_ATTR_WT_OR_WB                2       /* Either write thru or write
+                                                * back depending on TLB
+                                                * memory attributes
+                                                */
+
+
+/* Possible values for cache hints */
+
+#define PAL_CACHE_HINT_TEMP_1          0       /* Temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_1         1       /* Non-temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_ALL       3       /* Non-temporal all levels */
+
+/* Processor cache protection  information */
+typedef union pal_cache_protection_element_u {
+       u32                     pcpi_data;
+       struct {
+               u32             data_bits       : 8, /* # data bits covered by
+                                                     * each unit of protection
+                                                     */
+
+                               tagprot_lsb     : 6, /* Least -do- */
+                               tagprot_msb     : 6, /* Most Sig. tag address
+                                                     * bit that this
+                                                     * protection covers.
+                                                     */
+                               prot_bits       : 6, /* # of protection bits */
+                               method          : 4, /* Protection method */
+                               t_d             : 2; /* Indicates which part
+                                                     * of the cache this
+                                                     * protection encoding
+                                                     * applies.
+                                                     */
+       } pcp_info;
+} pal_cache_protection_element_t;
+
+#define pcpi_cache_prot_part   pcp_info.t_d
+#define pcpi_prot_method       pcp_info.method
+#define pcpi_prot_bits         pcp_info.prot_bits
+#define pcpi_tagprot_msb       pcp_info.tagprot_msb
+#define pcpi_tagprot_lsb       pcp_info.tagprot_lsb
+#define pcpi_data_bits         pcp_info.data_bits
+
+/* Processor cache part encodings */
+#define PAL_CACHE_PROT_PART_DATA       0       /* Data protection  */
+#define PAL_CACHE_PROT_PART_TAG                1       /* Tag  protection */
+#define PAL_CACHE_PROT_PART_TAG_DATA   2       /* Tag+data protection (tag is
+                                                * more significant )
+                                                */
+#define PAL_CACHE_PROT_PART_DATA_TAG   3       /* Data+tag protection (data is
+                                                * more significant )
+                                                */
+#define PAL_CACHE_PROT_PART_MAX                6
+
+
+typedef struct pal_cache_protection_info_s {
+       pal_status_t                    pcpi_status;
+       pal_cache_protection_element_t  pcp_info[PAL_CACHE_PROT_PART_MAX];
+} pal_cache_protection_info_t;
+
+
+/* Processor cache protection method encodings */
+#define PAL_CACHE_PROT_METHOD_NONE             0       /* No protection */
+#define PAL_CACHE_PROT_METHOD_ODD_PARITY       1       /* Odd parity */
+#define PAL_CACHE_PROT_METHOD_EVEN_PARITY      2       /* Even parity */
+#define PAL_CACHE_PROT_METHOD_ECC              3       /* ECC protection */
+
+
+/* Processor cache line identification in the hierarchy */
+typedef union pal_cache_line_id_u {
+       u64                     pclid_data;
+       struct {
+               u64             cache_type      : 8,    /* 7-0 cache type */
+                               level           : 8,    /* 15-8 level of the
+                                                        * cache in the
+                                                        * hierarchy.
+                                                        */
+                               way             : 8,    /* 23-16 way in the set
+                                                        */
+                               part            : 8,    /* 31-24 part of the
+                                                        * cache
+                                                        */
+                               reserved        : 32;   /* 63-32 is reserved*/
+       } pclid_info_read;
+       struct {
+               u64             cache_type      : 8,    /* 7-0 cache type */
+                               level           : 8,    /* 15-8 level of the
+                                                        * cache in the
+                                                        * hierarchy.
+                                                        */
+                               way             : 8,    /* 23-16 way in the set
+                                                        */
+                               part            : 8,    /* 31-24 part of the
+                                                        * cache
+                                                        */
+                               mesi            : 8,    /* 39-32 cache line
+                                                        * state
+                                                        */
+                               start           : 8,    /* 47-40 lsb of data to
+                                                        * invert
+                                                        */
+                               length          : 8,    /* 55-48 #bits to
+                                                        * invert
+                                                        */
+                               trigger         : 8;    /* 63-56 Trigger error
+                                                        * by doing a load
+                                                        * after the write
+                                                        */
+
+       } pclid_info_write;
+} pal_cache_line_id_u_t;
+
+#define pclid_read_part                pclid_info_read.part
+#define pclid_read_way         pclid_info_read.way
+#define pclid_read_level       pclid_info_read.level
+#define pclid_read_cache_type  pclid_info_read.cache_type
+
+#define pclid_write_trigger    pclid_info_write.trigger
+#define pclid_write_length     pclid_info_write.length
+#define pclid_write_start      pclid_info_write.start
+#define pclid_write_mesi       pclid_info_write.mesi
+#define pclid_write_part       pclid_info_write.part
+#define pclid_write_way                pclid_info_write.way
+#define pclid_write_level      pclid_info_write.level
+#define pclid_write_cache_type pclid_info_write.cache_type
+
+/* Processor cache line part encodings */
+#define PAL_CACHE_LINE_ID_PART_DATA            0       /* Data */
+#define PAL_CACHE_LINE_ID_PART_TAG             1       /* Tag */
+#define PAL_CACHE_LINE_ID_PART_DATA_PROT       2       /* Data protection */
+#define PAL_CACHE_LINE_ID_PART_TAG_PROT                3       /* Tag protection */
+#define PAL_CACHE_LINE_ID_PART_DATA_TAG_PROT   4       /* Data+tag
+                                                        * protection
+                                                        */
+typedef struct pal_cache_line_info_s {
+       pal_status_t            pcli_status;            /* Return status of the read cache line
+                                                        * info call.
+                                                        */
+       u64                     pcli_data;              /* 64-bit data, tag, protection bits .. */
+       u64                     pcli_data_len;          /* data length in bits */
+       pal_cache_line_state_t  pcli_cache_line_state;  /* mesi state */
+
+} pal_cache_line_info_t;
+
+
+/* Machine Check related crap */
+
+/* Pending event status bits  */
+typedef u64                                    pal_mc_pending_events_t;
+
+#define PAL_MC_PENDING_MCA                     (1 << 0)
+#define PAL_MC_PENDING_INIT                    (1 << 1)
+
+/* Error information type */
+typedef u64                                    pal_mc_info_index_t;
+
+#define PAL_MC_INFO_PROCESSOR                  0       /* Processor */
+#define PAL_MC_INFO_CACHE_CHECK                        1       /* Cache check */
+#define PAL_MC_INFO_TLB_CHECK                  2       /* Tlb check */
+#define PAL_MC_INFO_BUS_CHECK                  3       /* Bus check */
+#define PAL_MC_INFO_REQ_ADDR                   4       /* Requestor address */
+#define PAL_MC_INFO_RESP_ADDR                  5       /* Responder address */
+#define PAL_MC_INFO_TARGET_ADDR                        6       /* Target address */
+#define PAL_MC_INFO_IMPL_DEP                   7       /* Implementation
+                                                        * dependent
+                                                        */
+
+
+typedef struct pal_process_state_info_s {
+       u64             reserved1       : 2,
+                       rz              : 1,    /* PAL_CHECK processor
+                                                * rendezvous
+                                                * successful.
+                                                */
+
+                       ra              : 1,    /* PAL_CHECK attempted
+                                                * a rendezvous.
+                                                */
+                       me              : 1,    /* Distinct multiple
+                                                * errors occurred
+                                                */
+
+                       mn              : 1,    /* Min. state save
+                                                * area has been
+                                                * registered with PAL
+                                                */
+
+                       sy              : 1,    /* Storage integrity
+                                                * synched
+                                                */
+
+
+                       co              : 1,    /* Continuable */
+                       ci              : 1,    /* MC isolated */
+                       us              : 1,    /* Uncontained storage
+                                                * damage.
+                                                */
+
+
+                       hd              : 1,    /* Non-essential hw
+                                                * lost (no loss of
+                                                * functionality)
+                                                * causing the
+                                                * processor to run in
+                                                * degraded mode.
+                                                */
+
+                       tl              : 1,    /* 1 => MC occurred
+                                                * after an instr was
+                                                * executed but before
+                                                * the trap that
+                                                * resulted from instr
+                                                * execution was
+                                                * generated.
+                                                * (Trap Lost )
+                                                */
+                       mi              : 1,    /* More information available
+                                                * call PAL_MC_ERROR_INFO
+                                                */
+                       pi              : 1,    /* Precise instruction pointer */
+                       pm              : 1,    /* Precise min-state save area */
+
+                       dy              : 1,    /* Processor dynamic
+                                                * state valid
+                                                */
+
+
+                       in              : 1,    /* 0 = MC, 1 = INIT */
+                       rs              : 1,    /* RSE valid */
+                       cm              : 1,    /* MC corrected */
+                       ex              : 1,    /* MC is expected */
+                       cr              : 1,    /* Control regs valid*/
+                       pc              : 1,    /* Perf cntrs valid */
+                       dr              : 1,    /* Debug regs valid */
+                       tr              : 1,    /* Translation regs
+                                                * valid
+                                                */
+                       rr              : 1,    /* Region regs valid */
+                       ar              : 1,    /* App regs valid */
+                       br              : 1,    /* Branch regs valid */
+                       pr              : 1,    /* Predicate registers
+                                                * valid
+                                                */
+
+                       fp              : 1,    /* fp registers valid*/
+                       b1              : 1,    /* Preserved bank one
+                                                * general registers
+                                                * are valid
+                                                */
+                       b0              : 1,    /* Preserved bank zero
+                                                * general registers
+                                                * are valid
+                                                */
+                       gr              : 1,    /* General registers
+                                                * are valid
+                                                * (excl. banked regs)
+                                                */
+                       dsize           : 16,   /* size of dynamic
+                                                * state returned
+                                                * by the processor
+                                                */
+
+                       reserved2       : 11,
+                       cc              : 1,    /* Cache check */
+                       tc              : 1,    /* TLB check */
+                       bc              : 1,    /* Bus check */
+                       rc              : 1,    /* Register file check */
+                       uc              : 1;    /* Uarch check */
+
+} pal_processor_state_info_t;
+
+typedef struct pal_cache_check_info_s {
+       u64             op              : 4,    /* Type of cache
+                                                * operation that
+                                                * caused the machine
+                                                * check.
+                                                */
+                       level           : 2,    /* Cache level */
+                       reserved1       : 2,
+                       dl              : 1,    /* Failure in data part
+                                                * of cache line
+                                                */
+                       tl              : 1,    /* Failure in tag part
+                                                * of cache line
+                                                */
+                       dc              : 1,    /* Failure in dcache */
+                       ic              : 1,    /* Failure in icache */
+                       mesi            : 3,    /* Cache line state */
+                       mv              : 1,    /* mesi valid */
+                       way             : 5,    /* Way in which the
+                                                * error occurred
+                                                */
+                       wiv             : 1,    /* Way field valid */
+                       reserved2       : 10,
+
+                       index           : 20,   /* Cache line index */
+                       reserved3       : 2,
+
+                       is              : 1,    /* instruction set (1 == ia32) */
+                       iv              : 1,    /* instruction set field valid */
+                       pl              : 2,    /* privilege level */
+                       pv              : 1,    /* privilege level field valid */
+                       mcc             : 1,    /* Machine check corrected */
+                       tv              : 1,    /* Target address
+                                                * structure is valid
+                                                */
+                       rq              : 1,    /* Requester identifier
+                                                * structure is valid
+                                                */
+                       rp              : 1,    /* Responder identifier
+                                                * structure is valid
+                                                */
+                       pi              : 1;    /* Precise instruction pointer
+                                                * structure is valid
+                                                */
+} pal_cache_check_info_t;
+
+typedef struct pal_tlb_check_info_s {
+
+       u64             tr_slot         : 8,    /* Slot# of TR where
+                                                * error occurred
+                                                */
+                       trv             : 1,    /* tr_slot field is valid */
+                       reserved1       : 1,
+                       level           : 2,    /* TLB level where failure occurred */
+                       reserved2       : 4,
+                       dtr             : 1,    /* Fail in data TR */
+                       itr             : 1,    /* Fail in inst TR */
+                       dtc             : 1,    /* Fail in data TC */
+                       itc             : 1,    /* Fail in inst. TC */
+                       op              : 4,    /* Cache operation */
+                       reserved3       : 30,
+
+                       is              : 1,    /* instruction set (1 == ia32) */
+                       iv              : 1,    /* instruction set field valid */
+                       pl              : 2,    /* privilege level */
+                       pv              : 1,    /* privilege level field valid */
+                       mcc             : 1,    /* Machine check corrected */
+                       tv              : 1,    /* Target address
+                                                * structure is valid
+                                                */
+                       rq              : 1,    /* Requester identifier
+                                                * structure is valid
+                                                */
+                       rp              : 1,    /* Responder identifier
+                                                * structure is valid
+                                                */
+                       pi              : 1;    /* Precise instruction pointer
+                                                * structure is valid
+                                                */
+} pal_tlb_check_info_t;
+
+typedef struct pal_bus_check_info_s {
+       u64             size            : 5,    /* Xaction size */
+                       ib              : 1,    /* Internal bus error */
+                       eb              : 1,    /* External bus error */
+                       cc              : 1,    /* Error occurred
+                                                * during cache-cache
+                                                * transfer.
+                                                */
+                       type            : 8,    /* Bus xaction type*/
+                       sev             : 5,    /* Bus error severity*/
+                       hier            : 2,    /* Bus hierarchy level */
+                       reserved1       : 1,
+                       bsi             : 8,    /* Bus error status
+                                                * info
+                                                */
+                       reserved2       : 22,
+
+                       is              : 1,    /* instruction set (1 == ia32) */
+                       iv              : 1,    /* instruction set field valid */
+                       pl              : 2,    /* privilege level */
+                       pv              : 1,    /* privilege level field valid */
+                       mcc             : 1,    /* Machine check corrected */
+                       tv              : 1,    /* Target address
+                                                * structure is valid
+                                                */
+                       rq              : 1,    /* Requester identifier
+                                                * structure is valid
+                                                */
+                       rp              : 1,    /* Responder identifier
+                                                * structure is valid
+                                                */
+                       pi              : 1;    /* Precise instruction pointer
+                                                * structure is valid
+                                                */
+} pal_bus_check_info_t;
+
+typedef struct pal_reg_file_check_info_s {
+       u64             id              : 4,    /* Register file identifier */
+                       op              : 4,    /* Type of register
+                                                * operation that
+                                                * caused the machine
+                                                * check.
+                                                */
+                       reg_num         : 7,    /* Register number */
+                       rnv             : 1,    /* reg_num valid */
+                       reserved2       : 38,
+
+                       is              : 1,    /* instruction set (1 == ia32) */
+                       iv              : 1,    /* instruction set field valid */
+                       pl              : 2,    /* privilege level */
+                       pv              : 1,    /* privilege level field valid */
+                       mcc             : 1,    /* Machine check corrected */
+                       reserved3       : 3,
+                       pi              : 1;    /* Precise instruction pointer
+                                                * structure is valid
+                                                */
+} pal_reg_file_check_info_t;
+
+typedef struct pal_uarch_check_info_s {
+       u64             sid             : 5,    /* Structure identification */
+                       level           : 3,    /* Level of failure */
+                       array_id        : 4,    /* Array identification */
+                       op              : 4,    /* Type of
+                                                * operation that
+                                                * caused the machine
+                                                * check.
+                                                */
+                       way             : 6,    /* Way of structure */
+                       wv              : 1,    /* way valid */
+                       xv              : 1,    /* index valid */
+                       reserved1       : 8,
+                       index           : 8,    /* Index or set of the uarch
+                                                * structure that failed.
+                                                */
+                       reserved2       : 24,
+
+                       is              : 1,    /* instruction set (1 == ia32) */
+                       iv              : 1,    /* instruction set field valid */
+                       pl              : 2,    /* privilege level */
+                       pv              : 1,    /* privilege level field valid */
+                       mcc             : 1,    /* Machine check corrected */
+                       tv              : 1,    /* Target address
+                                                * structure is valid
+                                                */
+                       rq              : 1,    /* Requester identifier
+                                                * structure is valid
+                                                */
+                       rp              : 1,    /* Responder identifier
+                                                * structure is valid
+                                                */
+                       pi              : 1;    /* Precise instruction pointer
+                                                * structure is valid
+                                                */
+} pal_uarch_check_info_t;
+
+typedef union pal_mc_error_info_u {
+       u64                             pmei_data;
+       pal_processor_state_info_t      pme_processor;
+       pal_cache_check_info_t          pme_cache;
+       pal_tlb_check_info_t            pme_tlb;
+       pal_bus_check_info_t            pme_bus;
+       pal_reg_file_check_info_t       pme_reg_file;
+       pal_uarch_check_info_t          pme_uarch;
+} pal_mc_error_info_t;
+
+#define pmci_proc_unknown_check                        pme_processor.uc
+#define pmci_proc_bus_check                    pme_processor.bc
+#define pmci_proc_tlb_check                    pme_processor.tc
+#define pmci_proc_cache_check                  pme_processor.cc
+#define pmci_proc_dynamic_state_size           pme_processor.dsize
+#define pmci_proc_gpr_valid                    pme_processor.gr
+#define pmci_proc_preserved_bank0_gpr_valid    pme_processor.b0
+#define pmci_proc_preserved_bank1_gpr_valid    pme_processor.b1
+#define pmci_proc_fp_valid                     pme_processor.fp
+#define pmci_proc_predicate_regs_valid         pme_processor.pr
+#define pmci_proc_branch_regs_valid            pme_processor.br
+#define pmci_proc_app_regs_valid               pme_processor.ar
+#define pmci_proc_region_regs_valid            pme_processor.rr
+#define pmci_proc_translation_regs_valid       pme_processor.tr
+#define pmci_proc_debug_regs_valid             pme_processor.dr
+#define pmci_proc_perf_counters_valid          pme_processor.pc
+#define pmci_proc_control_regs_valid           pme_processor.cr
+#define pmci_proc_machine_check_expected       pme_processor.ex
+#define pmci_proc_machine_check_corrected      pme_processor.cm
+#define pmci_proc_rse_valid                    pme_processor.rs
+#define pmci_proc_machine_check_or_init                pme_processor.in
+#define pmci_proc_dynamic_state_valid          pme_processor.dy
+#define pmci_proc_operation                    pme_processor.op
+#define pmci_proc_trap_lost                    pme_processor.tl
+#define pmci_proc_hardware_damage              pme_processor.hd
+#define pmci_proc_uncontained_storage_damage   pme_processor.us
+#define pmci_proc_machine_check_isolated       pme_processor.ci
+#define pmci_proc_continuable                  pme_processor.co
+#define pmci_proc_storage_intergrity_synced    pme_processor.sy
+#define pmci_proc_min_state_save_area_regd     pme_processor.mn
+#define        pmci_proc_distinct_multiple_errors      pme_processor.me
+#define pmci_proc_pal_attempted_rendezvous     pme_processor.ra
+#define pmci_proc_pal_rendezvous_complete      pme_processor.rz
+
+
+#define pmci_cache_level                       pme_cache.level
+#define pmci_cache_line_state                  pme_cache.mesi
+#define pmci_cache_line_state_valid            pme_cache.mv
+#define pmci_cache_line_index                  pme_cache.index
+#define pmci_cache_instr_cache_fail            pme_cache.ic
+#define pmci_cache_data_cache_fail             pme_cache.dc
+#define pmci_cache_line_tag_fail               pme_cache.tl
+#define pmci_cache_line_data_fail              pme_cache.dl
+#define pmci_cache_operation                   pme_cache.op
+#define pmci_cache_way_valid                   pme_cache.wv
+#define pmci_cache_target_address_valid                pme_cache.tv
+#define pmci_cache_way                         pme_cache.way
+#define pmci_cache_mc                          pme_cache.mc
+
+#define pmci_tlb_instr_translation_cache_fail  pme_tlb.itc
+#define pmci_tlb_data_translation_cache_fail   pme_tlb.dtc
+#define pmci_tlb_instr_translation_reg_fail    pme_tlb.itr
+#define pmci_tlb_data_translation_reg_fail     pme_tlb.dtr
+#define pmci_tlb_translation_reg_slot          pme_tlb.tr_slot
+#define pmci_tlb_mc                            pme_tlb.mc
+
+#define pmci_bus_status_info                   pme_bus.bsi
+#define pmci_bus_req_address_valid             pme_bus.rq
+#define pmci_bus_resp_address_valid            pme_bus.rp
+#define pmci_bus_target_address_valid          pme_bus.tv
+#define pmci_bus_error_severity                        pme_bus.sev
+#define pmci_bus_transaction_type              pme_bus.type
+#define pmci_bus_cache_cache_transfer          pme_bus.cc
+#define pmci_bus_transaction_size              pme_bus.size
+#define pmci_bus_internal_error                        pme_bus.ib
+#define pmci_bus_external_error                        pme_bus.eb
+#define pmci_bus_mc                            pme_bus.mc
+
+/*
+ * NOTE: this min_state_save area struct only includes the 1KB
+ * architectural state save area.  The other 3 KB is scratch space
+ * for PAL.
+ */
+
+typedef struct pal_min_state_area_s {
+       u64     pmsa_nat_bits;          /* nat bits for saved GRs  */
+       u64     pmsa_gr[15];            /* GR1  - GR15             */
+       u64     pmsa_bank0_gr[16];      /* GR16 - GR31             */
+       u64     pmsa_bank1_gr[16];      /* GR16 - GR31             */
+       u64     pmsa_pr;                /* predicate registers     */
+       u64     pmsa_br0;               /* branch register 0       */
+       u64     pmsa_rsc;               /* ar.rsc                  */
+       u64     pmsa_iip;               /* cr.iip                  */
+       u64     pmsa_ipsr;              /* cr.ipsr                 */
+       u64     pmsa_ifs;               /* cr.ifs                  */
+       u64     pmsa_xip;               /* previous iip            */
+       u64     pmsa_xpsr;              /* previous psr            */
+       u64     pmsa_xfs;               /* previous ifs            */
+       u64     pmsa_br1;               /* branch register 1       */
+       u64     pmsa_reserved[70];      /* pal_min_state_area should total to 1KB */
+} pal_min_state_area_t;
+
+
+struct ia64_pal_retval {
+       /*
+        * A zero status value indicates the call completed without error.
+        * A negative status value indicates the reason for call failure.
+        * A positive status value indicates success but an
+        * informational value should be printed (e.g., "reboot for
+        * change to take effect").
+        */
+       s64 status;
+       u64 v0;
+       u64 v1;
+       u64 v2;
+};
+
+/*
+ * Note: Currently unused PAL arguments are generally labeled
+ * "reserved" so the value specified in the PAL documentation
+ * (generally 0) MUST be passed.  Reserved parameters are not optional
+ * parameters.
+ */
+extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
+extern void ia64_save_scratch_fpregs (struct ia64_fpreg *);
+extern void ia64_load_scratch_fpregs (struct ia64_fpreg *);
+
+#define PAL_CALL(iprv,a0,a1,a2,a3) do {                        \
+       struct ia64_fpreg fr[6];                        \
+       ia64_save_scratch_fpregs(fr);                   \
+       iprv = ia64_pal_call_static(a0, a1, a2, a3, 0); \
+       ia64_load_scratch_fpregs(fr);                   \
+} while (0)
+
+#define PAL_CALL_IC_OFF(iprv,a0,a1,a2,a3) do {         \
+       struct ia64_fpreg fr[6];                        \
+       ia64_save_scratch_fpregs(fr);                   \
+       iprv = ia64_pal_call_static(a0, a1, a2, a3, 1); \
+       ia64_load_scratch_fpregs(fr);                   \
+} while (0)
+
+#define PAL_CALL_STK(iprv,a0,a1,a2,a3) do {            \
+       struct ia64_fpreg fr[6];                        \
+       ia64_save_scratch_fpregs(fr);                   \
+       iprv = ia64_pal_call_stacked(a0, a1, a2, a3);   \
+       ia64_load_scratch_fpregs(fr);                   \
+} while (0)
+
+#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) do {                   \
+       struct ia64_fpreg fr[6];                                \
+       ia64_save_scratch_fpregs(fr);                           \
+       iprv = ia64_pal_call_phys_static(a0, a1, a2, a3);       \
+       ia64_load_scratch_fpregs(fr);                           \
+} while (0)
+
+#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) do {               \
+       struct ia64_fpreg fr[6];                                \
+       ia64_save_scratch_fpregs(fr);                           \
+       iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3);      \
+       ia64_load_scratch_fpregs(fr);                           \
+} while (0)
+
+typedef int (*ia64_pal_handler) (u64, ...);
+extern ia64_pal_handler ia64_pal;
+extern void ia64_pal_handler_init (void *);
+
+extern pal_cache_config_info_t         l0d_cache_config_info;
+extern pal_cache_config_info_t         l0i_cache_config_info;
+extern pal_cache_config_info_t         l1_cache_config_info;
+extern pal_cache_config_info_t         l2_cache_config_info;
+
+extern pal_cache_protection_info_t     l0d_cache_protection_info;
+extern pal_cache_protection_info_t     l0i_cache_protection_info;
+extern pal_cache_protection_info_t     l1_cache_protection_info;
+extern pal_cache_protection_info_t     l2_cache_protection_info;
+
+extern pal_cache_config_info_t         pal_cache_config_info_get(pal_cache_level_t,
+                                                                 pal_cache_type_t);
+
+extern pal_cache_protection_info_t     pal_cache_protection_info_get(pal_cache_level_t,
+                                                                     pal_cache_type_t);
+
+
+extern void                            pal_error(int);
+
+
+/* Useful wrappers for the current list of pal procedures */
+
+typedef union pal_bus_features_u {
+       u64     pal_bus_features_val;
+       struct {
+               u64     pbf_reserved1                           :       29;
+               u64     pbf_req_bus_parking                     :       1;
+               u64     pbf_bus_lock_mask                       :       1;
+               u64     pbf_enable_half_xfer_rate               :       1;
+               u64     pbf_reserved2                           :       22;
+               u64     pbf_disable_xaction_queueing            :       1;
+               u64     pbf_disable_resp_err_check              :       1;
+               u64     pbf_disable_berr_check                  :       1;
+               u64     pbf_disable_bus_req_internal_err_signal :       1;
+               u64     pbf_disable_bus_req_berr_signal         :       1;
+               u64     pbf_disable_bus_init_event_check        :       1;
+               u64     pbf_disable_bus_init_event_signal       :       1;
+               u64     pbf_disable_bus_addr_err_check          :       1;
+               u64     pbf_disable_bus_addr_err_signal         :       1;
+               u64     pbf_disable_bus_data_err_check          :       1;
+       } pal_bus_features_s;
+} pal_bus_features_u_t;
+
+extern void pal_bus_features_print (u64);
+
+/* Provide information about configurable processor bus features */
+static inline s64
+ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
+                          pal_bus_features_u_t *features_status,
+                          pal_bus_features_u_t *features_control)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
+       if (features_avail)
+               features_avail->pal_bus_features_val = iprv.v0;
+       if (features_status)
+               features_status->pal_bus_features_val = iprv.v1;
+       if (features_control)
+               features_control->pal_bus_features_val = iprv.v2;
+       return iprv.status;
+}
+
+/* Enables/disables specific processor bus features */
+static inline s64
+ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
+       return iprv.status;
+}
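+
+/*
+ * Illustrative sketch (not part of the PAL interface): the avail/status/
+ * control triple above is typically used read-modify-write style.  The
+ * helper and the feature bit chosen below are hypothetical examples.
+ */
+static inline s64
+example_toggle_bus_parking (void)
+{
+       pal_bus_features_u_t avail, status, control;
+       s64 ret;
+
+       ret = ia64_pal_bus_get_features(&avail, &status, &control);
+       if (ret != 0)
+               return ret;
+       /* only request the change if the processor implements the feature */
+       if (!avail.pal_bus_features_s.pbf_req_bus_parking)
+               return -1;
+       control.pal_bus_features_s.pbf_req_bus_parking ^= 1;
+       return ia64_pal_bus_set_features(control);
+}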
+
+/* Get detailed cache information */
+static inline s64
+ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf)
+{
+       struct ia64_pal_retval iprv;
+
+       PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0);
+
+       if (iprv.status == 0) {
+               conf->pcci_status                 = iprv.status;
+               conf->pcci_info_1.pcci1_data      = iprv.v0;
+               conf->pcci_info_2.pcci2_data      = iprv.v1;
+               conf->pcci_reserved               = iprv.v2;
+       }
+       return iprv.status;
+}
+
+/* Get detailed cache protection information */
+static inline s64
+ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot)
+{
+       struct ia64_pal_retval iprv;
+
+       PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0);
+
+       if (iprv.status == 0) {
+               prot->pcpi_status           = iprv.status;
+               prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff;
+               prot->pcp_info[1].pcpi_data = iprv.v0 >> 32;
+               prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff;
+               prot->pcp_info[3].pcpi_data = iprv.v1 >> 32;
+               prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff;
+               prot->pcp_info[5].pcpi_data = iprv.v2 >> 32;
+       }
+       return iprv.status;
+}
+
+/*
+ * Flush the processor instruction or data caches.  *PROGRESS must be
+ * initialized to zero before calling this for the first time.
+ */
+static inline s64
+ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_IC_OFF(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
+       if (vector)
+               *vector = iprv.v0;
+       *progress = iprv.v1;
+       return iprv.status;
+}
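+
+/*
+ * Hypothetical caller sketch: the progress cookie lets an interrupted
+ * flush be resumed by simply calling again with the same variable.
+ * (Whether a given positive status means "call again" is defined
+ * per-procedure by the PAL spec; this sketch assumes it does here.)
+ */
+static inline s64
+example_flush_caches (u64 cache_type, u64 invalidate)
+{
+       u64 progress = 0;       /* must start at zero, per the comment above */
+       u64 vector;
+       s64 status;
+
+       do {
+               status = ia64_pal_cache_flush(cache_type, invalidate,
+                                             &progress, &vector);
+       } while (status > 0);
+       return status;
+}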
+
+
+/* Initialize the processor controlled caches */
+static inline s64
+ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
+       return iprv.status;
+}
+
+/* Initialize the tags and data of a data or unified cache line of
+ * processor controlled cache to known values without the availability
+ * of backing memory.
+ */
+static inline s64
+ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0);
+       return iprv.status;
+}
+
+
+/* Read the data and tag of a processor controlled cache line for diags */
+static inline s64
+ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_CACHE_READ, line_id.pclid_data, physical_addr, 0);
+       return iprv.status;
+}
+
+/* Return summary information about the hierarchy of caches controlled by the processor */
+static inline s64
+ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0);
+       if (cache_levels)
+               *cache_levels = iprv.v0;
+       if (unique_caches)
+               *unique_caches = iprv.v1;
+       return iprv.status;
+}
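+
+/*
+ * Sketch of walking the cache hierarchy with the two calls above, modeled
+ * on the kernel's get_max_cacheline_size().  The cache_type value 2 (data
+ * or unified) and the pcci_line_size accessor are assumed to be as
+ * declared elsewhere in this header.
+ */
+static inline u64
+example_max_cache_line_size (void)
+{
+       u64 levels, unique, l, max = 0;
+       pal_cache_config_info_t cci;
+
+       if (ia64_pal_cache_summary(&levels, &unique) != 0)
+               return 0;
+       for (l = 0; l < levels; ++l) {
+               if (ia64_pal_cache_config_info(l, /* data_or_unified */ 2, &cci) != 0)
+                       continue;
+               if ((1UL << cci.pcci_line_size) > max)
+                       max = 1UL << cci.pcci_line_size;
+       }
+       return max;
+}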
+
+/* Write the data and tag of a processor-controlled cache line for diags */
+static inline s64
+ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_CACHE_WRITE, line_id.pclid_data, physical_addr, data);
+       return iprv.status;
+}
+
+
+/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */
+static inline s64
+ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
+                   u64 *buffer_size, u64 *buffer_align)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics);
+       if (buffer_size)
+               *buffer_size = iprv.v0;
+       if (buffer_align)
+               *buffer_align = iprv.v1;
+       return iprv.status;
+}
+
+/* Copy relocatable PAL procedures from ROM to memory */
+static inline s64
+ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor);
+       if (pal_proc_offset)
+               *pal_proc_offset = iprv.v0;
+       return iprv.status;
+}
+
+/* Return the number of instruction and data debug register pairs */
+static inline s64
+ia64_pal_debug_info (u64 *inst_regs,  u64 *data_regs)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0);
+       if (inst_regs)
+               *inst_regs = iprv.v0;
+       if (data_regs)
+               *data_regs = iprv.v1;
+
+       return iprv.status;
+}
+
+#ifdef TBD
+/* Switch from IA64-system environment to IA-32 system environment */
+static inline s64
+ia64_pal_enter_ia32_env (u64 ia32_env1, u64 ia32_env2, u64 ia32_env3)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3);
+       return iprv.status;
+}
+#endif
+
+/* Get unique geographical address of this processor on its bus */
+static inline s64
+ia64_pal_fixed_addr (u64 *global_unique_addr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0);
+       if (global_unique_addr)
+               *global_unique_addr = iprv.v0;
+       return iprv.status;
+}
+
+/* Get base frequency of the platform if generated by the processor */
+static inline s64
+ia64_pal_freq_base (u64 *platform_base_freq)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);
+       if (platform_base_freq)
+               *platform_base_freq = iprv.v0;
+       return iprv.status;
+}
+
+/*
+ * Get the ratios of processor frequency, bus frequency and interval timer
+ * to the base frequency of the platform
+ */
+static inline s64
+ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
+                     struct pal_freq_ratio *itc_ratio)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_FREQ_RATIOS, 0, 0, 0);
+       if (proc_ratio)
+               *(u64 *)proc_ratio = iprv.v0;
+       if (bus_ratio)
+               *(u64 *)bus_ratio = iprv.v1;
+       if (itc_ratio)
+               *(u64 *)itc_ratio = iprv.v2;
+       return iprv.status;
+}
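+
+/*
+ * Combined with PAL_FREQ_BASE, these ratios give absolute frequencies.
+ * A hypothetical sketch (assuming the num/den fields of struct
+ * pal_freq_ratio declared earlier in this header):
+ *
+ *     u64 base, itc_freq;
+ *     struct pal_freq_ratio itc;
+ *
+ *     if (ia64_pal_freq_base(&base) == 0 &&
+ *         ia64_pal_freq_ratios(NULL, NULL, &itc) == 0)
+ *             itc_freq = base * itc.num / itc.den;
+ */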
+
+/* Make the processor enter HALT or one of the implementation dependent low
+ * power states where prefetching and execution are suspended and cache and
+ * TLB coherency is not maintained.
+ */
+static inline s64
+ia64_pal_halt (u64 halt_state)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0);
+       return iprv.status;
+}
+
+typedef union pal_power_mgmt_info_u {
+       u64                     ppmi_data;
+       struct {
+              u64              exit_latency            : 16,
+                               entry_latency           : 16,
+                               power_consumption       : 28,
+                               im                      : 1,
+                               co                      : 1,
+                               reserved                : 2;
+       } pal_power_mgmt_info_s;
+} pal_power_mgmt_info_u_t;
+
+/* Return information about the processor's optional power management capabilities. */
+static inline s64
+ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
+       return iprv.status;
+}
+
+/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
+ * suspended, but cache and TLB coherency is maintained.
+ */
+static inline s64
+ia64_pal_halt_light (void)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0);
+       return iprv.status;
+}
+
+/* Clear all the processor error logging registers and reset the indicator that allows
+ * the error logging registers to be written. This procedure also checks the pending
+ * machine check bit and pending INIT bit and reports their states.
+ */
+static inline s64
+ia64_pal_mc_clear_log (u64 *pending_vector)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0);
+       if (pending_vector)
+               *pending_vector = iprv.v0;
+       return iprv.status;
+}
+
+/* Ensure that all outstanding transactions in a processor are completed or that any
+ * MCA due to these outstanding transactions is taken.
+ */
+static inline s64
+ia64_pal_mc_drain (void)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0);
+       return iprv.status;
+}
+
+/* Return the machine check dynamic processor state */
+static inline s64
+ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
+       if (size)
+               *size = iprv.v0;
+       if (pds)
+               *pds = iprv.v1;
+       return iprv.status;
+}
+
+/* Return processor machine check information */
+static inline s64
+ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0);
+       if (size)
+               *size = iprv.v0;
+       if (error_info)
+               *error_info = iprv.v1;
+       return iprv.status;
+}
+
+/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
+ * attempt to correct any expected machine checks.
+ */
+static inline s64
+ia64_pal_mc_expected (u64 expected, u64 *previous)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0);
+       if (previous)
+               *previous = iprv.v0;
+       return iprv.status;
+}
+
+/* Register a platform dependent location with PAL to which it can save
+ * minimal processor state in the event of a machine check or initialization
+ * event.
+ */
+static inline s64
+ia64_pal_mc_register_mem (u64 physical_addr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
+       return iprv.status;
+}
+
+/* Restore minimal architectural processor state, set CMC interrupt if necessary
+ * and resume execution
+ */
+static inline s64
+ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0);
+       return iprv.status;
+}
+
+/* Return the memory attributes implemented by the processor */
+static inline s64
+ia64_pal_mem_attrib (u64 *mem_attrib)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0);
+       if (mem_attrib)
+               *mem_attrib = iprv.v0 & 0xff;
+       return iprv.status;
+}
+
+/* Return the amount of memory needed for second phase of processor
+ * self-test and the required alignment of memory.
+ */
+static inline s64
+ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0);
+       if (bytes_needed)
+               *bytes_needed = iprv.v0;
+       if (alignment)
+               *alignment = iprv.v1;
+       return iprv.status;
+}
+
+typedef union pal_perf_mon_info_u {
+       u64                       ppmi_data;
+       struct {
+              u64              generic         : 8,
+                               width           : 8,
+                               cycles          : 8,
+                               retired         : 8,
+                               reserved        : 32;
+       } pal_perf_mon_info_s;
+} pal_perf_mon_info_u_t;
+
+/* Return the performance monitor information about what can be counted
+ * and how to configure the monitors to count the desired events.
+ */
+static inline s64
+ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_PERF_MON_INFO, (unsigned long) pm_buffer, 0, 0);
+       if (pm_info)
+               pm_info->ppmi_data = iprv.v0;
+       return iprv.status;
+}
+
+/* Specifies the physical address of the processor interrupt block
+ * and I/O port space.
+ */
+static inline s64
+ia64_pal_platform_addr (u64 type, u64 physical_addr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_PLATFORM_ADDR, type, physical_addr, 0);
+       return iprv.status;
+}
+
+/* Set the SAL PMI entrypoint in memory */
+static inline s64
+ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_PMI_ENTRYPOINT, sal_pmi_entry_addr, 0, 0);
+       return iprv.status;
+}
+
+struct pal_features_s;
+/* Provide information about configurable processor features */
+static inline s64
+ia64_pal_proc_get_features (u64 *features_avail,
+                           u64 *features_status,
+                           u64 *features_control)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0);
+       if (iprv.status == 0) {
+               *features_avail   = iprv.v0;
+               *features_status  = iprv.v1;
+               *features_control = iprv.v2;
+       }
+       return iprv.status;
+}
+
+/* Enable/disable processor dependent features */
+static inline s64
+ia64_pal_proc_set_features (u64 feature_select)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
+       return iprv.status;
+}
+
+/*
+ * Put everything in a struct so we avoid the global offset table whenever
+ * possible.
+ */
+typedef struct ia64_ptce_info_s {
+       u64             base;
+       u32             count[2];
+       u32             stride[2];
+} ia64_ptce_info_t;
+
+/* Return the information required for the architected loop used to purge
+ * (initialize) the entire TC
+ */
+static inline s64
+ia64_get_ptce (ia64_ptce_info_t *ptce)
+{
+       struct ia64_pal_retval iprv;
+
+       if (!ptce)
+               return -1;
+
+       PAL_CALL(iprv, PAL_PTCE_INFO, 0, 0, 0);
+       if (iprv.status == 0) {
+               ptce->base = iprv.v0;
+               ptce->count[0] = iprv.v1 >> 32;
+               ptce->count[1] = iprv.v1 & 0xffffffff;
+               ptce->stride[0] = iprv.v2 >> 32;
+               ptce->stride[1] = iprv.v2 & 0xffffffff;
+       }
+       return iprv.status;
+}
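+
+/*
+ * These values drive the architected two-level purge loop (compare the
+ * kernel's local_flush_tlb_all()); ia64_ptce() is the ptc.e intrinsic
+ * assumed to come from <asm/intrinsics.h>:
+ *
+ *     ia64_ptce_info_t ptce;
+ *     unsigned long addr, i, j;
+ *
+ *     if (ia64_get_ptce(&ptce) == 0) {
+ *             addr = ptce.base;
+ *             for (i = 0; i < ptce.count[0]; ++i) {
+ *                     for (j = 0; j < ptce.count[1]; ++j) {
+ *                             ia64_ptce(addr);
+ *                             addr += ptce.stride[1];
+ *                     }
+ *                     addr += ptce.stride[0];
+ *             }
+ *     }
+ */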
+
+/* Return info about implemented application and control registers. */
+static inline s64
+ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_REGISTER_INFO, info_request, 0, 0);
+       if (reg_info_1)
+               *reg_info_1 = iprv.v0;
+       if (reg_info_2)
+               *reg_info_2 = iprv.v1;
+       return iprv.status;
+}
+
+typedef union pal_hints_u {
+       u64                     ph_data;
+       struct {
+              u64              si              : 1,
+                               li              : 1,
+                               reserved        : 62;
+       } pal_hints_s;
+} pal_hints_u_t;
+
+/* Return information about the register stack and RSE for this processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0);
+       if (num_phys_stacked)
+               *num_phys_stacked = iprv.v0;
+       if (hints)
+               hints->ph_data = iprv.v1;
+       return iprv.status;
+}
+
+/* Cause the processor to enter SHUTDOWN state, where prefetching and execution are
+ * suspended, but cache and TLB coherency is maintained.
+ * This is usually called in IA-32 mode.
+ */
+static inline s64
+ia64_pal_shutdown (void)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_SHUTDOWN, 0, 0, 0);
+       return iprv.status;
+}
+
+/* Perform the second phase of processor self-test. */
+static inline s64
+ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_TEST_PROC, test_addr, test_size, attributes);
+       if (self_test_state)
+               *self_test_state = iprv.v0;
+       return iprv.status;
+}
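+
+/*
+ * The two phases of the self-test protocol fit together roughly as in
+ * this sketch; alloc_aligned() is a hypothetical allocator standing in
+ * for whatever the platform provides, and the attributes argument is
+ * as defined by the PAL spec:
+ *
+ *     u64 bytes, align, state;
+ *
+ *     if (ia64_pal_mem_for_test(&bytes, &align) == 0) {
+ *             void *buf = alloc_aligned(bytes, align);
+ *             if (buf)
+ *                     ia64_pal_test_proc(__pa(buf), bytes, attributes, &state);
+ *     }
+ */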
+
+typedef union  pal_version_u {
+       u64     pal_version_val;
+       struct {
+               u64     pv_pal_b_rev            :       8;
+               u64     pv_pal_b_model          :       8;
+               u64     pv_reserved1            :       8;
+               u64     pv_pal_vendor           :       8;
+               u64     pv_pal_a_rev            :       8;
+               u64     pv_pal_a_model          :       8;
+               u64     pv_reserved2            :       16;
+       } pal_version_s;
+} pal_version_u_t;
+
+
+/* Return PAL version information */
+static inline s64
+ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0);
+       if (pal_min_version)
+               pal_min_version->pal_version_val = iprv.v0;
+
+       if (pal_cur_version)
+               pal_cur_version->pal_version_val = iprv.v1;
+
+       return iprv.status;
+}
+
+typedef union pal_tc_info_u {
+       u64                     pti_val;
+       struct {
+              u64              num_sets        :       8,
+                               associativity   :       8,
+                               num_entries     :       16,
+                               pf              :       1,
+                               unified         :       1,
+                               reduce_tr       :       1,
+                               reserved        :       29;
+       } pal_tc_info_s;
+} pal_tc_info_u_t;
+
+#define tc_reduce_tr           pal_tc_info_s.reduce_tr
+#define tc_unified             pal_tc_info_s.unified
+#define tc_pf                  pal_tc_info_s.pf
+#define tc_num_entries         pal_tc_info_s.num_entries
+#define tc_associativity       pal_tc_info_s.associativity
+#define tc_num_sets            pal_tc_info_s.num_sets
+
+
+/* Return information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_pages)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_VM_INFO, tc_level, tc_type, 0);
+       if (tc_info)
+               tc_info->pti_val = iprv.v0;
+       if (tc_pages)
+               *tc_pages = iprv.v1;
+       return iprv.status;
+}
+
+/* Get page size information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0);
+       if (tr_pages)
+               *tr_pages = iprv.v0;
+       if (vw_pages)
+               *vw_pages = iprv.v1;
+       return iprv.status;
+}
+
+typedef union pal_vm_info_1_u {
+       u64                     pvi1_val;
+       struct {
+               u64             vw              : 1,
+                               phys_add_size   : 7,
+                               key_size        : 8,
+                               max_pkr         : 8,
+                               hash_tag_id     : 8,
+                               max_dtr_entry   : 8,
+                               max_itr_entry   : 8,
+                               max_unique_tcs  : 8,
+                               num_tc_levels   : 8;
+       } pal_vm_info_1_s;
+} pal_vm_info_1_u_t;
+
+typedef union pal_vm_info_2_u {
+       u64                     pvi2_val;
+       struct {
+               u64             impl_va_msb     : 8,
+                               rid_size        : 8,
+                               reserved        : 48;
+       } pal_vm_info_2_s;
+} pal_vm_info_2_u_t;
+
+/* Get summary information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0);
+       if (vm_info_1)
+               vm_info_1->pvi1_val = iprv.v0;
+       if (vm_info_2)
+               vm_info_2->pvi2_val = iprv.v1;
+       return iprv.status;
+}
+
+typedef union pal_itr_valid_u {
+       u64                     piv_val;
+       struct {
+              u64              access_rights_valid     : 1,
+                               priv_level_valid        : 1,
+                               dirty_bit_valid         : 1,
+                               mem_attr_valid          : 1,
+                               reserved                : 60;
+       } pal_tr_valid_s;
+} pal_tr_valid_u_t;
+
+/* Read a translation register */
+static inline s64
+ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type, (u64)ia64_tpa(tr_buffer));
+       if (tr_valid)
+               tr_valid->piv_val = iprv.v0;
+       return iprv.status;
+}
+
+/*
+ * PAL_PREFETCH_VISIBILITY transaction types
+ */
+#define PAL_VISIBILITY_VIRTUAL         0
+#define PAL_VISIBILITY_PHYSICAL                1
+
+/*
+ * PAL_PREFETCH_VISIBILITY return codes
+ */
+#define PAL_VISIBILITY_OK              1
+#define PAL_VISIBILITY_OK_REMOTE_NEEDED        0
+#define PAL_VISIBILITY_INVAL_ARG       -2
+#define PAL_VISIBILITY_ERROR           -3
+
+static inline s64
+ia64_pal_prefetch_visibility (s64 trans_type)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL(iprv, PAL_PREFETCH_VISIBILITY, trans_type, 0, 0);
+       return iprv.status;
+}
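+
+/*
+ * A hypothetical caller checks the distinguished return codes above;
+ * PAL_VISIBILITY_OK_REMOTE_NEEDED is read here as "succeeded locally,
+ * but the remaining processors must make the same call":
+ *
+ *     s64 status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
+ *
+ *     if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED)
+ *             ... repeat the call on the remaining CPUs ...
+ */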
+
+#ifdef CONFIG_VTI
+#include <asm/vmx_pal.h>
+#endif // CONFIG_VTI
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PAL_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/pgalloc.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h      Wed Aug  3 00:25:11 2005
@@ -0,0 +1,196 @@
+#ifndef _ASM_IA64_PGALLOC_H
+#define _ASM_IA64_PGALLOC_H
+
+/*
+ * This file contains the functions and defines necessary to allocate
+ * page tables.
+ *
+ * This hopefully works with any (fixed) ia-64 page-size, as defined
+ * in <asm/page.h> (currently 8192).
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 2000, Goutham Rao <goutham.rao@xxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <linux/compiler.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/threads.h>
+
+#include <asm/mmu_context.h>
+#include <asm/processor.h>
+
+/*
+ * Very stupidly, we used to get new pgd's and pmd's, init their contents
+ * to point to the NULL versions of the next level page table, later on
+ * completely re-init them the same way, then free them up.  This wasted
+ * a lot of work and caused unnecessary memory traffic.  How broken...
+ * We fix this by caching them.
+ */
+#define pgd_quicklist          (local_cpu_data->pgd_quick)
+#define pmd_quicklist          (local_cpu_data->pmd_quick)
+#define pgtable_cache_size     (local_cpu_data->pgtable_cache_sz)
+
+static inline pgd_t*
+pgd_alloc_one_fast (struct mm_struct *mm)
+{
+       unsigned long *ret = NULL;
+
+       preempt_disable();
+
+       ret = pgd_quicklist;
+       if (likely(ret != NULL)) {
+               pgd_quicklist = (unsigned long *)(*ret);
+               ret[0] = 0;
+               --pgtable_cache_size;
+       } else
+               ret = NULL;
+
+       preempt_enable();
+
+       return (pgd_t *) ret;
+}
+
+static inline pgd_t*
+pgd_alloc (struct mm_struct *mm)
+{
+       /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
+       pgd_t *pgd = pgd_alloc_one_fast(mm);
+
+       if (unlikely(pgd == NULL)) {
+#ifdef XEN
+               pgd = (pgd_t *)alloc_xenheap_page();
+               memset(pgd,0,PAGE_SIZE);
+#else
+               pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+#endif
+       }
+       return pgd;
+}
+
+static inline void
+pgd_free (pgd_t *pgd)
+{
+       preempt_disable();
+       *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+       pgd_quicklist = (unsigned long *) pgd;
+       ++pgtable_cache_size;
+       preempt_enable();
+}
+
+static inline void
+pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
+{
+       pud_val(*pud_entry) = __pa(pmd);
+}
+
+static inline pmd_t*
+pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
+{
+       unsigned long *ret = NULL;
+
+       preempt_disable();
+
+       ret = (unsigned long *)pmd_quicklist;
+       if (likely(ret != NULL)) {
+               pmd_quicklist = (unsigned long *)(*ret);
+               ret[0] = 0;
+               --pgtable_cache_size;
+       }
+
+       preempt_enable();
+
+       return (pmd_t *)ret;
+}
+
+static inline pmd_t*
+pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
+{
+#ifdef XEN
+       pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
+       memset(pmd,0,PAGE_SIZE);
+#else
+       pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+#endif
+
+       return pmd;
+}
+
+static inline void
+pmd_free (pmd_t *pmd)
+{
+       preempt_disable();
+       *(unsigned long *)pmd = (unsigned long) pmd_quicklist;
+       pmd_quicklist = (unsigned long *) pmd;
+       ++pgtable_cache_size;
+       preempt_enable();
+}
+
+#define __pmd_free_tlb(tlb, pmd)       pmd_free(pmd)
+
+static inline void
+pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
+{
+       pmd_val(*pmd_entry) = page_to_phys(pte);
+}
+
+static inline void
+pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
+{
+       pmd_val(*pmd_entry) = __pa(pte);
+}
+
+static inline struct page *
+pte_alloc_one (struct mm_struct *mm, unsigned long addr)
+{
+#ifdef XEN
+       struct page *pte = alloc_xenheap_page();
+       memset(pte,0,PAGE_SIZE);
+#else
+       struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+#endif
+
+       return pte;
+}
+
+static inline pte_t *
+pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
+{
+#ifdef XEN
+       pte_t *pte = (pte_t *)alloc_xenheap_page();
+       memset(pte,0,PAGE_SIZE);
+#else
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+#endif
+
+       return pte;
+}
+
+static inline void
+pte_free (struct page *pte)
+{
+#ifdef XEN
+       free_xenheap_page(pte);
+#else
+       __free_page(pte);
+#endif
+}
+
+static inline void
+pte_free_kernel (pte_t *pte)
+{
+#ifdef XEN
+       free_xenheap_page((unsigned long) pte);
+#else
+       free_page((unsigned long) pte);
+#endif
+}
+
+#define __pte_free_tlb(tlb, pte)       tlb_remove_page((tlb), (pte))
+
+extern void check_pgt_cache (void);
+
+#endif /* _ASM_IA64_PGALLOC_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/processor.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/processor.h    Wed Aug  3 00:25:11 2005
@@ -0,0 +1,705 @@
+#ifndef _ASM_IA64_PROCESSOR_H
+#define _ASM_IA64_PROCESSOR_H
+
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ *
+ * 11/24/98    S.Eranian       added ia64_set_iva()
+ * 12/03/99    D. Mosberger    implement thread_saved_pc() via kernel unwind API
+ * 06/16/00    A. Mallick      added csd/ssd/tssd for ia32 support
+ */
+
+#include <linux/config.h>
+
+#include <asm/intrinsics.h>
+#include <asm/kregs.h>
+#include <asm/ptrace.h>
+#include <asm/ustack.h>
+
+/* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
+#define ARCH_HAS_SCHED_DOMAIN
+
+#define IA64_NUM_DBG_REGS      8
+/*
+ * Limits for PMC and PMD are set to less than maximum architected values
+ * but should be sufficient for a while
+ */
+#define IA64_NUM_PMC_REGS      32
+#define IA64_NUM_PMD_REGS      32
+
+#define DEFAULT_MAP_BASE       __IA64_UL_CONST(0x2000000000000000)
+#define DEFAULT_TASK_SIZE      __IA64_UL_CONST(0xa000000000000000)
+
+/*
+ * TASK_SIZE really is a misnomer.  It really is the maximum user
+ * space address (plus one).  On IA-64, there are five regions of 2TB
+ * each (assuming 8KB page size), for a total of 10TB of user virtual
+ * address space.
+ */
+#define TASK_SIZE              (current->thread.task_size)
+
+/*
+ * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
+ * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
+ * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
+ * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
+ */
+#define MM_VM_SIZE(mm)         DEFAULT_TASK_SIZE
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE     (current->thread.map_base)
+
+#define IA64_THREAD_FPH_VALID  (__IA64_UL(1) << 0)     /* floating-point high state valid? */
+#define IA64_THREAD_DBG_VALID  (__IA64_UL(1) << 1)     /* debug registers valid? */
+#define IA64_THREAD_PM_VALID   (__IA64_UL(1) << 2)     /* performance registers valid? */
+#define IA64_THREAD_UAC_NOPRINT        (__IA64_UL(1) << 3)     /* don't log unaligned accesses */
+#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4)     /* generate SIGBUS on unaligned acc. */
+                                                       /* bit 5 is currently unused */
+#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)  /* don't log any fpswa faults */
+#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)  /* send a SIGFPE for fpswa faults */
+
+#define IA64_THREAD_UAC_SHIFT  3
+#define IA64_THREAD_UAC_MASK   (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
+#define IA64_THREAD_FPEMU_SHIFT        6
+#define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
+
+
+/*
+ * This shift should be large enough to be able to represent 1000000000/itc_freq with good
+ * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
+ * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
+ */
+#define IA64_NSEC_PER_CYC_SHIFT        30
+
+#ifndef __ASSEMBLY__
+
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+#include <asm/percpu.h>
+#include <asm/rse.h>
+#include <asm/unwind.h>
+#include <asm/atomic.h>
+#ifdef CONFIG_NUMA
+#include <asm/nodedata.h>
+#endif
+#ifdef XEN
+#include <asm/xenprocessor.h>
+#endif
+
+#ifndef XEN
+/* like above but expressed as bitfields for more efficient access: */
+struct ia64_psr {
+       __u64 reserved0 : 1;
+       __u64 be : 1;
+       __u64 up : 1;
+       __u64 ac : 1;
+       __u64 mfl : 1;
+       __u64 mfh : 1;
+       __u64 reserved1 : 7;
+       __u64 ic : 1;
+       __u64 i : 1;
+       __u64 pk : 1;
+       __u64 reserved2 : 1;
+       __u64 dt : 1;
+       __u64 dfl : 1;
+       __u64 dfh : 1;
+       __u64 sp : 1;
+       __u64 pp : 1;
+       __u64 di : 1;
+       __u64 si : 1;
+       __u64 db : 1;
+       __u64 lp : 1;
+       __u64 tb : 1;
+       __u64 rt : 1;
+       __u64 reserved3 : 4;
+       __u64 cpl : 2;
+       __u64 is : 1;
+       __u64 mc : 1;
+       __u64 it : 1;
+       __u64 id : 1;
+       __u64 da : 1;
+       __u64 dd : 1;
+       __u64 ss : 1;
+       __u64 ri : 2;
+       __u64 ed : 1;
+       __u64 bn : 1;
+       __u64 reserved4 : 19;
+};
+#endif
+
+/*
+ * CPU type, hardware bug flags, and per-CPU state.  Frequently used
+ * state comes earlier:
+ */
+struct cpuinfo_ia64 {
+       __u32 softirq_pending;
+       __u64 itm_delta;        /* # of clock cycles between clock ticks */
+       __u64 itm_next;         /* interval timer mask value to use for next clock tick */
+       __u64 nsec_per_cyc;     /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
+       __u64 unimpl_va_mask;   /* mask of unimplemented virtual address bits (from PAL) */
+       __u64 unimpl_pa_mask;   /* mask of unimplemented physical address bits (from PAL) */
+       __u64 *pgd_quick;
+       __u64 *pmd_quick;
+       __u64 pgtable_cache_sz;
+       __u64 itc_freq;         /* frequency of ITC counter */
+       __u64 proc_freq;        /* frequency of processor */
+       __u64 cyc_per_usec;     /* itc_freq/1000000 */
+       __u64 ptce_base;
+       __u32 ptce_count[2];
+       __u32 ptce_stride[2];
+       struct task_struct *ksoftirqd;  /* kernel softirq daemon for this CPU */
+
+#ifdef CONFIG_SMP
+       __u64 loops_per_jiffy;
+       int cpu;
+#endif
+
+       /* CPUID-derived information: */
+       __u64 ppn;
+       __u64 features;
+       __u8 number;
+       __u8 revision;
+       __u8 model;
+       __u8 family;
+       __u8 archrev;
+       char vendor[16];
+
+#ifdef CONFIG_NUMA
+       struct ia64_node_data *node_data;
+#endif
+};
+
+DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+
+/*
+ * The "local" data variable.  It refers to the per-CPU data of the currently 
executing
+ * CPU, much like "current" points to the per-task data of the currently 
executing task.
+ * Do not use the address of local_cpu_data, since it will be different from
+ * cpu_data(smp_processor_id())!
+ */
+#define local_cpu_data         (&__ia64_per_cpu_var(cpu_info))
+#define cpu_data(cpu)          (&per_cpu(cpu_info, cpu))
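+
+/*
+ * For example (illustrative only), "local_cpu_data->itc_freq" is the
+ * intended usage, whereas the pointer comparison
+ * "local_cpu_data == cpu_data(smp_processor_id())" may evaluate false
+ * even on the right CPU, which is exactly the pitfall warned about above.
+ */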
+
+extern void identify_cpu (struct cpuinfo_ia64 *);
+extern void print_cpu_info (struct cpuinfo_ia64 *);
+
+typedef struct {
+       unsigned long seg;
+} mm_segment_t;
+
+#define SET_UNALIGN_CTL(task,value)                                                            \
+({                                                                                             \
+       (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)                  \
+                               | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \
+       0;                                                                                      \
+})
+#define GET_UNALIGN_CTL(task,addr)                                                             \
+({                                                                                             \
+       put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,        \
+                (int __user *) (addr));                                                        \
+})
+
+#define SET_FPEMU_CTL(task,value)                                                              \
+({                                                                                             \
+       (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)                \
+                         | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));   \
+       0;                                                                                      \
+})
+#define GET_FPEMU_CTL(task,addr)                                                               \
+({                                                                                             \
+       put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,    \
+                (int __user *) (addr));                                                        \
+})
+
+#ifdef CONFIG_IA32_SUPPORT
+struct desc_struct {
+       unsigned int a, b;
+};
+
+#define desc_empty(desc)               (!((desc)->a + (desc)->b))
+#define desc_equal(desc1, desc2)       (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+
+#define GDT_ENTRY_TLS_ENTRIES  3
+#define GDT_ENTRY_TLS_MIN      6
+#define GDT_ENTRY_TLS_MAX      (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+struct partial_page_list;
+#endif
+
+struct thread_struct {
+       __u32 flags;                    /* various thread flags (see IA64_THREAD_*) */
+       /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
+       __u8 on_ustack;                 /* executing on user-stacks? */
+       __u8 pad[3];
+       __u64 ksp;                      /* kernel stack pointer */
+       __u64 map_base;                 /* base address for get_unmapped_area() */
+       __u64 task_size;                /* limit for task size */
+       __u64 rbs_bot;                  /* the base address for the RBS */
+       int last_fph_cpu;               /* CPU that may hold the contents of f32-f127 */
+
+#ifdef CONFIG_IA32_SUPPORT
+       __u64 eflag;                    /* IA32 EFLAGS reg */
+       __u64 fsr;                      /* IA32 floating pt status reg */
+       __u64 fcr;                      /* IA32 floating pt control reg */
+       __u64 fir;                      /* IA32 fp except. instr. reg */
+       __u64 fdr;                      /* IA32 fp except. data reg */
+       __u64 old_k1;                   /* old value of ar.k1 */
+       __u64 old_iob;                  /* old IOBase value */
+       struct partial_page_list *ppl;  /* partial page list for 4K page size issue */
+        /* cached TLS descriptors. */
+       struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+
+# define INIT_THREAD_IA32      .eflag =        0,                      \
+                               .fsr =          0,                      \
+                               .fcr =          0x17800000037fULL,      \
+                               .fir =          0,                      \
+                               .fdr =          0,                      \
+                               .old_k1 =       0,                      \
+                               .old_iob =      0,                      \
+                               .ppl =          NULL,
+#else
+# define INIT_THREAD_IA32
+#endif /* CONFIG_IA32_SUPPORT */
+#ifdef CONFIG_PERFMON
+       __u64 pmcs[IA64_NUM_PMC_REGS];
+       __u64 pmds[IA64_NUM_PMD_REGS];
+       void *pfm_context;                   /* pointer to detailed PMU context */
+       unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
+# define INIT_THREAD_PM                .pmcs =                 {0UL, },  \
+                               .pmds =                 {0UL, },  \
+                               .pfm_context =          NULL,     \
+                               .pfm_needs_checking =   0UL,
+#else
+# define INIT_THREAD_PM
+#endif
+       __u64 dbr[IA64_NUM_DBG_REGS];
+       __u64 ibr[IA64_NUM_DBG_REGS];
+       struct ia64_fpreg fph[96];      /* saved/loaded on demand */
+};
+
+#define INIT_THREAD {                                          \
+       .flags =        0,                                      \
+       .on_ustack =    0,                                      \
+       .ksp =          0,                                      \
+       .map_base =     DEFAULT_MAP_BASE,                       \
+       .rbs_bot =      STACK_TOP - DEFAULT_USER_STACK_SIZE,    \
+       .task_size =    DEFAULT_TASK_SIZE,                      \
+       .last_fph_cpu =  -1,                                    \
+       INIT_THREAD_IA32                                        \
+       INIT_THREAD_PM                                          \
+       .dbr =          {0, },                                  \
+       .ibr =          {0, },                                  \
+       .fph =          {{{{0}}}, }                             \
+}
+
+#define start_thread(regs,new_ip,new_sp) do {                                          \
+       set_fs(USER_DS);                                                                \
+       regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))        \
+                        & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));      \
+       regs->cr_iip = new_ip;                                                          \
+       regs->ar_rsc = 0xf;             /* eager mode, privilege level 3 */             \
+       regs->ar_rnat = 0;                                                              \
+       regs->ar_bspstore = current->thread.rbs_bot;                                    \
+       regs->ar_fpsr = FPSR_DEFAULT;                                                   \
+       regs->loadrs = 0;                                                               \
+       regs->r8 = current->mm->dumpable;       /* set "don't zap registers" flag */    \
+       regs->r12 = new_sp - 16;        /* allocate 16 byte scratch area */             \
+       if (unlikely(!current->mm->dumpable)) {                                         \
+               /*                                                                      \
+                * Zap scratch regs to avoid leaking bits between processes with        \
+                * different uid/privileges.                                            \
+                */                                                                     \
+               regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;                           \
+               regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0;              \
+               regs->r15 = 0;                                                          \
+       }                                                                               \
+} while (0)
+
+/* Forward declarations, a strange C thing... */
+struct mm_struct;
+struct task_struct;
+
+/*
+ * Free all resources held by a thread. This is called after the
+ * parent of DEAD_TASK has collected the exit status of the task via
+ * wait().
+ */
+#define release_thread(dead_task)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk)   do { } while (0)
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE 1: Only a kernel-only process (ie the swapper or direct
+ * descendants who haven't done an "execve()") should use this: it
+ * will work within a system call from a "real" process, but the
+ * process memory space will not be free'd until both the parent and
+ * the child have exited.
+ *
+ * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
+ * into trouble in init/main.c when the child thread returns to
+ * do_basic_setup() and the timing is such that free_initmem() has
+ * been called already.
+ */
+extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
+
+/* Get wait channel for task P.  */
+extern unsigned long get_wchan (struct task_struct *p);
+
+/* Return instruction pointer of blocked task TSK.  */
+#define KSTK_EIP(tsk)                                  \
+  ({                                                   \
+       struct pt_regs *_regs = ia64_task_regs(tsk);    \
+       _regs->cr_iip + ia64_psr(_regs)->ri;            \
+  })
+
+/* Return stack pointer of blocked task TSK.  */
+#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)
+
+extern void ia64_getreg_unknown_kr (void);
+extern void ia64_setreg_unknown_kr (void);
+
+#define ia64_get_kr(regnum)                                    \
+({                                                             \
+       unsigned long r = 0;                                    \
+                                                               \
+       switch (regnum) {                                       \
+           case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;   \
+           case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;   \
+           case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;   \
+           case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;   \
+           case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;   \
+           case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;   \
+           case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;   \
+           case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;   \
+           default: ia64_getreg_unknown_kr(); break;           \
+       }                                                       \
+       r;                                                      \
+})
+
+#define ia64_set_kr(regnum, r)                                         \
+({                                                             \
+       switch (regnum) {                                       \
+           case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;    \
+           case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;    \
+           case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;    \
+           case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;    \
+           case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;    \
+           case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;    \
+           case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;    \
+           case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;    \
+           default: ia64_setreg_unknown_kr(); break;           \
+       }                                                       \
+})
+
+/*
+ * The following three macros can't be inline functions because we don't have struct
+ * task_struct at this point.
+ */
+
+/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
+#ifndef XEN
+#define ia64_is_local_fpu_owner(t)                                                      \
+({                                                                                      \
+       struct task_struct *__ia64_islfo_task = (t);                                     \
+       (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()                    \
+        && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
+})
+#endif
+
+/* Mark task T as owning the fph partition of the CPU we're running on. */
+#define ia64_set_local_fpu_owner(t) do {                                        \
+       struct task_struct *__ia64_slfo_task = (t);                              \
+       __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();              \
+       ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);        \
+} while (0)
+
+/* Mark the fph partition of task T as being invalid on all CPUs.  */
+#define ia64_drop_fpu(t)       ((t)->thread.last_fph_cpu = -1)
+
+extern void __ia64_init_fpu (void);
+extern void __ia64_save_fpu (struct ia64_fpreg *fph);
+extern void __ia64_load_fpu (struct ia64_fpreg *fph);
+extern void ia64_save_debug_regs (unsigned long *save_area);
+extern void ia64_load_debug_regs (unsigned long *save_area);
+
+#ifdef CONFIG_IA32_SUPPORT
+extern void ia32_save_state (struct task_struct *task);
+extern void ia32_load_state (struct task_struct *task);
+#endif
+
+#define ia64_fph_enable()      do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+#define ia64_fph_disable()     do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+
+/* load fp 0.0 into fph */
+static inline void
+ia64_init_fpu (void) {
+       ia64_fph_enable();
+       __ia64_init_fpu();
+       ia64_fph_disable();
+}
+
+/* save f32-f127 at FPH */
+static inline void
+ia64_save_fpu (struct ia64_fpreg *fph) {
+       ia64_fph_enable();
+       __ia64_save_fpu(fph);
+       ia64_fph_disable();
+}
+
+/* load f32-f127 from FPH */
+static inline void
+ia64_load_fpu (struct ia64_fpreg *fph) {
+       ia64_fph_enable();
+       __ia64_load_fpu(fph);
+       ia64_fph_disable();
+}
+
+static inline __u64
+ia64_clear_ic (void)
+{
+       __u64 psr;
+       psr = ia64_getreg(_IA64_REG_PSR);
+       ia64_stop();
+       ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
+       ia64_srlz_i();
+       return psr;
+}
+
+/*
+ * Restore the psr.
+ */
+static inline void
+ia64_set_psr (__u64 psr)
+{
+       ia64_stop();
+       ia64_setreg(_IA64_REG_PSR_L, psr);
+       ia64_srlz_d();
+}
+
+/*
+ * Insert a translation into an instruction and/or data translation
+ * register.
+ */
+static inline void
+ia64_itr (__u64 target_mask, __u64 tr_num,
+         __u64 vmaddr, __u64 pte,
+         __u64 log_page_size)
+{
+       ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+       ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+       ia64_stop();
+       if (target_mask & 0x1)
+               ia64_itri(tr_num, pte);
+       if (target_mask & 0x2)
+               ia64_itrd(tr_num, pte);
+}
+
+/*
+ * Insert a translation into the instruction and/or data translation
+ * cache.
+ */
+static inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
+         __u64 log_page_size)
+{
+       ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+       ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+       ia64_stop();
+       /* as per EAS2.6, itc must be the last instruction in an instruction group */
+       if (target_mask & 0x1)
+               ia64_itci(pte);
+       if (target_mask & 0x2)
+               ia64_itcd(pte);
+}
+
+/*
+ * Purge a range of addresses from instruction and/or data translation
+ * register(s).
+ */
+static inline void
+ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
+{
+       if (target_mask & 0x1)
+               ia64_ptri(vmaddr, (log_size << 2));
+       if (target_mask & 0x2)
+               ia64_ptrd(vmaddr, (log_size << 2));
+}
+
+/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
+static inline void
+ia64_set_iva (void *ivt_addr)
+{
+       ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
+       ia64_srlz_i();
+}
+
+/* Set the page table address and control bits.  */
+static inline void
+ia64_set_pta (__u64 pta)
+{
+       /* Note: srlz.i implies srlz.d */
+       ia64_setreg(_IA64_REG_CR_PTA, pta);
+       ia64_srlz_i();
+}
+
+static inline void
+ia64_eoi (void)
+{
+       ia64_setreg(_IA64_REG_CR_EOI, 0);
+       ia64_srlz_d();
+}
+
+#define cpu_relax()    ia64_hint(ia64_hint_pause)
+
+static inline void
+ia64_set_lrr0 (unsigned long val)
+{
+       ia64_setreg(_IA64_REG_CR_LRR0, val);
+       ia64_srlz_d();
+}
+
+static inline void
+ia64_set_lrr1 (unsigned long val)
+{
+       ia64_setreg(_IA64_REG_CR_LRR1, val);
+       ia64_srlz_d();
+}
+
+
+/*
+ * Given the address to which a spill occurred, return the unat bit
+ * number that corresponds to this address.
+ */
+static inline __u64
+ia64_unat_pos (void *spill_addr)
+{
+       return ((__u64) spill_addr >> 3) & 0x3f;
+}
+
+/*
+ * Set the NaT bit of an integer register which was spilled at address
+ * SPILL_ADDR.  UNAT is the mask to be updated.
+ */
+static inline void
+ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
+{
+       __u64 bit = ia64_unat_pos(spill_addr);
+       __u64 mask = 1UL << bit;
+
+       *unat = (*unat & ~mask) | (nat << bit);
+}
+
+/*
+ * Return saved PC of a blocked thread.
+ * Note that the only way T can block is through a call to schedule() -> switch_to().
+ */
+static inline unsigned long
+thread_saved_pc (struct task_struct *t)
+{
+       struct unw_frame_info info;
+       unsigned long ip;
+
+       unw_init_from_blocked_task(&info, t);
+       if (unw_unwind(&info) < 0)
+               return 0;
+       unw_get_ip(&info, &ip);
+       return ip;
+}
+
+/*
+ * Get the current instruction/program counter value.
+ */
+#define current_text_addr() \
+       ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
+
+static inline __u64
+ia64_get_ivr (void)
+{
+       __u64 r;
+       ia64_srlz_d();
+       r = ia64_getreg(_IA64_REG_CR_IVR);
+       ia64_srlz_d();
+       return r;
+}
+
+static inline void
+ia64_set_dbr (__u64 regnum, __u64 value)
+{
+       __ia64_set_dbr(regnum, value);
+#ifdef CONFIG_ITANIUM
+       ia64_srlz_d();
+#endif
+}
+
+static inline __u64
+ia64_get_dbr (__u64 regnum)
+{
+       __u64 retval;
+
+       retval = __ia64_get_dbr(regnum);
+#ifdef CONFIG_ITANIUM
+       ia64_srlz_d();
+#endif
+       return retval;
+}
+
+static inline __u64
+ia64_rotr (__u64 w, __u64 n)
+{
+       return (w >> n) | (w << (64 - n));
+}
+
+#define ia64_rotl(w,n) ia64_rotr((w), (64) - (n))
+
+/*
+ * Take a mapped kernel address and return the equivalent address
+ * in the region 7 identity mapped virtual area.
+ */
+static inline void *
+ia64_imva (void *addr)
+{
+       void *result;
+       result = (void *) ia64_tpa(addr);
+       return __va(result);
+}
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+#define PREFETCH_STRIDE                        L1_CACHE_BYTES
+
+static inline void
+prefetch (const void *x)
+{
+        ia64_lfetch(ia64_lfhint_none, x);
+}
+
+static inline void
+prefetchw (const void *x)
+{
+       ia64_lfetch_excl(ia64_lfhint_none, x);
+}
+
+#define spin_lock_prefetch(x)  prefetchw(x)
+
+extern unsigned long boot_option_idle_override;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PROCESSOR_H */
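
The UNAT arithmetic implemented by ia64_unat_pos() and ia64_set_unat() above
is compact enough to be easy to misread: spilled integer registers sit 8 bytes
apart, so bits 3..8 of the spill address select one of the 64 NaT bits.  A
minimal stand-alone sketch of the same computation (the spill address below is
hypothetical, chosen only for illustration):

    /* Same arithmetic as ia64_unat_pos()/ia64_set_unat(); illustration only. */
    #include <stdio.h>

    static unsigned long unat_pos(unsigned long spill_addr)
    {
            return (spill_addr >> 3) & 0x3f;        /* 8-byte slots -> bits 3..8 */
    }

    int main(void)
    {
            unsigned long unat = 0;
            unsigned long spill_addr = 0x1008;      /* hypothetical spill slot */
            unsigned long bit = unat_pos(spill_addr);
            unsigned long nat = 1;                  /* spilled register held a NaT */

            unat = (unat & ~(1UL << bit)) | (nat << bit);
            printf("spill 0x%lx -> UNAT bit %lu, unat = 0x%lx\n",
                   spill_addr, bit, unat);
            return 0;
    }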
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/ptrace.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/ptrace.h       Wed Aug  3 00:25:11 2005
@@ -0,0 +1,341 @@
+#ifndef _ASM_IA64_PTRACE_H
+#define _ASM_IA64_PTRACE_H
+
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ * Copyright (C) 2003 Intel Co
+ *     Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
+ *     Fenghua Yu <fenghua.yu@xxxxxxxxx>
+ *     Arun Sharma <arun.sharma@xxxxxxxxx>
+ *
+ * 12/07/98    S. Eranian      added pt_regs & switch_stack
+ * 12/21/98    D. Mosberger    updated to match latest code
+ *  6/17/99    D. Mosberger    added second unat member to "struct switch_stack"
+ *
+ */
+/*
+ * When a user process is blocked, its state looks as follows:
+ *
+ *            +----------------------+ ------- IA64_STK_OFFSET
+ *            |                      |   ^
+ *            |   struct pt_regs     |   |
+ *            |                      |   |
+ *            +----------------------+   |
+ *            |                      |   |
+ *            |    memory stack      |   |
+ *            | (growing downwards)  |   |
+ *            //....................//   |
+ *                                       |
+ *            //....................//   |
+ *            |                      |   |
+ *            +----------------------+   |
+ *            | struct switch_stack  |   |
+ *            |                      |   |
+ *            +----------------------+   |
+ *            |                      |   |
+ *            //....................//   |
+ *                                       |
+ *            //....................//   |
+ *            |                      |   |
+ *            |   register stack     |   |
+ *            |  (growing upwards)   |   |
+ *            |                      |   |
+ *            +----------------------+   |  --- IA64_RBS_OFFSET
+ *            | struct thread_info   |   |   ^
+ *            +----------------------+   |   |
+ *            |                      |   |   |
+ *            | struct task_struct   |   |   |
+ * current -> |                      |   |   |
+ *            +----------------------+ -----------
+ *
+ * Note that ar.ec is not saved explicitly in pt_reg or switch_stack.
+ * This is because ar.ec is saved as part of ar.pfs.
+ */
+
+#include <linux/config.h>
+
+#include <asm/fpu.h>
+#include <asm/offsets.h>
+
+/*
+ * Base-2 logarithm of number of pages to allocate per task structure
+ * (including register backing store and memory stack):
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define KERNEL_STACK_SIZE_ORDER               3
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define KERNEL_STACK_SIZE_ORDER               2
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define KERNEL_STACK_SIZE_ORDER               1
+#else
+# define KERNEL_STACK_SIZE_ORDER               0
+#endif
+
+#define IA64_RBS_OFFSET                ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15)
+#define IA64_STK_OFFSET                ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
+
+#define KERNEL_STACK_SIZE              IA64_STK_OFFSET
+
+#ifndef __ASSEMBLY__
+
+#include <asm/current.h>
+#include <asm/page.h>
+
+/*
+ * This struct defines the way the registers are saved on system
+ * calls.
+ *
+ * We don't save all floating point registers because the kernel
+ * is compiled to use only a very small subset, so the others are
+ * left untouched.
+ *
+ * THIS STRUCTURE MUST BE A MULTIPLE OF 16 BYTES IN SIZE
+ * (because the memory stack pointer MUST ALWAYS be aligned this way)
+ *
+ */
+#ifdef XEN
+#include <public/arch-ia64.h>
+#else
+struct pt_regs {
+       /* The following registers are saved by SAVE_MIN: */
+       unsigned long b6;               /* scratch */
+       unsigned long b7;               /* scratch */
+
+       unsigned long ar_csd;           /* used by cmp8xchg16 (scratch) */
+       unsigned long ar_ssd;           /* reserved for future use (scratch) */
+
+       unsigned long r8;               /* scratch (return value register 0) */
+       unsigned long r9;               /* scratch (return value register 1) */
+       unsigned long r10;              /* scratch (return value register 2) */
+       unsigned long r11;              /* scratch (return value register 3) */
+
+       unsigned long cr_ipsr;          /* interrupted task's psr */
+       unsigned long cr_iip;           /* interrupted task's instruction pointer */
+       /*
+        * interrupted task's function state; if bit 63 is cleared, it
+        * contains syscall's ar.pfs.pfm:
+        */
+       unsigned long cr_ifs;
+
+       unsigned long ar_unat;          /* interrupted task's NaT register (preserved) */
+       unsigned long ar_pfs;           /* prev function state  */
+       unsigned long ar_rsc;           /* RSE configuration */
+       /* The following two are valid only if cr_ipsr.cpl > 0: */
+       unsigned long ar_rnat;          /* RSE NaT */
+       unsigned long ar_bspstore;      /* RSE bspstore */
+
+       unsigned long pr;               /* 64 predicate registers (1 bit each) */
+       unsigned long b0;               /* return pointer (bp) */
+       unsigned long loadrs;           /* size of dirty partition << 16 */
+
+       unsigned long r1;               /* the gp pointer */
+       unsigned long r12;              /* interrupted task's memory stack pointer */
+       unsigned long r13;              /* thread pointer */
+
+       unsigned long ar_fpsr;          /* floating point status (preserved) */
+       unsigned long r15;              /* scratch */
+
+       /* The remaining registers are NOT saved for system calls.  */
+
+       unsigned long r14;              /* scratch */
+       unsigned long r2;               /* scratch */
+       unsigned long r3;               /* scratch */
+
+       /* The following registers are saved by SAVE_REST: */
+       unsigned long r16;              /* scratch */
+       unsigned long r17;              /* scratch */
+       unsigned long r18;              /* scratch */
+       unsigned long r19;              /* scratch */
+       unsigned long r20;              /* scratch */
+       unsigned long r21;              /* scratch */
+       unsigned long r22;              /* scratch */
+       unsigned long r23;              /* scratch */
+       unsigned long r24;              /* scratch */
+       unsigned long r25;              /* scratch */
+       unsigned long r26;              /* scratch */
+       unsigned long r27;              /* scratch */
+       unsigned long r28;              /* scratch */
+       unsigned long r29;              /* scratch */
+       unsigned long r30;              /* scratch */
+       unsigned long r31;              /* scratch */
+
+       unsigned long ar_ccv;           /* compare/exchange value (scratch) */
+
+       /*
+        * Floating point registers that the kernel considers scratch:
+        */
+       struct ia64_fpreg f6;           /* scratch */
+       struct ia64_fpreg f7;           /* scratch */
+       struct ia64_fpreg f8;           /* scratch */
+       struct ia64_fpreg f9;           /* scratch */
+       struct ia64_fpreg f10;          /* scratch */
+       struct ia64_fpreg f11;          /* scratch */
+};
+#endif
+
+/*
+ * This structure contains the additional registers that need to be
+ * preserved across a context switch.  This generally consists of
+ * "preserved" registers.
+ */
+struct switch_stack {
+       unsigned long caller_unat;      /* user NaT collection register (preserved) */
+       unsigned long ar_fpsr;          /* floating-point status register */
+
+       struct ia64_fpreg f2;           /* preserved */
+       struct ia64_fpreg f3;           /* preserved */
+       struct ia64_fpreg f4;           /* preserved */
+       struct ia64_fpreg f5;           /* preserved */
+
+       struct ia64_fpreg f12;          /* scratch, but untouched by kernel */
+       struct ia64_fpreg f13;          /* scratch, but untouched by kernel */
+       struct ia64_fpreg f14;          /* scratch, but untouched by kernel */
+       struct ia64_fpreg f15;          /* scratch, but untouched by kernel */
+       struct ia64_fpreg f16;          /* preserved */
+       struct ia64_fpreg f17;          /* preserved */
+       struct ia64_fpreg f18;          /* preserved */
+       struct ia64_fpreg f19;          /* preserved */
+       struct ia64_fpreg f20;          /* preserved */
+       struct ia64_fpreg f21;          /* preserved */
+       struct ia64_fpreg f22;          /* preserved */
+       struct ia64_fpreg f23;          /* preserved */
+       struct ia64_fpreg f24;          /* preserved */
+       struct ia64_fpreg f25;          /* preserved */
+       struct ia64_fpreg f26;          /* preserved */
+       struct ia64_fpreg f27;          /* preserved */
+       struct ia64_fpreg f28;          /* preserved */
+       struct ia64_fpreg f29;          /* preserved */
+       struct ia64_fpreg f30;          /* preserved */
+       struct ia64_fpreg f31;          /* preserved */
+
+       unsigned long r4;               /* preserved */
+       unsigned long r5;               /* preserved */
+       unsigned long r6;               /* preserved */
+       unsigned long r7;               /* preserved */
+
+       unsigned long b0;               /* so we can force a direct return in copy_thread */
+       unsigned long b1;
+       unsigned long b2;
+       unsigned long b3;
+       unsigned long b4;
+       unsigned long b5;
+
+       unsigned long ar_pfs;           /* previous function state */
+       unsigned long ar_lc;            /* loop counter (preserved) */
+       unsigned long ar_unat;          /* NaT bits for r4-r7 */
+       unsigned long ar_rnat;          /* RSE NaT collection register */
+       unsigned long ar_bspstore;      /* RSE dirty base (preserved) */
+       unsigned long pr;               /* 64 predicate registers (1 bit each) */
+};
+
+#ifdef __KERNEL__
+/*
+ * We use the ia64_psr(regs)->ri to determine which of the three
+ * instructions in the bundle (16 bytes) took the sample.  Generate
+ * the canonical representation by adding the slot number to the
+ * instruction pointer.
+ */
+# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
+/* Conserve space in histogram by encoding slot bits in address
+ * bits 2 and 3 rather than bits 0 and 1.
+ */
+#define profile_pc(regs)                                               \
+({                                                                     \
+       unsigned long __ip = instruction_pointer(regs);                 \
+       (__ip & ~3UL) + ((__ip & 3UL) << 2);                            \
+})
+
+  /* given a pointer to a task_struct, return the user's pt_regs */
+# define ia64_task_regs(t)             (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define ia64_psr(regs)                ((struct ia64_psr *) &(regs)->cr_ipsr)
+# define user_mode(regs)               (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
+# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
+# define fsys_mode(task,regs)                                  \
+  ({                                                           \
+         struct task_struct *_task = (task);                   \
+         struct pt_regs *_regs = (regs);                       \
+         !user_mode(_regs) && user_stack(_task, _regs);        \
+  })
+
+  /*
+   * System call handlers that, upon successful completion, need to return a negative value
+   * should call force_successful_syscall_return() right before returning.  On architectures
+   * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
+   * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
+   * flag will not get set.  On architectures which do not support a separate error flag,
+   * the macro is a no-op and the spurious error condition needs to be filtered out by some
+   * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
+   * or something along those lines).
+   *
+   * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
+   */
+# define force_successful_syscall_return()     (ia64_task_regs(current)->r8 = 0)
+
+  struct task_struct;                  /* forward decl */
+  struct unw_frame_info;               /* forward decl */
+
+  extern void show_regs (struct pt_regs *);
+  extern void ia64_do_show_stack (struct unw_frame_info *, void *);
+  extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
+                                              unsigned long *);
+  extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
+                         unsigned long, long *);
+  extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
+                         unsigned long, long);
+  extern void ia64_flush_fph (struct task_struct *);
+  extern void ia64_sync_fph (struct task_struct *);
+  extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
+                                  unsigned long, unsigned long);
+
+  /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
+  extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
+  /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
+  extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
+
+  extern void ia64_increment_ip (struct pt_regs *pt);
+  extern void ia64_decrement_ip (struct pt_regs *pt);
+
+#endif /* !__KERNEL__ */
+
+/* pt_all_user_regs is used for PTRACE_GETREGS and PTRACE_SETREGS */
+struct pt_all_user_regs {
+       unsigned long nat;
+       unsigned long cr_iip;
+       unsigned long cfm;
+       unsigned long cr_ipsr;
+       unsigned long pr;
+
+       unsigned long gr[32];
+       unsigned long br[8];
+       unsigned long ar[128];
+       struct ia64_fpreg fr[128];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/* indices to application-registers array in pt_all_user_regs */
+#define PT_AUR_RSC     16
+#define PT_AUR_BSP     17
+#define PT_AUR_BSPSTORE        18
+#define PT_AUR_RNAT    19
+#define PT_AUR_CCV     32
+#define PT_AUR_UNAT    36
+#define PT_AUR_FPSR    40
+#define PT_AUR_PFS     64
+#define PT_AUR_LC      65
+#define PT_AUR_EC      66
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the numbers assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK     12      /* resume execution until next branch */
+#define PTRACE_OLD_GETSIGINFO  13      /* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>)  */
+#define PTRACE_OLD_SETSIGINFO  14      /* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>)  */
+#define PTRACE_GETREGS         18      /* get all registers (pt_all_user_regs) in one shot */
+#define PTRACE_SETREGS         19      /* set all registers (pt_all_user_regs) in one shot */
+
+#define PTRACE_OLDSETOPTIONS   21
+
+#endif /* _ASM_IA64_PTRACE_H */
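
A note on the slot encoding used by instruction_pointer() and profile_pc()
above: cr_iip is the 16-byte-aligned bundle address and psr.ri (0..2) selects
the instruction slot, so the canonical IP carries the slot in bits 0..1 while
the histogram encoding moves it to bits 2..3.  A stand-alone sketch of the
same arithmetic (the bundle address below is hypothetical):

    /* Same arithmetic as instruction_pointer()/profile_pc(); illustration only. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long cr_iip = 0xa000000100001230UL;    /* hypothetical bundle address */
            unsigned long ri = 2;                           /* slot within the bundle */

            unsigned long ip = cr_iip + ri;                 /* canonical IP */
            unsigned long pc = (ip & ~3UL) + ((ip & 3UL) << 2);     /* histogram form */

            printf("ip = 0x%lx, profile pc = 0x%lx\n", ip, pc);
            return 0;
    }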
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h    Wed Aug  3 00:25:11 2005
@@ -0,0 +1,994 @@
+#ifndef _ASM_IA64_SN_SN_SAL_H
+#define _ASM_IA64_SN_SN_SAL_H
+
+/*
+ * System Abstraction Layer definitions for IA64
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+
+#include <linux/config.h>
+#include <asm/sal.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/nodepda.h>
+
+// SGI Specific Calls
+#define  SN_SAL_POD_MODE                           0x02000001
+#define  SN_SAL_SYSTEM_RESET                       0x02000002
+#define  SN_SAL_PROBE                              0x02000003
+#define  SN_SAL_GET_MASTER_NASID                   0x02000004
+#define  SN_SAL_GET_KLCONFIG_ADDR                  0x02000005
+#define  SN_SAL_LOG_CE                            0x02000006
+#define  SN_SAL_REGISTER_CE                       0x02000007
+#define  SN_SAL_GET_PARTITION_ADDR                0x02000009
+#define  SN_SAL_XP_ADDR_REGION                    0x0200000f
+#define  SN_SAL_NO_FAULT_ZONE_VIRTUAL             0x02000010
+#define  SN_SAL_NO_FAULT_ZONE_PHYSICAL            0x02000011
+#define  SN_SAL_PRINT_ERROR                       0x02000012
+#define  SN_SAL_SET_ERROR_HANDLING_FEATURES       0x0200001a   // reentrant
+#define  SN_SAL_GET_FIT_COMPT                     0x0200001b   // reentrant
+#define  SN_SAL_GET_HUB_INFO                       0x0200001c
+#define  SN_SAL_GET_SAPIC_INFO                     0x0200001d
+#define  SN_SAL_CONSOLE_PUTC                       0x02000021
+#define  SN_SAL_CONSOLE_GETC                       0x02000022
+#define  SN_SAL_CONSOLE_PUTS                       0x02000023
+#define  SN_SAL_CONSOLE_GETS                       0x02000024
+#define  SN_SAL_CONSOLE_GETS_TIMEOUT               0x02000025
+#define  SN_SAL_CONSOLE_POLL                       0x02000026
+#define  SN_SAL_CONSOLE_INTR                       0x02000027
+#define  SN_SAL_CONSOLE_PUTB                      0x02000028
+#define  SN_SAL_CONSOLE_XMIT_CHARS                0x0200002a
+#define  SN_SAL_CONSOLE_READC                     0x0200002b
+#define  SN_SAL_SYSCTL_MODID_GET                  0x02000031
+#define  SN_SAL_SYSCTL_GET                         0x02000032
+#define  SN_SAL_SYSCTL_IOBRICK_MODULE_GET          0x02000033
+#define  SN_SAL_SYSCTL_IO_PORTSPEED_GET            0x02000035
+#define  SN_SAL_SYSCTL_SLAB_GET                    0x02000036
+#define  SN_SAL_BUS_CONFIG                        0x02000037
+#define  SN_SAL_SYS_SERIAL_GET                    0x02000038
+#define  SN_SAL_PARTITION_SERIAL_GET              0x02000039
+#define  SN_SAL_SYSCTL_PARTITION_GET              0x0200003a
+#define  SN_SAL_SYSTEM_POWER_DOWN                 0x0200003b
+#define  SN_SAL_GET_MASTER_BASEIO_NASID            0x0200003c
+#define  SN_SAL_COHERENCE                          0x0200003d
+#define  SN_SAL_MEMPROTECT                         0x0200003e
+#define  SN_SAL_SYSCTL_FRU_CAPTURE                0x0200003f
+
+#define  SN_SAL_SYSCTL_IOBRICK_PCI_OP             0x02000042   // reentrant
+#define  SN_SAL_IROUTER_OP                         0x02000043
+#define  SN_SAL_IOIF_INTERRUPT                    0x0200004a
+#define  SN_SAL_HWPERF_OP                         0x02000050   // lock
+#define  SN_SAL_IOIF_ERROR_INTERRUPT              0x02000051
+
+#define  SN_SAL_IOIF_SLOT_ENABLE                  0x02000053
+#define  SN_SAL_IOIF_SLOT_DISABLE                 0x02000054
+#define  SN_SAL_IOIF_GET_HUBDEV_INFO              0x02000055
+#define  SN_SAL_IOIF_GET_PCIBUS_INFO              0x02000056
+#define  SN_SAL_IOIF_GET_PCIDEV_INFO              0x02000057
+#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST     0x02000058
+
+#define SN_SAL_HUB_ERROR_INTERRUPT                0x02000060
+
+
+/*
+ * Service-specific constants
+ */
+
+/* Console interrupt manipulation */
+       /* action codes */
+#define SAL_CONSOLE_INTR_OFF    0       /* turn the interrupt off */
+#define SAL_CONSOLE_INTR_ON     1       /* turn the interrupt on */
+#define SAL_CONSOLE_INTR_STATUS 2      /* retrieve the interrupt status */
+       /* interrupt specification & status return codes */
+#define SAL_CONSOLE_INTR_XMIT  1       /* output interrupt */
+#define SAL_CONSOLE_INTR_RECV  2       /* input interrupt */
+
+/* interrupt handling */
+#define SAL_INTR_ALLOC         1
+#define SAL_INTR_FREE          2
+
+/*
+ * IRouter (i.e. generalized system controller) operations
+ */
+#define SAL_IROUTER_OPEN       0       /* open a subchannel */
+#define SAL_IROUTER_CLOSE      1       /* close a subchannel */
+#define SAL_IROUTER_SEND       2       /* send part of an IRouter packet */
+#define SAL_IROUTER_RECV       3       /* receive part of an IRouter packet */
+#define SAL_IROUTER_INTR_STATUS        4       /* check the interrupt status for
+                                                * an open subchannel
+                                                */
+#define SAL_IROUTER_INTR_ON    5       /* enable an interrupt */
+#define SAL_IROUTER_INTR_OFF   6       /* disable an interrupt */
+#define SAL_IROUTER_INIT       7       /* initialize IRouter driver */
+
+/* IRouter interrupt mask bits */
+#define SAL_IROUTER_INTR_XMIT  SAL_CONSOLE_INTR_XMIT
+#define SAL_IROUTER_INTR_RECV  SAL_CONSOLE_INTR_RECV
+
+
+/*
+ * SAL Error Codes
+ */
+#define SALRET_MORE_PASSES     1
+#define SALRET_OK              0
+#define SALRET_NOT_IMPLEMENTED (-1)
+#define SALRET_INVALID_ARG     (-2)
+#define SALRET_ERROR           (-3)
+
+
+#ifndef XEN
+/**
+ * sn_sal_rev_major - get the major SGI SAL revision number
+ *
+ * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
+ * This routine simply extracts the major value from the
+ * @ia64_sal_systab structure constructed by ia64_sal_init().
+ */
+static inline int
+sn_sal_rev_major(void)
+{
+       struct ia64_sal_systab *systab = efi.sal_systab;
+
+       return (int)systab->sal_b_rev_major;
+}
+
+/**
+ * sn_sal_rev_minor - get the minor SGI SAL revision number
+ *
+ * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
+ * This routine simply extracts the minor value from the
+ * @ia64_sal_systab structure constructed by ia64_sal_init().
+ */
+static inline int
+sn_sal_rev_minor(void)
+{
+       struct ia64_sal_systab *systab = efi.sal_systab;
+       
+       return (int)systab->sal_b_rev_minor;
+}
+
+/*
+ * Specify the minimum PROM revision required for this kernel.
+ * Note that they're stored in hex format...
+ */
+#define SN_SAL_MIN_MAJOR       0x4  /* SN2 kernels need at least PROM 4.0 */
+#define SN_SAL_MIN_MINOR       0x0
+
+/*
+ * Returns the master console nasid; if the call fails, an illegal
+ * value is returned.
+ */
+static inline u64
+ia64_sn_get_console_nasid(void)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
+
+       if (ret_stuff.status < 0)
+               return ret_stuff.status;
+
+       /* Master console nasid is in 'v0' */
+       return ret_stuff.v0;
+}
+
+/*
+ * Returns the master baseio nasid; if the call fails, an illegal
+ * value is returned.
+ */
+static inline u64
+ia64_sn_get_master_baseio_nasid(void)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0);
+
+       if (ret_stuff.status < 0)
+               return ret_stuff.status;
+
+       /* Master baseio nasid is in 'v0' */
+       return ret_stuff.v0;
+}
+
+static inline char *
+ia64_sn_get_klconfig_addr(nasid_t nasid)
+{
+       struct ia64_sal_retval ret_stuff;
+       int cnodeid;
+
+       cnodeid = nasid_to_cnodeid(nasid);
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
+
+       /*
+        * We should panic if a valid cnode nasid does not produce
+        * a klconfig address.
+        */
+       if (ret_stuff.status != 0) {
+               panic("ia64_sn_get_klconfig_addr: Returned error %lx\n", 
ret_stuff.status);
+       }
+       return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
+}
+#endif /* !XEN */
+
+/*
+ * Returns the next console character.
+ */
+static inline u64
+ia64_sn_console_getc(int *ch)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
+
+       /* character is in 'v0' */
+       *ch = (int)ret_stuff.v0;
+
+       return ret_stuff.status;
+}
+
+/*
+ * Read a character from the SAL console device, after a previous interrupt
+ * or poll operation has indicated that a character is available to be
+ * read.
+ */
+static inline u64
+ia64_sn_console_readc(void)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0);
+
+       /* character is in 'v0' */
+       return ret_stuff.v0;
+}
+
+/*
+ * Sends the given character to the console.
+ */
+static inline u64
+ia64_sn_console_putc(char ch)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (uint64_t)ch, 0, 0, 0, 0, 0, 0);
+
+       return ret_stuff.status;
+}
+
+/*
+ * Sends the given buffer to the console.
+ */
+static inline u64
+ia64_sn_console_putb(const char *buf, int len)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0; 
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (uint64_t)buf, (uint64_t)len, 0, 0, 0, 0, 0);
+
+       if ( ret_stuff.status == 0 ) {
+               return ret_stuff.v0;
+       }
+       return (u64)0;
+}
+
+#ifndef XEN
+/*
+ * Print a platform error record
+ */
+static inline u64
+ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
+
+       return ret_stuff.status;
+}
+
+/*
+ * Check for Platform errors
+ */
+static inline u64
+ia64_sn_plat_cpei_handler(void)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
+
+       return ret_stuff.status;
+}
+
+/*
+ * Checks for console input.
+ */
+static inline u64
+ia64_sn_console_check(int *result)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
+
+       /* result is in 'v0' */
+       *result = (int)ret_stuff.v0;
+
+       return ret_stuff.status;
+}
+
+/*
+ * Checks console interrupt status
+ */
+static inline u64
+ia64_sn_console_intr_status(void)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
+                0, SAL_CONSOLE_INTR_STATUS,
+                0, 0, 0, 0, 0);
+
+       if (ret_stuff.status == 0) {
+           return ret_stuff.v0;
+       }
+       
+       return 0;
+}
+
+/*
+ * Enable an interrupt on the SAL console device.
+ */
+static inline void
+ia64_sn_console_intr_enable(uint64_t intr)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
+                intr, SAL_CONSOLE_INTR_ON,
+                0, 0, 0, 0, 0);
+}
+
+/*
+ * Disable an interrupt on the SAL console device.
+ */
+static inline void
+ia64_sn_console_intr_disable(uint64_t intr)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
+                intr, SAL_CONSOLE_INTR_OFF,
+                0, 0, 0, 0, 0);
+}
+
+/*
+ * Sends a character buffer to the console asynchronously.
+ */
+static inline u64
+ia64_sn_console_xmit_chars(char *buf, int len)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
+                (uint64_t)buf, (uint64_t)len,
+                0, 0, 0, 0, 0);
+
+       if (ret_stuff.status == 0) {
+           return ret_stuff.v0;
+       }
+
+       return 0;
+}
+
+/*
+ * Returns the iobrick module Id
+ */
+static inline u64
+ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
+
+       /* result is in 'v0' */
+       *result = (int)ret_stuff.v0;
+
+       return ret_stuff.status;
+}
+
+/**
+ * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function
+ *
+ * SN_SAL_POD_MODE actually takes an argument, but it's always
+ * 0 when we call it from the kernel, so we don't have to expose
+ * it to the caller.
+ */
+static inline u64
+ia64_sn_pod_mode(void)
+{
+       struct ia64_sal_retval isrv;
+       SAL_CALL(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
+       if (isrv.status)
+               return 0;
+       return isrv.v0;
+}
+
+/**
+ * ia64_sn_probe_mem - read from memory safely
+ * @addr: address to probe
+ * @size: number bytes to read (1,2,4,8)
+ * @data_ptr: address to store value read by probe (-1 returned if probe fails)
+ *
+ * Call into the SAL to do a memory read.  If the read generates a machine
+ * check, this routine will recover gracefully and return -1 to the caller.
+ * @addr is usually a kernel virtual address in uncached space (i.e. the
+ * address starts with 0xc), but if called in physical mode, @addr should
+ * be a physical address.
+ *
+ * Return values:
+ *  0 - probe successful
+ *  1 - probe failed (generated MCA)
+ *  2 - Bad arg
+ * <0 - PAL error
+ */
+static inline u64
+ia64_sn_probe_mem(long addr, long size, void *data_ptr)
+{
+       struct ia64_sal_retval isrv;
+
+       SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0);
+
+       if (data_ptr) {
+               switch (size) {
+               case 1:
+                       *((u8*)data_ptr) = (u8)isrv.v0;
+                       break;
+               case 2:
+                       *((u16*)data_ptr) = (u16)isrv.v0;
+                       break;
+               case 4:
+                       *((u32*)data_ptr) = (u32)isrv.v0;
+                       break;
+               case 8:
+                       *((u64*)data_ptr) = (u64)isrv.v0;
+                       break;
+               default:
+                       isrv.status = 2;
+               }
+       }
+       return isrv.status;
+}
+
+/*
+ * Retrieve the system serial number as an ASCII string.
+ */
+static inline u64
+ia64_sn_sys_serial_get(char *buf)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
+       return ret_stuff.status;
+}
+
+extern char sn_system_serial_number_string[];
+extern u64 sn_partition_serial_number;
+
+static inline char *
+sn_system_serial_number(void) {
+       if (sn_system_serial_number_string[0]) {
+               return(sn_system_serial_number_string);
+       } else {
+               ia64_sn_sys_serial_get(sn_system_serial_number_string);
+               return(sn_system_serial_number_string);
+       }
+}
+       
+
+/*
+ * Returns a unique id number for this system and partition (suitable for
+ * use with license managers), based in part on the system serial number.
+ */
+static inline u64
+ia64_sn_partition_serial_get(void)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0);
+       if (ret_stuff.status != 0)
+           return 0;
+       return ret_stuff.v0;
+}
+
+static inline u64
+sn_partition_serial_number_val(void) {
+       if (sn_partition_serial_number) {
+               return(sn_partition_serial_number);
+       } else {
+               return(sn_partition_serial_number = ia64_sn_partition_serial_get());
+       }
+}
+
+/*
+ * Returns the partition id of the nasid passed in as an argument,
+ * or INVALID_PARTID if the partition id cannot be retrieved.
+ */
+static inline partid_t
+ia64_sn_sysctl_partition_get(nasid_t nasid)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
+                0, 0, 0, 0, 0, 0);
+       if (ret_stuff.status != 0)
+           return INVALID_PARTID;
+       return ((partid_t)ret_stuff.v0);
+}
+
+/*
+ * Returns the partition id of the current processor.
+ */
+
+extern partid_t sn_partid;
+
+static inline partid_t
+sn_local_partid(void) {
+       if (sn_partid < 0) {
+               return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id())));
+       } else {
+               return sn_partid;
+       }
+}
+
+/*
+ * Register or unregister a physical address range being referenced across
+ * a partition boundary for which certain SAL errors should be scanned for,
+ * cleaned up and ignored.  This is of value for kernel partitioning code only.
+ * Values for the operation argument:
+ *     1 = register this address range with SAL
+ *     0 = unregister this address range with SAL
+ * 
+ * SAL maintains a reference count on an address range in case it is registered
+ * multiple times.
+ * 
+ * On success, returns the reference count of the address range after the SAL
+ * call has performed the current registration/unregistration.  Returns a
+ * negative value if an error occurred.
+ */
+static inline int
+sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation,
+                0, 0, 0, 0);
+       return ret_stuff.status;
+}
+
+/*
+ * Register or unregister an instruction range for which SAL errors should
+ * be ignored.  If an error occurs while in the registered range, SAL jumps
+ * to return_addr after ignoring the error.  Values for the operation argument:
+ *     1 = register this instruction range with SAL
+ *     0 = unregister this instruction range with SAL
+ *
+ * Returns 0 on success, or a negative value if an error occurred.
+ */
+static inline int
+sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
+                        int virtual, int operation)
+{
+       struct ia64_sal_retval ret_stuff;
+       u64 call;
+       if (virtual) {
+               call = SN_SAL_NO_FAULT_ZONE_VIRTUAL;
+       } else {
+               call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
+       }
+       SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1,
+                0, 0, 0);
+       return ret_stuff.status;
+}
+
+/*
+ * Change or query the coherence domain for this partition. Each cpu-based
+ * nasid is represented by a bit in an array of 64-bit words:
+ *      0 = not in this partition's coherency domain
+ *      1 = in this partition's coherency domain
+ *
+ * It is not possible for the local system's nasids to be removed from
+ * the coherency domain.  Purpose of the domain arguments:
+ *      new_domain = set the coherence domain to the given nasids
+ *      old_domain = return the current coherence domain
+ *
+ * Returns 0 on success, or a negative value if an error occurred.
+ */
+static inline int
+sn_change_coherence(u64 *new_domain, u64 *old_domain)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0,
+                0, 0, 0);
+       return ret_stuff.status;
+}
+
+/*
+ * Change memory access protections for a physical address range.
+ * nasid_array is not used on Altix, but may be in future architectures.
+ * Available memory protection access classes are defined after the function.
+ */
+static inline int
+sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
+{
+       struct ia64_sal_retval ret_stuff;
+       int cnodeid;
+       unsigned long irq_flags;
+
+       cnodeid = nasid_to_cnodeid(get_node_number(paddr));
+       // spin_lock(&NODEPDA(cnodeid)->bist_lock);
+       local_irq_save(irq_flags);
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array,
+                perms, 0, 0, 0);
+       local_irq_restore(irq_flags);
+       // spin_unlock(&NODEPDA(cnodeid)->bist_lock);
+       return ret_stuff.status;
+}
+#define SN_MEMPROT_ACCESS_CLASS_0              0x14a080
+#define SN_MEMPROT_ACCESS_CLASS_1              0x2520c2
+#define SN_MEMPROT_ACCESS_CLASS_2              0x14a1ca
+#define SN_MEMPROT_ACCESS_CLASS_3              0x14a290
+#define SN_MEMPROT_ACCESS_CLASS_6              0x084080
+#define SN_MEMPROT_ACCESS_CLASS_7              0x021080
+
+/*
+ * Turns off system power.
+ */
+static inline void
+ia64_sn_power_down(void)
+{
+       struct ia64_sal_retval ret_stuff;
+       SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
+       while(1);
+       /* never returns */
+}
+
+/**
+ * ia64_sn_fru_capture - tell the system controller to capture hw state
+ *
+ * This routine will call the SAL which will tell the system controller(s)
+ * to capture hw mmr information from each SHub in the system.
+ */
+static inline u64
+ia64_sn_fru_capture(void)
+{
+        struct ia64_sal_retval isrv;
+        SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
+        if (isrv.status)
+                return 0;
+        return isrv.v0;
+}
+
+/*
+ * Performs an operation on a PCI bus or slot -- power up, power down
+ * or reset.
+ */
+static inline u64
+ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type, 
+                             u64 bus, char slot, 
+                             u64 action)
+{
+       struct ia64_sal_retval rv = {0, 0, 0, 0};
+
+       SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action,
+                       bus, (u64) slot, 0, 0);
+       if (rv.status)
+               return rv.v0;
+       return 0;
+}
+
+
+/*
+ * Open a subchannel for sending arbitrary data to the system
+ * controller network via the system controller device associated with
+ * 'nasid'.  Return the subchannel number or a negative error code.
+ */
+static inline int
+ia64_sn_irtr_open(nasid_t nasid)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid,
+                          0, 0, 0, 0, 0);
+       return (int) rv.v0;
+}
+
+/*
+ * Close system controller subchannel 'subch' previously opened on 'nasid'.
+ */
+static inline int
+ia64_sn_irtr_close(nasid_t nasid, int subch)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE,
+                          (u64) nasid, (u64) subch, 0, 0, 0, 0);
+       return (int) rv.status;
+}
+
+/*
+ * Read data from system controller associated with 'nasid' on
+ * subchannel 'subch'.  The buffer to be filled is pointed to by
+ * 'buf', and its capacity is in the integer pointed to by 'len'.  The
+ * referent of 'len' is set to the number of bytes read by the SAL
+ * call.  The return value is either SALRET_OK (for bytes read) or
+ * SALRET_ERROR (for error or "no data available").
+ */
+static inline int
+ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV,
+                          (u64) nasid, (u64) subch, (u64) buf, (u64) len,
+                          0, 0);
+       return (int) rv.status;
+}
+
+/*
+ * Write data to the system controller network via the system
+ * controller associated with 'nasid' on subchannel 'subch'.  The
+ * buffer to be written out is pointed to by 'buf', and 'len' is the
+ * number of bytes to be written.  The return value is either the
+ * number of bytes written (which could be zero) or a negative error
+ * code.
+ */
+static inline int
+ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND,
+                          (u64) nasid, (u64) subch, (u64) buf, (u64) len,
+                          0, 0);
+       return (int) rv.v0;
+}
+
+/*
+ * Check whether any interrupts are pending for the system controller
+ * associated with 'nasid' and its subchannel 'subch'.  The return
+ * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or
+ * SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr(nasid_t nasid, int subch)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS,
+                          (u64) nasid, (u64) subch, 0, 0, 0, 0);
+       return (int) rv.v0;
+}
+
+/*
+ * Enable the interrupt indicated by the intr parameter (either
+ * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON,
+                          (u64) nasid, (u64) subch, intr, 0, 0, 0);
+       return (int) rv.v0;
+}
+
+/*
+ * Disable the interrupt indicated by the intr parameter (either
+ * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF,
+                          (u64) nasid, (u64) subch, intr, 0, 0, 0);
+       return (int) rv.v0;
+}
+
+/**
+ * ia64_sn_get_fit_compt - read a FIT entry from the PROM header
+ * @nasid: NASID of node to read
+ * @index: FIT entry index to be retrieved (0..n)
+ * @fitentry: 16 byte buffer where FIT entry will be stored.
+ * @banbuf: optional buffer for retrieving banner
+ * @banlen: length of banner buffer
+ *
+ * Access to the physical PROM chips needs to be serialized since reads and
+ * writes can't occur at the same time, so we need to call into the SAL when
+ * we want to look at the FIT entries on the chips.
+ *
+ * Returns:
+ *     %SALRET_OK if ok
+ *     %SALRET_INVALID_ARG if index too big
+ *     %SALRET_NOT_IMPLEMENTED if running on older PROM
+ *     ??? if nasid invalid OR banner buffer not large enough
+ */
+static inline int
+ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf,
+                     u64 banlen)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry,
+                       banbuf, banlen, 0, 0);
+       return (int) rv.status;
+}
+
+/*
+ * Initialize the SAL components of the system controller
+ * communication driver; specifically pass in a sizable buffer that
+ * can be used for allocation of subchannel queues as new subchannels
+ * are opened.  "buf" points to the buffer, and "len" specifies its
+ * length.
+ */
+static inline int
+ia64_sn_irtr_init(nasid_t nasid, void *buf, int len)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT,
+                          (u64) nasid, (u64) buf, (u64) len, 0, 0, 0);
+       return (int) rv.status;
+}
+
+/*
+ * Returns the nasid, subnode & slice corresponding to a SAPIC ID
+ *
+ *  In:
+ *     arg0 - SN_SAL_GET_SAPIC_INFO
+ *     arg1 - sapicid (lid >> 16) 
+ *  Out:
+ *     v0 - nasid
+ *     v1 - subnode
+ *     v2 - slice
+ */
+static inline u64
+ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0);
+
+/***** BEGIN HACK - temp til old proms no longer supported ********/
+       if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
+               if (nasid) *nasid = sapicid & 0xfff;
+               if (subnode) *subnode = (sapicid >> 13) & 1;
+               if (slice) *slice = (sapicid >> 12) & 3;
+               return 0;
+       }
+/***** END HACK *******/
+
+       if (ret_stuff.status < 0)
+               return ret_stuff.status;
+
+       if (nasid) *nasid = (int) ret_stuff.v0;
+       if (subnode) *subnode = (int) ret_stuff.v1;
+       if (slice) *slice = (int) ret_stuff.v2;
+       return 0;
+}
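For illustration, a hedged usage sketch (not part of this patch;
hard_smp_processor_id() is the usual ia64 way to obtain the local SAPIC id):

    /* Sketch: locate the calling CPU in the NUMA topology. */
    int nasid, subnode, slice;

    if (ia64_sn_get_sapic_info(hard_smp_processor_id(),
                               &nasid, &subnode, &slice) == 0)
        printk("running on nasid %d, subnode %d, slice %d\n",
               nasid, subnode, slice);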
+ 
+/*
+ * Returns information about the HUB/SHUB.
+ *  In:
+ *     arg0 - SN_SAL_GET_HUB_INFO
+ *     arg1 - 0 (other values reserved for future use)
+ *  Out:
+ *     v0 - shub type (0=shub1, 1=shub2)
+ *     v1 - nasid mask (e.g., 0x7ff for an 11-bit nasid)
+ *     v2 - bit position of low nasid bit
+ */
+static inline u64
+ia64_sn_get_hub_info(int fc, u64 *arg1, u64 *arg2, u64 *arg3)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_HUB_INFO, fc, 0, 0, 0, 0, 0, 0);
+
+/***** BEGIN HACK - temp til old proms no longer supported ********/
+       if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
+               if (arg1) *arg1 = 0;
+               if (arg2) *arg2 = 0x7ff;
+               if (arg3) *arg3 = 38;
+               return 0;
+       }
+/***** END HACK *******/
+
+       if (ret_stuff.status < 0)
+               return ret_stuff.status;
+
+       if (arg1) *arg1 = ret_stuff.v0;
+       if (arg2) *arg2 = ret_stuff.v1;
+       if (arg3) *arg3 = ret_stuff.v2;
+       return 0;
+}
+ 
+/*
+ * This is the access point to the Altix PROM hardware performance
+ * and status monitoring interface. For info on using this, see
+ * include/asm-ia64/sn/sn2/sn_hwperf.h
+ */
+static inline int
+ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
+                  u64 a3, u64 a4, int *v0)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid,
+               opcode, a0, a1, a2, a3, a4);
+       if (v0)
+               *v0 = (int) rv.v0;
+       return (int) rv.status;
+}
+#endif /* !XEN */
+#endif /* _ASM_IA64_SN_SN_SAL_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/system.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/system.h       Wed Aug  3 00:25:11 2005
@@ -0,0 +1,299 @@
+#ifndef _ASM_IA64_SYSTEM_H
+#define _ASM_IA64_SYSTEM_H
+
+/*
+ * System defines. Note that this is included both from .c and .S
+ * files, so it contains only defines, not any C code.  This is based
+ * on information published in the Processor Abstraction Layer
+ * and the System Abstraction Layer manual.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
+ * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
+ */
+#include <linux/config.h>
+
+#include <asm/kregs.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/percpu.h>
+#ifdef XEN
+#include <asm/xensystem.h>
+#endif
+
+#define GATE_ADDR              __IA64_UL_CONST(0xa000000000000000)
+/*
+ * 0xa000000000000000+2*PERCPU_PAGE_SIZE
+ * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
+ */
+#ifndef XEN
+#define KERNEL_START            __IA64_UL_CONST(0xa000000100000000)
+#define PERCPU_ADDR            (-PERCPU_PAGE_SIZE)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct pci_vector_struct {
+       __u16 segment;  /* PCI Segment number */
+       __u16 bus;      /* PCI Bus number */
+       __u32 pci_id;   /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
+       __u8 pin;       /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
+       __u32 irq;      /* IRQ assigned */
+};
+
+extern struct ia64_boot_param {
+       __u64 command_line;             /* physical address of command line arguments */
+       __u64 efi_systab;               /* physical address of EFI system table */
+       __u64 efi_memmap;               /* physical address of EFI memory map */
+       __u64 efi_memmap_size;          /* size of EFI memory map */
+       __u64 efi_memdesc_size;         /* size of an EFI memory map descriptor */
+       __u32 efi_memdesc_version;      /* memory descriptor version */
+       struct {
+               __u16 num_cols; /* number of columns on console output device */
+               __u16 num_rows; /* number of rows on console output device */
+               __u16 orig_x;   /* cursor's x position */
+               __u16 orig_y;   /* cursor's y position */
+       } console_info;
+       __u64 fpswa;            /* physical address of the fpswa interface */
+       __u64 initrd_start;
+       __u64 initrd_size;
+} *ia64_boot_param;
+
+/*
+ * Macros to force memory ordering.  In these descriptions, "previous"
+ * and "subsequent" refer to program order; "visible" means that all
+ * architecturally visible effects of a memory access have occurred
+ * (at a minimum, this means the memory has been read or written).
+ *
+ *   wmb():    Guarantees that all preceding stores to memory-
+ *             like regions are visible before any subsequent
+ *             stores and that all following stores will be
+ *             visible only after all previous stores.
+ *   rmb():    Like wmb(), but for reads.
+ *   mb():     wmb()/rmb() combo, i.e., all previous memory
+ *             accesses are visible before all subsequent
+ *             accesses and vice versa.  This is also known as
+ *             a "fence."
+ *
+ * Note: "mb()" and its variants cannot be used as a fence to order
+ * accesses to memory mapped I/O registers.  For that, mf.a needs to
+ * be used.  However, we don't want to always use mf.a because (a)
+ * it's (presumably) much slower than mf and (b) mf.a is supported for
+ * sequential memory pages only.
+ */
+#define mb()   ia64_mf()
+#define rmb()  mb()
+#define wmb()  mb()
+#define read_barrier_depends() do { } while(0)
+
+#ifdef CONFIG_SMP
+# define smp_mb()      mb()
+# define smp_rmb()     rmb()
+# define smp_wmb()     wmb()
+# define smp_read_barrier_depends()    read_barrier_depends()
+#else
+# define smp_mb()      barrier()
+# define smp_rmb()     barrier()
+# define smp_wmb()     barrier()
+# define smp_read_barrier_depends()    do { } while(0)
+#endif
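The classic use of the smp_*() variants above is a producer/consumer handshake:
publish the payload, then the flag, and read them back in the opposite order.
A minimal sketch (not from this patch):

    static int data, flag;

    void producer(void)
    {
        data = 42;
        smp_wmb();          /* make 'data' visible before 'flag' */
        flag = 1;
    }

    int consumer(void)
    {
        while (!flag)
            ;               /* wait for the producer to publish */
        smp_rmb();          /* read 'flag' before reading 'data' */
        return data;
    }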
+
+/*
+ * XXX check on these---I suspect what Linus really wants here is
+ * acquire vs release semantics but we can't discuss this stuff with
+ * Linus just yet.  Grrr...
+ */
+#define set_mb(var, value)     do { (var) = (value); mb(); } while (0)
+#define set_wmb(var, value)    do { (var) = (value); mb(); } while (0)
+
+#define safe_halt()         ia64_pal_halt_light()    /* PAL_HALT_LIGHT */
+
+/*
+ * The group barrier in front of the rsm & ssm is necessary to ensure
+ * that none of the previous instructions in the same group are
+ * affected by the rsm/ssm.
+ */
+/* For spinlocks etc */
+
+/*
+ * - clearing psr.i is implicitly serialized (visible by next insn)
+ * - setting psr.i requires data serialization
+ * - we need a stop-bit before reading PSR because we sometimes
+ *   write a floating-point register right before reading the PSR
+ *   and that writes to PSR.mfl
+ */
+#define __local_irq_save(x)                    \
+do {                                           \
+       ia64_stop();                            \
+       (x) = ia64_getreg(_IA64_REG_PSR);       \
+       ia64_stop();                            \
+       ia64_rsm(IA64_PSR_I);                   \
+} while (0)
+
+#define __local_irq_disable()                  \
+do {                                           \
+       ia64_stop();                            \
+       ia64_rsm(IA64_PSR_I);                   \
+} while (0)
+
+#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
+
+#ifdef CONFIG_IA64_DEBUG_IRQ
+
+  extern unsigned long last_cli_ip;
+
+# define __save_ip()           last_cli_ip = ia64_getreg(_IA64_REG_IP)
+
+# define local_irq_save(x)                                     \
+do {                                                           \
+       unsigned long psr;                                      \
+                                                               \
+       __local_irq_save(psr);                                  \
+       if (psr & IA64_PSR_I)                                   \
+               __save_ip();                                    \
+       (x) = psr;                                              \
+} while (0)
+
+# define local_irq_disable()   do { unsigned long x; local_irq_save(x); } while (0)
+
+# define local_irq_restore(x)                                  \
+do {                                                           \
+       unsigned long old_psr, psr = (x);                       \
+                                                               \
+       local_save_flags(old_psr);                              \
+       __local_irq_restore(psr);                               \
+       if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I))      \
+               __save_ip();                                    \
+} while (0)
+
+#else /* !CONFIG_IA64_DEBUG_IRQ */
+# define local_irq_save(x)     __local_irq_save(x)
+# define local_irq_disable()   __local_irq_disable()
+# define local_irq_restore(x)  __local_irq_restore(x)
+#endif /* !CONFIG_IA64_DEBUG_IRQ */
+
+#define local_irq_enable()     ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+#define local_save_flags(flags)        ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
+
+#define irqs_disabled()                                \
+({                                             \
+       unsigned long __ia64_id_flags;          \
+       local_save_flags(__ia64_id_flags);      \
+       (__ia64_id_flags & IA64_PSR_I) == 0;    \
+})
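These macros combine into the canonical save/disable/restore pattern for per-CPU
critical sections.  A minimal sketch, assuming nothing beyond the definitions above
(the function name is hypothetical):

    void touch_percpu_state(void)
    {
        unsigned long flags;

        local_irq_save(flags);      /* old PSR saved, psr.i cleared */
        /* ... code that must not be interrupted on this CPU ... */
        local_irq_restore(flags);   /* re-enables only if previously enabled */
    }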
+
+#ifdef __KERNEL__
+
+#define prepare_to_switch()    do { } while(0)
+
+#ifdef CONFIG_IA32_SUPPORT
+# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
+#else
+# define IS_IA32_PROCESS(regs)         0
+struct task_struct;
+static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
+static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
+#endif
+
+/*
+ * Context switch from one thread to another.  If the two threads have
+ * different address spaces, schedule() has already taken care of
+ * switching to the new address space by calling switch_mm().
+ *
+ * Disabling access to the fph partition and the debug-register
+ * context switch MUST be done before calling ia64_switch_to() since a
+ * newly created thread returns directly to
+ * ia64_ret_from_syscall_clear_r8.
+ */
+extern struct task_struct *ia64_switch_to (void *next_task);
+
+struct task_struct;
+
+extern void ia64_save_extra (struct task_struct *task);
+extern void ia64_load_extra (struct task_struct *task);
+
+#ifdef CONFIG_PERFMON
+  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
+# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
+#else
+# define PERFMON_IS_SYSWIDE() (0)
+#endif
+
+#ifndef XEN
+#define IA64_HAS_EXTRA_STATE(t)                                                        \
+       ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)       \
+        || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+
+#define __switch_to(prev,next,last) do {                                       \
+       if (IA64_HAS_EXTRA_STATE(prev))                                         \
+               ia64_save_extra(prev);                                          \
+       if (IA64_HAS_EXTRA_STATE(next))                                         \
+               ia64_load_extra(next);                                          \
+       ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);   \
+       (last) = ia64_switch_to((next));                                        \
+} while (0)
+#endif
+
+#ifdef CONFIG_SMP
+/*
+ * In the SMP case, we save the fph state when context-switching away from a
+ * thread that modified fph.  This way, when the thread gets scheduled on
+ * another CPU, the CPU can pick up the state from task->thread.fph, avoiding
+ * the complication of having to fetch the latest fph state from another CPU.
+ * In other words: eager save, lazy restore.
+ */
+# define switch_to(prev,next,last) do {                                                \
+       if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {    \
+               ia64_psr(ia64_task_regs(prev))->mfh = 0;                        \
+               (prev)->thread.flags |= IA64_THREAD_FPH_VALID;                  \
+               __ia64_save_fpu((prev)->thread.fph);                            \
+       }                                                                       \
+       __switch_to(prev, next, last);                                          \
+} while (0)
+#else
+# define switch_to(prev,next,last)     __switch_to(prev, next, last)
+#endif
+
+/*
+ * On IA-64, we don't want to hold the runqueue's lock during the low-level
+ * context-switch, because that could cause a deadlock.  Here is an example
+ * by Erich Focht:
+ *
+ * Example:
+ * CPU#0:
+ * schedule()
+ *    -> spin_lock_irq(&rq->lock)
+ *    -> context_switch()
+ *       -> wrap_mmu_context()
+ *          -> read_lock(&tasklist_lock)
+ *
+ * CPU#1:
+ * sys_wait4() or release_task() or forget_original_parent()
+ *    -> write_lock(&tasklist_lock)
+ *    -> do_notify_parent()
+ *       -> wake_up_parent()
+ *          -> try_to_wake_up()
+ *             -> spin_lock_irq(&parent_rq->lock)
+ *
+ * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
+ * of that CPU which will not be released, because there we wait for the
+ * tasklist_lock to become available.
+ */
+#define prepare_arch_switch(rq, next)          \
+do {                                           \
+       spin_lock(&(next)->switch_lock);        \
+       spin_unlock(&(rq)->lock);               \
+} while (0)
+#define finish_arch_switch(rq, prev)   spin_unlock_irq(&(prev)->switch_lock)
+#define task_running(rq, p)            ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+
+#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
+
+void cpu_idle_wait(void);
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_SYSTEM_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/types.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/types.h        Wed Aug  3 00:25:11 2005
@@ -0,0 +1,104 @@
+#ifndef _ASM_IA64_TYPES_H
+#define _ASM_IA64_TYPES_H
+#ifdef XEN
+#ifndef __ASSEMBLY__
+typedef unsigned long ssize_t;
+typedef unsigned long size_t;
+typedef long long loff_t;
+#endif
+#endif
+
+/*
+ * This file is never included by application software unless explicitly
+ * requested (e.g., via linux/types.h) in which case the application is
+ * Linux specific so (user-) name space pollution is not a major issue.
+ * However, for interoperability, libraries still need to be careful to
+ * avoid name clashes.
+ *
+ * Based on <asm-alpha/types.h>.
+ *
+ * Modified 1998-2000, 2002
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
+ */
+
+#ifdef __ASSEMBLY__
+# define __IA64_UL(x)          (x)
+# define __IA64_UL_CONST(x)    x
+
+# ifdef __KERNEL__
+#  define BITS_PER_LONG 64
+# endif
+
+#else
+# define __IA64_UL(x)          ((unsigned long)(x))
+# define __IA64_UL_CONST(x)    x##UL
+
+typedef unsigned int umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+# ifdef __KERNEL__
+
+typedef __s8 s8;
+typedef __u8 u8;
+
+typedef __s16 s16;
+typedef __u16 u16;
+
+typedef __s32 s32;
+typedef __u32 u32;
+
+typedef __s64 s64;
+typedef __u64 u64;
+
+#ifdef XEN
+/*
+ * Below are truly Linux-specific types that should never collide with
+ * any application/library that wants linux/types.h.
+ */
+
+#ifdef __CHECKER__
+#define __bitwise __attribute__((bitwise))
+#else
+#define __bitwise
+#endif
+
+typedef __u16 __bitwise __le16;
+typedef __u16 __bitwise __be16;
+typedef __u32 __bitwise __le32;
+typedef __u32 __bitwise __be32;
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __u64 __bitwise __le64;
+typedef __u64 __bitwise __be64;
+#endif
+#endif
+
+#define BITS_PER_LONG 64
+
+/* DMA addresses are 64-bits wide, in general.  */
+
+typedef u64 dma_addr_t;
+
+typedef unsigned short kmem_bufctl_t;
+
+# endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_TYPES_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/asm/uaccess.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/uaccess.h      Wed Aug  3 00:25:11 2005
@@ -0,0 +1,381 @@
+#ifndef _ASM_IA64_UACCESS_H
+#define _ASM_IA64_UACCESS_H
+
+/*
+ * This file defines various macros to transfer memory areas across
+ * the user/kernel boundary.  This needs to be done carefully because
+ * this code is executed in kernel mode and uses user-specified
+ * addresses.  Thus, we need to be careful not to let the user
+ * trick us into accessing kernel memory that would normally be
+ * inaccessible.  This code is also fairly performance sensitive,
+ * so we want to spend as little time doing safety checks as
+ * possible.
+ *
+ * To make matters a bit more interesting, these macros are sometimes
+ * also called from within the kernel itself, in which case the address
+ * validity check must be skipped.  The get_fs() macro tells us what
+ * to do: if get_fs()==USER_DS, checking is performed, if
+ * get_fs()==KERNEL_DS, checking is bypassed.
+ *
+ * Note that even if the memory area specified by the user is in a
+ * valid address range, it is still possible that we'll get a page
+ * fault while accessing it.  This is handled by filling out an
+ * exception handler fixup entry for each instruction that has the
+ * potential to fault.  When such a fault occurs, the page fault
+ * handler checks to see whether the faulting instruction has a fixup
+ * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
+ * then resumes execution at the continuation point.
+ *
+ * Based on <asm-alpha/uaccess.h>.
+ *
+ * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#ifdef CONFIG_VTI
+#include <asm/vmx_uaccess.h>
+#else // CONFIG_VTI
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+
+#include <asm/intrinsics.h>
+#include <asm/pgtable.h>
+
+/*
+ * For historical reasons, the following macros are grossly misnamed:
+ */
+#define KERNEL_DS      ((mm_segment_t) { ~0UL })               /* cf. access_ok() */
+#define USER_DS                ((mm_segment_t) { TASK_SIZE-1 })        /* cf. access_ok() */
+
+#define VERIFY_READ    0
+#define VERIFY_WRITE   1
+
+#define get_ds()  (KERNEL_DS)
+#define get_fs()  (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b)       ((a).seg == (b).seg)
+
+/*
+ * When accessing user memory, we need to make sure the entire area really is
+ * in user-level space.  In order to do this efficiently, we make sure that
+ * the page at address TASK_SIZE is never valid.  We also need to make sure
+ * that the address doesn't point inside the virtually mapped linear page
+ * table.
+ */
+#ifdef XEN
+/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
+#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
+#define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
+#else
+#define __access_ok(addr, size, segment)                                       \
+({                                                                             \
+       __chk_user_ptr(addr);                                                   \
+       (likely((unsigned long) (addr) <= (segment).seg)                        \
+        && ((segment).seg == KERNEL_DS.seg                                     \
+            || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
+})
+#endif
+#define access_ok(type, addr, size)    __access_ok((addr), (size), get_fs())
+
+static inline int
+verify_area (int type, const void __user *addr, unsigned long size)
+{
+       return access_ok(type, addr, size) ? 0 : -EFAULT;
+}
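verify_area() and access_ok() support the usual check-then-copy idiom: validate
once, then use the unchecked transfer primitives defined later in this header.
A hedged sketch (demo_read_u32 is hypothetical):

    static long demo_read_u32(const u32 __user *uaddr, u32 *out)
    {
        if (!access_ok(VERIFY_READ, uaddr, sizeof(*uaddr)))
            return -EFAULT;
        /* range already validated, so the unchecked copy is safe */
        return __copy_from_user(out, uaddr, sizeof(*out)) ? -EFAULT : 0;
    }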
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr)       __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
+#define get_user(x, ptr)       __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the programmer has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x, ptr)     __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr)     __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+extern long __put_user_unaligned_unknown (void);
+
+#define __put_user_unaligned(x, ptr)                                           \
+({                                                                             \
+       long __ret;                                                             \
+       switch (sizeof(*(ptr))) {                                               \
+               case 1: __ret = __put_user((x), (ptr)); break;                  \
+               case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))           \
+                       | (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
+               case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))          \
+                       | (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
+               case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))          \
+                       | (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
+               default: __ret = __put_user_unaligned_unknown();                \
+       }                                                                       \
+       __ret;                                                                  \
+})
+
+extern long __get_user_unaligned_unknown (void);
+
+#define __get_user_unaligned(x, ptr)                                           \
+({                                                                             \
+       long __ret;                                                             \
+       switch (sizeof(*(ptr))) {                                               \
+               case 1: __ret = __get_user((x), (ptr)); break;                  \
+               case 2: __ret = (__get_user((x), (u8 __user *)(ptr)))           \
+                       | (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
+               case 4: __ret = (__get_user((x), (u16 __user *)(ptr)))          \
+                       | (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
+               case 8: __ret = (__get_user((x), (u32 __user *)(ptr)))          \
+                       | (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
+               default: __ret = __get_user_unaligned_unknown();                \
+       }                                                                       \
+       __ret;                                                                  \
+})
+
+#ifdef ASM_SUPPORTED
+  struct __large_struct { unsigned long buf[100]; };
+# define __m(x) (*(struct __large_struct __user *)(x))
+
+/* We need to declare the __ex_table section before we can use it in .xdata.  */
+asm (".section \"__ex_table\", \"a\"\n\t.previous");
+
+# define __get_user_size(val, addr, n, err)                                    \
+do {                                                                           \
+       register long __gu_r8 asm ("r8") = 0;                                   \
+       register long __gu_r9 asm ("r9");                                       \
+       asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
+            "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"                         \
+            "[1:]"                                                             \
+            : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));    \
+       (err) = __gu_r8;                                                        \
+       (val) = __gu_r9;                                                        \
+} while (0)
+
+/*
+ * The "__put_user_size()" macro tells gcc it reads from memory instead of
+ * writing it.  This is because it does not write to any memory gcc knows
+ * about, so there are no aliasing issues.
+ */
+# define __put_user_size(val, addr, n, err)                                    \
+do {                                                                           \
+       register long __pu_r8 asm ("r8") = 0;                                   \
+       asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
+                     "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"                  \
+                     "[1:]"                                                    \
+                     : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8)); \
+       (err) = __pu_r8;                                                        \
+} while (0)
+
+#else /* !ASM_SUPPORTED */
+# define RELOC_TYPE    2       /* ip-rel */
+# define __get_user_size(val, addr, n, err)                            \
+do {                                                                   \
+       __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);   \
+       (err) = ia64_getreg(_IA64_REG_R8);                              \
+       (val) = ia64_getreg(_IA64_REG_R9);                              \
+} while (0)
+# define __put_user_size(val, addr, n, err)                            \
+do {                                                                   \
+       __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \
+       (err) = ia64_getreg(_IA64_REG_R8);                              \
+} while (0)
+#endif /* !ASM_SUPPORTED */
+
+extern void __get_user_unknown (void);
+
+/*
+ * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls,
+ * which could clobber r8 and r9 (among others).  Thus, be careful not to
+ * evaluate them while using r8/r9.
+ */
+#define __do_get_user(check, x, ptr, size, segment)                            \
+({                                                                             \
+       const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);                      \
+       __typeof__ (size) __gu_size = (size);                                   \
+       long __gu_err = -EFAULT, __gu_val = 0;                                  \
+                                                                               \
+       if (!check || __access_ok(__gu_ptr, size, segment))                     \
+               switch (__gu_size) {                                            \
+                     case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
+                     case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \
+                     case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break; \
+                     case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
+                     default: __get_user_unknown(); break;                     \
+               }                                                               \
+       (x) = (__typeof__(*(__gu_ptr))) __gu_val;                               \
+       __gu_err;                                                               \
+})
+
+#define __get_user_nocheck(x, ptr, size)       __do_get_user(0, x, ptr, size, KERNEL_DS)
+#define __get_user_check(x, ptr, size, segment)        __do_get_user(1, x, ptr, size, segment)
+
+extern void __put_user_unknown (void);
+
+/*
+ * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls,
+ * which could clobber r8 (among others).  Thus, be careful not to evaluate
+ * them while using r8.
+ */
+#define __do_put_user(check, x, ptr, size, segment)                            \
+({                                                                             \
+       __typeof__ (x) __pu_x = (x);                                            \
+       __typeof__ (*(ptr)) __user *__pu_ptr = (ptr);                           \
+       __typeof__ (size) __pu_size = (size);                                   \
+       long __pu_err = -EFAULT;                                                \
+                                                                               \
+       if (!check || __access_ok(__pu_ptr, __pu_size, segment))                \
+               switch (__pu_size) {                                            \
+                     case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \
+                     case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \
+                     case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break; \
+                     case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break; \
+                     default: __put_user_unknown(); break;                     \
+               }                                                               \
+       __pu_err;                                                               \
+})
+
+#define __put_user_nocheck(x, ptr, size)       __do_put_user(0, x, ptr, size, KERNEL_DS)
+#define __put_user_check(x, ptr, size, segment)        __do_put_user(1, x, ptr, size, segment)
+
+/*
+ * Complex access routines
+ */
+extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
+                                              unsigned long count);
+
+static inline unsigned long
+__copy_to_user (void __user *to, const void *from, unsigned long count)
+{
+       return __copy_user(to, (void __user *) from, count);
+}
+
+static inline unsigned long
+__copy_from_user (void *to, const void __user *from, unsigned long count)
+{
+       return __copy_user((void __user *) to, from, count);
+}
+
+#define __copy_to_user_inatomic                __copy_to_user
+#define __copy_from_user_inatomic      __copy_from_user
+#define copy_to_user(to, from, n)                                              \
+({                                                                             \
+       void __user *__cu_to = (to);                                            \
+       const void *__cu_from = (from);                                         \
+       long __cu_len = (n);                                                    \
+                                                                               \
+       if (__access_ok(__cu_to, __cu_len, get_fs()))                           \
+               __cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len); \
+       __cu_len;                                                               \
+})
+
+#define copy_from_user(to, from, n)                                            \
+({                                                                             \
+       void *__cu_to = (to);                                                   \
+       const void __user *__cu_from = (from);                                  \
+       long __cu_len = (n);                                                    \
+                                                                               \
+       __chk_user_ptr(__cu_from);                                              \
+       if (__access_ok(__cu_from, __cu_len, get_fs()))                         \
+               __cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len); \
+       __cu_len;                                                               \
+})
+
+#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
+
+static inline unsigned long
+copy_in_user (void __user *to, const void __user *from, unsigned long n)
+{
+       if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
+               n = __copy_user(to, from, n);
+       return n;
+}
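Note that these copy routines return the number of bytes left uncopied, not an
errno, so callers usually collapse a partial copy into -EFAULT.  A sketch
(demo_export is hypothetical):

    static long demo_export(void __user *dst, const void *src, unsigned long n)
    {
        unsigned long left = copy_to_user(dst, src, n);

        return left ? -EFAULT : 0;      /* treat partial copy as failure */
    }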
+
+extern unsigned long __do_clear_user (void __user *, unsigned long);
+
+#define __clear_user(to, n)            __do_clear_user(to, n)
+
+#define clear_user(to, n)                                      \
+({                                                             \
+       unsigned long __cu_len = (n);                           \
+       if (__access_ok(to, __cu_len, get_fs()))                \
+               __cu_len = __do_clear_user(to, __cu_len);       \
+       __cu_len;                                               \
+})
+
+
+/*
+ * Returns: -EFAULT if exception before terminator, N if the entire buffer
+ * filled, else strlen.
+ */
+extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
+
+#define strncpy_from_user(to, from, n)                                 \
+({                                                                     \
+       const char __user * __sfu_from = (from);                        \
+       long __sfu_ret = -EFAULT;                                       \
+       if (__access_ok(__sfu_from, 0, get_fs()))                       \
+               __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
+       __sfu_ret;                                                      \
+})
+
+/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
+extern unsigned long __strlen_user (const char __user *);
+
+#define strlen_user(str)                               \
+({                                                     \
+       const char __user *__su_str = (str);            \
+       unsigned long __su_ret = 0;                     \
+       if (__access_ok(__su_str, 0, get_fs()))         \
+               __su_ret = __strlen_user(__su_str);     \
+       __su_ret;                                       \
+})
+
+/*
+ * Returns: 0 if exception before NUL or reaching the supplied limit
+ * (N), a value greater than N if the limit would be exceeded, else
+ * strlen.
+ */
+extern unsigned long __strnlen_user (const char __user *, long);
+
+#define strnlen_user(str, len)                                 \
+({                                                             \
+       const char __user *__su_str = (str);                    \
+       unsigned long __su_ret = 0;                             \
+       if (__access_ok(__su_str, 0, get_fs()))                 \
+               __su_ret = __strnlen_user(__su_str, len);       \
+       __su_ret;                                               \
+})
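The string helpers share the same convention: a bounds check first, then the raw
routine.  A hedged sketch of fetching a user string, using the return convention
documented above for __strncpy_from_user() (demo_get_name is hypothetical):

    static long demo_get_name(char *kbuf, const char __user *uname, long buflen)
    {
        long len = strncpy_from_user(kbuf, uname, buflen);

        if (len < 0)
            return len;                 /* -EFAULT */
        if (len >= buflen)
            return -ENAMETOOLONG;       /* no NUL within 'buflen' bytes */
        return 0;                       /* kbuf holds a NUL-terminated copy */
    }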
+
+#endif // CONFIG_VTI
+/* Generic code can't deal with the location-relative format that we use for compactness.  */
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
+
+struct exception_table_entry {
+       int addr;       /* location-relative address of insn this fixup is for */
+       int cont;       /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
+};
+
+extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
+extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
+
+static inline int
+ia64_done_with_exception (struct pt_regs *regs)
+{
+       const struct exception_table_entry *e;
+       e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
+       if (e) {
+               ia64_handle_exception(regs, e);
+               return 1;
+       }
+       return 0;
+}
+
+#endif /* _ASM_IA64_UACCESS_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/linux/cpumask.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/linux/cpumask.h    Wed Aug  3 00:25:11 2005
@@ -0,0 +1,379 @@
+#ifndef __LINUX_CPUMASK_H
+#define __LINUX_CPUMASK_H
+
+/*
+ * Cpumasks provide a bitmap suitable for representing the
+ * set of CPUs in a system, one bit position per CPU number.
+ *
+ * See detailed comments in the file linux/bitmap.h describing the
+ * data type on which these cpumasks are based.
+ *
+ * For details of cpumask_scnprintf() and cpumask_parse(),
+ * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
+ *
+ * The available cpumask operations are:
+ *
+ * void cpu_set(cpu, mask)             turn on bit 'cpu' in mask
+ * void cpu_clear(cpu, mask)           turn off bit 'cpu' in mask
+ * void cpus_setall(mask)              set all bits
+ * void cpus_clear(mask)               clear all bits
+ * int cpu_isset(cpu, mask)            true iff bit 'cpu' set in mask
+ * int cpu_test_and_set(cpu, mask)     test and set bit 'cpu' in mask
+ *
+ * void cpus_and(dst, src1, src2)      dst = src1 & src2  [intersection]
+ * void cpus_or(dst, src1, src2)       dst = src1 | src2  [union]
+ * void cpus_xor(dst, src1, src2)      dst = src1 ^ src2
+ * void cpus_andnot(dst, src1, src2)   dst = src1 & ~src2
+ * void cpus_complement(dst, src)      dst = ~src
+ *
+ * int cpus_equal(mask1, mask2)                Does mask1 == mask2?
+ * int cpus_intersects(mask1, mask2)   Do mask1 and mask2 intersect?
+ * int cpus_subset(mask1, mask2)       Is mask1 a subset of mask2?
+ * int cpus_empty(mask)                        Is mask empty (no bits set)?
+ * int cpus_full(mask)                 Is mask full (all bits set)?
+ * int cpus_weight(mask)               Hamming weight - number of set bits
+ *
+ * void cpus_shift_right(dst, src, n)  Shift right
+ * void cpus_shift_left(dst, src, n)   Shift left
+ *
+ * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
+ * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
+ *
+ * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
+ * CPU_MASK_ALL                                Initializer - all bits set
+ * CPU_MASK_NONE                       Initializer - no bits set
+ * unsigned long *cpus_addr(mask)      Array of unsigned long's in mask
+ *
+ * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
+ * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask
+ *
+ * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
+ *
+ * int num_online_cpus()               Number of online CPUs
+ * int num_possible_cpus()             Number of all possible CPUs
+ * int num_present_cpus()              Number of present CPUs
+ *
+ * int cpu_online(cpu)                 Is some cpu online?
+ * int cpu_possible(cpu)               Is some cpu possible?
+ * int cpu_present(cpu)                        Is some cpu present (can schedule)?
+ *
+ * int any_online_cpu(mask)            First online cpu in mask
+ *
+ * for_each_cpu(cpu)                   for-loop cpu over cpu_possible_map
+ * for_each_online_cpu(cpu)            for-loop cpu over cpu_online_map
+ * for_each_present_cpu(cpu)           for-loop cpu over cpu_present_map
+ *
+ * Subtlety:
+ * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
+ *    to generate slightly worse code.  Note for example the additional
+ *    40 lines of assembly code compiling the "for each possible cpu"
+ *    loops buried in the disk_stat_read() macro calls when compiling
+ *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
+ *    one-line #define for cpu_isset(), instead of wrapping an inline
+ *    inside a macro, the way we do the other calls.
+ */
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/bitmap.h>
+#include <asm/bug.h>
+
+typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+extern cpumask_t _unused_cpumask_arg_;
+
+#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
+static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
+{
+       set_bit(cpu, dstp->bits);
+}
+
+#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
+static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
+{
+       clear_bit(cpu, dstp->bits);
+}
+
+#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
+static inline void __cpus_setall(cpumask_t *dstp, int nbits)
+{
+       bitmap_fill(dstp->bits, nbits);
+}
+
+#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
+static inline void __cpus_clear(cpumask_t *dstp, int nbits)
+{
+       bitmap_zero(dstp->bits, nbits);
+}
+
+/* No static inline type checking - see Subtlety (1) above. */
+#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
+
+#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
+static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
+{
+       return test_and_set_bit(cpu, addr->bits);
+}
+
+#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_andnot(dst, src1, src2) \
+                               __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
+static inline void __cpus_complement(cpumask_t *dstp,
+                                       const cpumask_t *srcp, int nbits)
+{
+       bitmap_complement(dstp->bits, srcp->bits, nbits);
+}
+
+#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_equal(const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_intersects(const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_subset(const cpumask_t *src1p,
+                                       const cpumask_t *src2p, int nbits)
+{
+       return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
+static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
+{
+       return bitmap_empty(srcp->bits, nbits);
+}
+
+#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
+static inline int __cpus_full(const cpumask_t *srcp, int nbits)
+{
+       return bitmap_full(srcp->bits, nbits);
+}
+
+#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
+static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
+{
+       return bitmap_weight(srcp->bits, nbits);
+}
+
+#define cpus_shift_right(dst, src, n) \
+                       __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
+static inline void __cpus_shift_right(cpumask_t *dstp,
+                                       const cpumask_t *srcp, int n, int nbits)
+{
+       bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
+}
+
+#define cpus_shift_left(dst, src, n) \
+                       __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
+static inline void __cpus_shift_left(cpumask_t *dstp,
+                                       const cpumask_t *srcp, int n, int nbits)
+{
+       bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+
+#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
+static inline int __first_cpu(const cpumask_t *srcp, int nbits)
+{
+       return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+}
+
+#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
+static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
+{
+       return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+}
+
+#define cpumask_of_cpu(cpu)                                            \
+({                                                                     \
+       typeof(_unused_cpumask_arg_) m;                                 \
+       if (sizeof(m) == sizeof(unsigned long)) {                       \
+               m.bits[0] = 1UL<<(cpu);                                 \
+       } else {                                                        \
+               cpus_clear(m);                                          \
+               cpu_set((cpu), m);                                      \
+       }                                                               \
+       m;                                                              \
+})
+
+#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
+
+#if NR_CPUS <= BITS_PER_LONG
+
+#define CPU_MASK_ALL                                                   \
+(cpumask_t) { {                                                        \
+       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
+} }
+
+#else
+
+#define CPU_MASK_ALL                                                   \
+(cpumask_t) { {                                                        \
+       [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                        \
+       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
+} }
+
+#endif
+
+#define CPU_MASK_NONE                                                  \
+(cpumask_t) { {                                                        \
+       [0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL                         \
+} }
+
+#define CPU_MASK_CPU0                                                  \
+(cpumask_t) { {                                                        \
+       [0] =  1UL                                                      \
+} }
+
+#define cpus_addr(src) ((src).bits)
+
+#define cpumask_scnprintf(buf, len, src) \
+                       __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
+static inline int __cpumask_scnprintf(char *buf, int len,
+                                       const cpumask_t *srcp, int nbits)
+{
+       return bitmap_scnprintf(buf, len, srcp->bits, nbits);
+}
+
+#define cpumask_parse(ubuf, ulen, src) \
+                       __cpumask_parse((ubuf), (ulen), &(src), NR_CPUS)
+static inline int __cpumask_parse(const char __user *buf, int len,
+                                       cpumask_t *dstp, int nbits)
+{
+       return bitmap_parse(buf, len, dstp->bits, nbits);
+}
+
+#if NR_CPUS > 1
+#define for_each_cpu_mask(cpu, mask)           \
+       for ((cpu) = first_cpu(mask);           \
+               (cpu) < NR_CPUS;                \
+               (cpu) = next_cpu((cpu), (mask)))
+#else /* NR_CPUS == 1 */
+#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#endif /* NR_CPUS */
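A quick sketch of how the constructors and iterator above combine (not part of
this patch):

    cpumask_t mask = CPU_MASK_NONE;
    int cpu;

    cpu_set(0, mask);
    cpu_set(2, mask);
    for_each_cpu_mask(cpu, mask)
        printk("cpu %d is in the mask\n", cpu);    /* prints 0, then 2 */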
+
+/*
+ * The following particular system cpumasks and operations manage
+ * possible, present and online cpus.  Each of them is a fixed size
+ * bitmap of size NR_CPUS.
+ *
+ *  #ifdef CONFIG_HOTPLUG_CPU
+ *     cpu_possible_map - all NR_CPUS bits set
+ *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
+ *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
+ *  #else
+ *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
+ *     cpu_present_map  - copy of cpu_possible_map
+ *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
+ *  #endif
+ *
+ *  In either case, NR_CPUS is fixed at compile time, as the static
+ *  size of these bitmaps.  The cpu_possible_map is fixed at boot
+ *  time, as the set of CPU ids that could ever be plugged in
+ *  at any time during the life of that system boot.
+ *  The cpu_present_map is dynamic(*), representing which CPUs
+ *  are currently plugged in.  And cpu_online_map is the dynamic
+ *  subset of cpu_present_map, indicating those CPUs available
+ *  for scheduling.
+ *
+ *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
+ *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
+ *  ACPI reports present at boot.
+ *
+ *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
+ *  depending on what ACPI reports as currently plugged in, otherwise
+ *  cpu_present_map is just a copy of cpu_possible_map.
+ *
+ *  (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
+ *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
+ *
+ * Subtleties:
+ * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode the
+ *    assumption that their single CPU is online.  The UP
+ *    cpu_{online,possible,present}_maps are placebos.  Changing them
+ *    will have no useful effect on the following num_*_cpus()
+ *    and cpu_*() macros in the UP case.  This ugliness is a UP
+ *    optimization - don't waste any instructions or memory references
+ *    asking if you're online or how many CPUs there are if there is
+ *    only one CPU.
+ * 2) Most SMP arch's #define some of these maps to be some
+ *    other map specific to that arch.  Therefore, the following
+ *    must be #define macros, not inlines.  To see why, examine
+ *    the assembly code produced by the following.  Note that
+ *    set1() writes phys_x_map, but set2() writes x_map:
+ *        int x_map, phys_x_map;
+ *        #define set1(a) x_map = a
+ *        inline void set2(int a) { x_map = a; }
+ *        #define x_map phys_x_map
+ *        main(){ set1(3); set2(5); }
+ */
+
+extern cpumask_t cpu_possible_map;
+#ifndef XEN
+extern cpumask_t cpu_online_map;
+#endif
+extern cpumask_t cpu_present_map;
+
+#if NR_CPUS > 1
+#define num_online_cpus()      cpus_weight(cpu_online_map)
+#define num_possible_cpus()    cpus_weight(cpu_possible_map)
+#define num_present_cpus()     cpus_weight(cpu_present_map)
+#define cpu_online(cpu)                cpu_isset((cpu), cpu_online_map)
+#define cpu_possible(cpu)      cpu_isset((cpu), cpu_possible_map)
+#define cpu_present(cpu)       cpu_isset((cpu), cpu_present_map)
+#else
+#define num_online_cpus()      1
+#define num_possible_cpus()    1
+#define num_present_cpus()     1
+#define cpu_online(cpu)                ((cpu) == 0)
+#define cpu_possible(cpu)      ((cpu) == 0)
+#define cpu_present(cpu)       ((cpu) == 0)
+#endif
+
+#define any_online_cpu(mask)                   \
+({                                             \
+       int cpu;                                \
+       for_each_cpu_mask(cpu, (mask))          \
+               if (cpu_online(cpu))            \
+                       break;                  \
+       cpu;                                    \
+})
+
+#define for_each_cpu(cpu)        for_each_cpu_mask((cpu), cpu_possible_map)
+#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
+#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
+
+#endif /* __LINUX_CPUMASK_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/linux/hardirq.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/linux/hardirq.h    Wed Aug  3 00:25:11 2005
@@ -0,0 +1,110 @@
+#ifndef LINUX_HARDIRQ_H
+#define LINUX_HARDIRQ_H
+
+#include <linux/config.h>
+#include <linux/smp_lock.h>
+#include <asm/hardirq.h>
+#include <asm/system.h>
+
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count can be overridden per architecture, the default is:
+ *
+ * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
+ * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x0fff0000
+ */
+#define PREEMPT_BITS   8
+#define SOFTIRQ_BITS   8
+
+#ifndef HARDIRQ_BITS
+#define HARDIRQ_BITS   12
+/*
+ * The hardirq mask has to be large enough to have space for potentially
+ * all IRQ sources in the system nesting on a single CPU.
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+#endif
+
+#define PREEMPT_SHIFT  0
+#define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __IRQ_MASK(x)  ((1UL << (x))-1)
+
+#define PREEMPT_MASK   (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK   (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+
+#define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
+#define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
+#define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
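As a worked example of the bit layout documented above (illustrative, not part of the patch), a preempt_count() value of 0x00010002 decodes to one nested hardirq and a preemption depth of two:

	unsigned int pc = 0x00010002;		/* hypothetical sample value */
	unsigned int h = pc & HARDIRQ_MASK;	/* 0x00010000: one hardirq level */
	unsigned int s = pc & SOFTIRQ_MASK;	/* 0x00000000: no softirq nesting */
	unsigned int p = pc & PREEMPT_MASK;	/* 0x00000002: preempt depth 2 */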
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq()               (hardirq_count())
+#define in_softirq()           (softirq_count())
+#ifndef XEN
+#define in_interrupt()         (irq_count())
+#else
+#define in_interrupt()         0               // FIXME LATER
+#endif
+
+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
+# define in_atomic()   ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
+#else
+# define in_atomic()   ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
+#endif
+
+#ifdef CONFIG_PREEMPT
+# define preemptible() (preempt_count() == 0 && !irqs_disabled())
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define preemptible() 0
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+
+#ifdef CONFIG_SMP
+extern void synchronize_irq(unsigned int irq);
+#else
+# define synchronize_irq(irq)  barrier()
+#endif
+
+#define nmi_enter()            irq_enter()
+#define nmi_exit()             sub_preempt_count(HARDIRQ_OFFSET)
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+static inline void account_user_vtime(struct task_struct *tsk)
+{
+}
+
+static inline void account_system_vtime(struct task_struct *tsk)
+{
+}
+#endif
+
+#define irq_enter()                                    \
+       do {                                            \
+               account_system_vtime(current);          \
+               add_preempt_count(HARDIRQ_OFFSET);      \
+       } while (0)
+
+extern void irq_exit(void);
+
+#endif /* LINUX_HARDIRQ_H */
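For context, a sketch of how an interrupt entry path is expected to use these hooks (illustrative, not part of the patch; handle_vector() is a hypothetical handler):

	static void example_interrupt_path(void)
	{
		irq_enter();		/* adds HARDIRQ_OFFSET to preempt_count() */
		handle_vector();	/* in_irq() is non-zero in here */
		irq_exit();		/* drops the count; may run pending softirqs */
	}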
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux-xen/linux/interrupt.h
--- /dev/null   Tue Aug  2 23:59:09 2005
+++ b/xen/include/asm-ia64/linux-xen/linux/interrupt.h  Wed Aug  3 00:25:11 2005
@@ -0,0 +1,291 @@
+/* interrupt.h */
+#ifndef _LINUX_INTERRUPT_H
+#define _LINUX_INTERRUPT_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/bitops.h>
+#include <linux/preempt.h>
+#include <linux/cpumask.h>
+#include <linux/hardirq.h>
+#include <asm/atomic.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+/*
+ * For 2.4.x compatibility, 2.4.x can use
+ *
+ *     typedef void irqreturn_t;
+ *     #define IRQ_NONE
+ *     #define IRQ_HANDLED
+ *     #define IRQ_RETVAL(x)
+ *
+ * To mix old-style and new-style irq handler returns.
+ *
+ * IRQ_NONE means we didn't handle it.
+ * IRQ_HANDLED means that we did have a valid interrupt and handled it.
+ * IRQ_RETVAL(x) selects between the two depending on x being non-zero (non-zero means handled)
+ */
+typedef int irqreturn_t;
+
+#define IRQ_NONE       (0)
+#define IRQ_HANDLED    (1)
+#define IRQ_RETVAL(x)  ((x) != 0)
+
+#ifndef XEN
+struct irqaction {
+       irqreturn_t (*handler)(int, void *, struct pt_regs *);
+       unsigned long flags;
+       cpumask_t mask;
+       const char *name;
+       void *dev_id;
+       struct irqaction *next;
+       int irq;
+       struct proc_dir_entry *dir;
+};
+
+extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
+extern int request_irq(unsigned int,
+                      irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                      unsigned long, const char *, void *);
+extern void free_irq(unsigned int, void *);
+#endif
+
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+extern void disable_irq_nosync(unsigned int irq);
+extern void disable_irq(unsigned int irq);
+extern void enable_irq(unsigned int irq);
+#endif
+
+/*
+ * Temporary defines for UP kernels, until all code gets fixed.
+ */
+#ifndef CONFIG_SMP
+static inline void __deprecated cli(void)
+{
+       local_irq_disable();
+}
+static inline void __deprecated sti(void)
+{
+       local_irq_enable();
+}
+static inline void __deprecated save_flags(unsigned long *x)
+{
+       local_save_flags(*x);
+}
+#define save_flags(x) save_flags(&x);
+static inline void __deprecated restore_flags(unsigned long x)
+{
+       local_irq_restore(x);
+}
+
+static inline void __deprecated save_and_cli(unsigned long *x)
+{
+       local_irq_save(*x);
+}
+#define save_and_cli(x)        save_and_cli(&x)
+#endif /* CONFIG_SMP */
+
+/* SoftIRQ primitives.  */
+#define local_bh_disable() \
+               do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
+#define __local_bh_enable() \
+               do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
+
+extern void local_bh_enable(void);
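A sketch of bracketing a softirq-critical section with these primitives (illustrative, not part of the patch):

	static void touch_shared_data(void)
	{
		local_bh_disable();	/* defer softirqs on this CPU */
		/* ... modify data also touched by a tasklet or softirq ... */
		local_bh_enable();	/* pending softirqs may now run */
	}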
+
+/* PLEASE avoid allocating new softirqs unless you really need
+   high-frequency threaded job scheduling. For almost all purposes
+   tasklets are more than enough. E.g. all serial device BHs et
+   al. should be converted to tasklets, not to softirqs.
+ */
+
+enum
+{
+       HI_SOFTIRQ=0,
+       TIMER_SOFTIRQ,
+       NET_TX_SOFTIRQ,
+       NET_RX_SOFTIRQ,
+       SCSI_SOFTIRQ,
+       TASKLET_SOFTIRQ
+};
+
+/* softirq mask and active fields moved to irq_cpustat_t in
+ * asm/hardirq.h to get better cache usage.  KAO
+ */
+
+struct softirq_action
+{
+       void    (*action)(struct softirq_action *);
+       void    *data;
+};
+
+asmlinkage void do_softirq(void);
+//extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+extern void softirq_init(void);
+#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
+extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
+extern void FASTCALL(raise_softirq(unsigned int nr));
+
+
+/* Tasklets --- multithreaded analogue of BHs.
+
+   Main feature differing them from generic softirqs: a given tasklet
+   runs on only one CPU at a time.
+
+   Main feature differing them from BHs: different tasklets
+   may be run simultaneously on different CPUs.
+
+   Properties:
+   * If tasklet_schedule() is called, the tasklet is guaranteed
+     to be executed on some cpu at least once after this.
+   * If the tasklet is already scheduled, but its execution is still not
+     started, it will be executed only once.
+   * If this tasklet is already running on another CPU (or schedule is called
+     from the tasklet itself), it is rescheduled for later.
+   * A tasklet is strictly serialized wrt itself, but not
+     wrt other tasklets. If a client needs some intertask synchronization,
+     it can be done with spinlocks.
+ */
+
+struct tasklet_struct
+{
+       struct tasklet_struct *next;
+       unsigned long state;
+       atomic_t count;
+       void (*func)(unsigned long);
+       unsigned long data;
+};
+
+#define DECLARE_TASKLET(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+
+#define DECLARE_TASKLET_DISABLED(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
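A sketch of how the pieces fit together (illustrative, not part of the patch; my_work() and my_irq_handler() are hypothetical):

	static void my_work(unsigned long data)
	{
		/* deferred work: runs later in softirq context,
		 * on at most one CPU at a time */
	}

	static DECLARE_TASKLET(my_tasklet, my_work, 0);

	static void my_irq_handler(void)
	{
		tasklet_schedule(&my_tasklet);	/* no-op if already scheduled */
	}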
+
+
+enum
+{
+       TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
+       TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
+};
+
+#ifdef CONFIG_SMP
+static inline int tasklet_trylock(struct tasklet_struct *t)
+{
+       return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline void tasklet_unlock(struct tasklet_struct *t)
+{
+       smp_mb__before_clear_bit(); 
+       clear_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+       while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
+#else
+#define tasklet_trylock(t) 1
+#define tasklet_unlock_wait(t) do { } while (0)
+#define tasklet_unlock(t) do { } while (0)
+#endif
+
+extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
+
+static inline void tasklet_schedule(struct tasklet_struct *t)
+{
+       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+               __tasklet_schedule(t);
+}
+
+extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
+
+static inline void tasklet_hi_schedule(struct tasklet_struct *t)
+{
+       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+               __tasklet_hi_schedule(t);
+}
+
+
+static inline void tasklet_disable_nosync(struct tasklet_struct *t)
+{
+       atomic_inc(&t->count);
+       smp_mb__after_atomic_inc();
+}
+
+static inline void tasklet_disable(struct tasklet_struct *t)
+{
+       tasklet_disable_nosync(t);
+       tasklet_unlock_wait(t);
+       smp_mb();
+}
+
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+       smp_mb__before_atomic_dec();
+       atomic_dec(&t->count);
+}
+
+static inline void tasklet_hi_enable(struct tasklet_struct *t)
+{
+       smp_mb__before_atomic_dec();
+       atomic_dec(&t->count);
+}
+
+extern void tasklet_kill(struct tasklet_struct *t);
+extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
+extern void tasklet_init(struct tasklet_struct *t,
+                        void (*func)(unsigned long), unsigned long data);
+
+/*
+ * Autoprobing for irqs:
+ *
+ * probe_irq_on() and probe_irq_off() provide robust primitives
+ * for accurate IRQ probing during kernel initialization.  They are
+ * reasonably simple to use, are not "fooled" by spurious interrupts,
+ * and, unlike other attempts at IRQ probing, they do not get hung on
+ * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
+ *
+ * For reasonably foolproof probing, use them as follows:
+ *
+ * 1. clear and/or mask the device's internal interrupt.
+ * 2. sti();
+ * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
+ * 4. enable the device and cause it to trigger an interrupt.
+ * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
+ * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
+ * 7. service the device to clear its pending interrupt.
+ * 8. loop again if paranoia is required.
+ *
+ * probe_irq_on() returns a mask of allocated irq's.
+ *
+ * probe_irq_off() takes the mask as a parameter,
+ * and returns the irq number which occurred,
+ * or zero if none occurred, or a negative irq number
+ * if more than one irq occurred.
+ */
+
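The same sequence as a code sketch (illustrative, not part of the patch; the device_*() helpers and the delay length are hypothetical):

	static int example_probe(void)
	{
		unsigned long irqs;
		int irq;

		device_mask_interrupts();	/* step 1 */
		irqs = probe_irq_on();		/* step 3: take over idle IRQs */
		device_trigger_interrupt();	/* step 4 */
		mdelay(20);			/* step 5: wait for the interrupt */
		irq = probe_irq_off(irqs);	/* step 6: 0=none, <0=multiple */
		device_ack_interrupt();		/* step 7 */
		return irq;
	}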
+#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE) 
+static inline unsigned long probe_irq_on(void)
+{
+       return 0;
+}
+static inline int probe_irq_off(unsigned long val)
+{
+       return 0;
+}
+static inline unsigned int probe_irq_mask(unsigned long val)
+{
+       return 0;
+}
+#else
+extern unsigned long probe_irq_on(void);       /* returns 0 on failure */
+extern int probe_irq_off(unsigned long);       /* returns 0 or negative on failure */
+extern unsigned int probe_irq_mask(unsigned long);     /* returns mask of ISA interrupts */
+#endif
+
+#endif
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/efi.c
--- a/xen/arch/ia64/efi.c       Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,866 +0,0 @@
-/*
- * Extensible Firmware Interface
- *
- * Based on Extensible Firmware Interface Specification version 0.9 April 30, 
1999
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- * Copyright (C) 1999-2003 Hewlett-Packard Co.
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- *
- * All EFI Runtime Services are not implemented yet as EFI only
- * supports physical mode addressing on SoftSDV. This is to be fixed
- * in a future version.  --drummond 1999-07-20
- *
- * Implemented EFI runtime services and virtual mode calls.  --davidm
- *
- * Goutham Rao: <goutham.rao@xxxxxxxxx>
- *     Skip non-WB memory and ignore empty memory ranges.
- */
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/efi.h>
-
-#include <asm/io.h>
-#include <asm/kregs.h>
-#include <asm/meminit.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/mca.h>
-
-#define EFI_DEBUG      0
-
-extern efi_status_t efi_call_phys (void *, ...);
-
-struct efi efi;
-EXPORT_SYMBOL(efi);
-static efi_runtime_services_t *runtime;
-static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
-
-#define efi_call_virt(f, args...)      (*(f))(args)
-
-#define STUB_GET_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
-{ \
-       struct ia64_fpreg fr[6]; \
-       efi_time_cap_t *atc = NULL; \
-       efi_status_t ret; \
- \
-       if (tc) \
-               atc = adjust_arg(tc); \
-       ia64_save_scratch_fpregs(fr); \
-       ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
-       ia64_load_scratch_fpregs(fr); \
-       return ret; \
-}
-
-#define STUB_SET_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_time (efi_time_t *tm) \
-{ \
-       struct ia64_fpreg fr[6]; \
-       efi_status_t ret; \
- \
-       ia64_save_scratch_fpregs(fr); \
-       ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
-       ia64_load_scratch_fpregs(fr); \
-       return ret; \
-}
-
-#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
-{ \
-       struct ia64_fpreg fr[6]; \
-       efi_status_t ret; \
- \
-       ia64_save_scratch_fpregs(fr); \
-       ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
-                               adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
-       ia64_load_scratch_fpregs(fr); \
-       return ret; \
-}
-
-#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
-{ \
-       struct ia64_fpreg fr[6]; \
-       efi_time_t *atm = NULL; \
-       efi_status_t ret; \
- \
-       if (tm) \
-               atm = adjust_arg(tm); \
-       ia64_save_scratch_fpregs(fr); \
-       ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
-                               enabled, atm); \
-       ia64_load_scratch_fpregs(fr); \
-       return ret; \
-}
-
-#define STUB_GET_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
-                      unsigned long *data_size, void *data) \
-{ \
-       struct ia64_fpreg fr[6]; \
-       u32 *aattr = NULL; \
-       efi_status_t ret; \
- \
-       if (attr) \
-               aattr = adjust_arg(attr); \
-       ia64_save_scratch_fpregs(fr); \
-       ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
-                               adjust_arg(name), adjust_arg(vendor), aattr, \
-                               adjust_arg(data_size), adjust_arg(data)); \
-       ia64_load_scratch_fpregs(fr); \
-       return ret; \
-}
-
-#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
-{ \
-       struct ia64_fpreg fr[6]; \
-       efi_status_t ret; \
- \
-       ia64_save_scratch_fpregs(fr); \
-       ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
-                               adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
-       ia64_load_scratch_fpregs(fr); \
-       return ret; \
-}
-
-#define STUB_SET_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
-                      unsigned long data_size, void *data) \
-{ \
-       struct ia64_fpreg fr[6]; \
-       efi_status_t ret; \
- \
-       ia64_save_scratch_fpregs(fr); \
-       ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
-                               adjust_arg(name), adjust_arg(vendor), attr, data_size, \
-                               adjust_arg(data)); \
-       ia64_load_scratch_fpregs(fr); \
-       return ret; \
-}
-
-#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_next_high_mono_count (u32 *count) \
-{ \
-       struct ia64_fpreg fr[6]; \
-       efi_status_t ret; \
- \
-       ia64_save_scratch_fpregs(fr); \
-       ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
-                               __va(runtime->get_next_high_mono_count), adjust_arg(count)); \
-       ia64_load_scratch_fpregs(fr); \
-       return ret; \
-}
-
-#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
-static void \
-prefix##_reset_system (int reset_type, efi_status_t status, \
-                      unsigned long data_size, efi_char16_t *data) \
-{ \
-       struct ia64_fpreg fr[6]; \
-       efi_char16_t *adata = NULL; \
- \
-       if (data) \
-               adata = adjust_arg(data); \
- \
-       ia64_save_scratch_fpregs(fr); \
-       efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
-                         reset_type, status, data_size, adata); \
-       /* should not return, but just in case... */ \
-       ia64_load_scratch_fpregs(fr); \
-}
-
-#define phys_ptr(arg)  ((__typeof__(arg)) ia64_tpa(arg))
-
-STUB_GET_TIME(phys, phys_ptr)
-STUB_SET_TIME(phys, phys_ptr)
-STUB_GET_WAKEUP_TIME(phys, phys_ptr)
-STUB_SET_WAKEUP_TIME(phys, phys_ptr)
-STUB_GET_VARIABLE(phys, phys_ptr)
-STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
-STUB_SET_VARIABLE(phys, phys_ptr)
-STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
-STUB_RESET_SYSTEM(phys, phys_ptr)
-
-#define id(arg)        arg
-
-STUB_GET_TIME(virt, id)
-STUB_SET_TIME(virt, id)
-STUB_GET_WAKEUP_TIME(virt, id)
-STUB_SET_WAKEUP_TIME(virt, id)
-STUB_GET_VARIABLE(virt, id)
-STUB_GET_NEXT_VARIABLE(virt, id)
-STUB_SET_VARIABLE(virt, id)
-STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
-STUB_RESET_SYSTEM(virt, id)
-
-void
-efi_gettimeofday (struct timespec *ts)
-{
-       efi_time_t tm;
-
-       memset(ts, 0, sizeof(ts));
-       if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
-               return;
-
-       ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
-       ts->tv_nsec = tm.nanosecond;
-}
-
-static int
-is_available_memory (efi_memory_desc_t *md)
-{
-       if (!(md->attribute & EFI_MEMORY_WB))
-               return 0;
-
-       switch (md->type) {
-             case EFI_LOADER_CODE:
-             case EFI_LOADER_DATA:
-             case EFI_BOOT_SERVICES_CODE:
-             case EFI_BOOT_SERVICES_DATA:
-             case EFI_CONVENTIONAL_MEMORY:
-               return 1;
-       }
-       return 0;
-}
-
-/*
- * Trim descriptor MD so it starts at address START_ADDR.  If the descriptor covers
- * memory that is normally available to the kernel, issue a warning that some memory
- * is being ignored.
- */
-static void
-trim_bottom (efi_memory_desc_t *md, u64 start_addr)
-{
-       u64 num_skipped_pages;
-
-       if (md->phys_addr >= start_addr || !md->num_pages)
-               return;
-
-       num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
-       if (num_skipped_pages > md->num_pages)
-               num_skipped_pages = md->num_pages;
-
-       if (is_available_memory(md))
-               printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
-                      "at 0x%lx\n", __FUNCTION__,
-                      (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
-                      md->phys_addr, start_addr - IA64_GRANULE_SIZE);
-       /*
-        * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
-        * descriptor list to become unsorted.  In such a case, md->num_pages will be
-        * zero, so the Right Thing will happen.
-        */
-       md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
-       md->num_pages -= num_skipped_pages;
-}
-
-static void
-trim_top (efi_memory_desc_t *md, u64 end_addr)
-{
-       u64 num_dropped_pages, md_end_addr;
-
-       md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
-       if (md_end_addr <= end_addr || !md->num_pages)
-               return;
-
-       num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
-       if (num_dropped_pages > md->num_pages)
-               num_dropped_pages = md->num_pages;
-
-       if (is_available_memory(md))
-               printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
-                      "at 0x%lx\n", __FUNCTION__,
-                      (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
-                      md->phys_addr, end_addr);
-       md->num_pages -= num_dropped_pages;
-}
-
-/*
- * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
- * has memory that is available for OS use.
- */
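For reference, a minimal callback sketch for this walk (illustrative, not part of the patch or the file; count_pages() here is hypothetical, though the ia64 setup code uses a very similar helper):

	static int
	count_pages (u64 start, u64 end, void *arg)
	{
		unsigned long *count = arg;

		*count += (end - start) >> PAGE_SHIFT;
		return 0;	/* a negative return would stop the walk */
	}

	/* usage: unsigned long n = 0; efi_memmap_walk(count_pages, &n); */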
-void
-efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
-{
-       int prev_valid = 0;
-       struct range {
-               u64 start;
-               u64 end;
-       } prev, curr;
-       void *efi_map_start, *efi_map_end, *p, *q;
-       efi_memory_desc_t *md, *check_md;
-       u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
-       unsigned long total_mem = 0;
-
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-               md = p;
-
-               /* skip over non-WB memory descriptors; that's all we're interested in... */
-               if (!(md->attribute & EFI_MEMORY_WB))
-                       continue;
-
-#ifdef XEN
-// this works around a problem in the ski bootloader
-{
-               extern long running_on_sim;
-               if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
-                       continue;
-}
-// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
-               if (md->phys_addr >= 0x100000000) continue;
-#endif
-               /*
-                * granule_addr is the base of md's first granule.
-                * [granule_addr - first_non_wb_addr) is guaranteed to
-                * be contiguous WB memory.
-                */
-               granule_addr = GRANULEROUNDDOWN(md->phys_addr);
-               first_non_wb_addr = max(first_non_wb_addr, granule_addr);
-
-               if (first_non_wb_addr < md->phys_addr) {
-                       trim_bottom(md, granule_addr + IA64_GRANULE_SIZE);
-                       granule_addr = GRANULEROUNDDOWN(md->phys_addr);
-                       first_non_wb_addr = max(first_non_wb_addr, granule_addr);
-               }
-
-               for (q = p; q < efi_map_end; q += efi_desc_size) {
-                       check_md = q;
-
-                       if ((check_md->attribute & EFI_MEMORY_WB) &&
-                           (check_md->phys_addr == first_non_wb_addr))
-                               first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
-                       else
-                               break;          /* non-WB or hole */
-               }
-
-               last_granule_addr = GRANULEROUNDDOWN(first_non_wb_addr);
-               if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT))
-                       trim_top(md, last_granule_addr);
-
-               if (is_available_memory(md)) {
-                       if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) >= max_addr) {
-                               if (md->phys_addr >= max_addr)
-                                       continue;
-                               md->num_pages = (max_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
-                               first_non_wb_addr = max_addr;
-                       }
-
-                       if (total_mem >= mem_limit)
-                               continue;
-
-                       if (total_mem + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
-                               unsigned long limit_addr = md->phys_addr;
-
-                               limit_addr += mem_limit - total_mem;
-                               limit_addr = GRANULEROUNDDOWN(limit_addr);
-
-                               if (md->phys_addr > limit_addr)
-                                       continue;
-
-                               md->num_pages = (limit_addr - md->phys_addr) >>
-                                               EFI_PAGE_SHIFT;
-                               first_non_wb_addr = max_addr = md->phys_addr +
-                                             (md->num_pages << EFI_PAGE_SHIFT);
-                       }
-                       total_mem += (md->num_pages << EFI_PAGE_SHIFT);
-
-                       if (md->num_pages == 0)
-                               continue;
-
-                       curr.start = PAGE_OFFSET + md->phys_addr;
-                       curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
-
-                       if (!prev_valid) {
-                               prev = curr;
-                               prev_valid = 1;
-                       } else {
-                               if (curr.start < prev.start)
-                                       printk(KERN_ERR "Oops: EFI memory table not ordered!\n");
-
-                               if (prev.end == curr.start) {
-                                       /* merge two consecutive memory ranges */
-                                       prev.end = curr.end;
-                               } else {
-                                       start = PAGE_ALIGN(prev.start);
-                                       end = prev.end & PAGE_MASK;
-                                       if ((end > start) && (*callback)(start, end, arg) < 0)
-                                               return;
-                                       prev = curr;
-                               }
-                       }
-               }
-       }
-       if (prev_valid) {
-               start = PAGE_ALIGN(prev.start);
-               end = prev.end & PAGE_MASK;
-               if (end > start)
-                       (*callback)(start, end, arg);
-       }
-}
-
-/*
- * Look for the PAL_CODE region reported by EFI and map it using an
- * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
- * Abstraction Layer chapter 11 in ADAG
- */
-
-void *
-efi_get_pal_addr (void)
-{
-       void *efi_map_start, *efi_map_end, *p;
-       efi_memory_desc_t *md;
-       u64 efi_desc_size;
-       int pal_code_count = 0;
-       u64 vaddr, mask;
-
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-               md = p;
-               if (md->type != EFI_PAL_CODE)
-                       continue;
-
-               if (++pal_code_count > 1) {
-                       printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
-                              md->phys_addr);
-                       continue;
-               }
-               /*
-                * The only ITLB entry in region 7 that is used is the one installed by
-                * __start().  That entry covers a 64MB range.
-                */
-               mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
-               vaddr = PAGE_OFFSET + md->phys_addr;
-
-               /*
-                * We must check that the PAL mapping won't overlap with the kernel
-                * mapping.
-                *
-                * PAL code is guaranteed to be aligned on a power of 2 between 4k and
-                * 256KB and that only one ITR is needed to map it. This implies that the
-                * PAL code is always aligned on its size, i.e., the closest matching page
-                * size supported by the TLB. Therefore PAL code is guaranteed never to
-                * cross a 64MB boundary unless it is bigger than 64MB (very unlikely!).  So for
-                * now the following test is enough to determine whether or not we need a
-                * dedicated ITR for the PAL code.
-                */
-               if ((vaddr & mask) == (KERNEL_START & mask)) {
-                       printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
-                              __FUNCTION__);
-                       continue;
-               }
-
-               if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
-                       panic("Woah!  PAL code size bigger than a granule!");
-
-#if EFI_DEBUG
-               mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
-
-               printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
-                       smp_processor_id(), md->phys_addr,
-                       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
-                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
-#endif
-               return __va(md->phys_addr);
-       }
-       printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
-              __FUNCTION__);
-       return NULL;
-}
-
-void
-efi_map_pal_code (void)
-{
-       void *pal_vaddr = efi_get_pal_addr ();
-       u64 psr;
-
-       if (!pal_vaddr)
-               return;
-
-       /*
-        * Cannot write to CRx with PSR.ic=1
-        */
-       psr = ia64_clear_ic();
-       ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
-                pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
-                IA64_GRANULE_SHIFT);
-       ia64_set_psr(psr);              /* restore psr */
-       ia64_srlz_i();
-}
-
-void __init
-efi_init (void)
-{
-       void *efi_map_start, *efi_map_end;
-       efi_config_table_t *config_tables;
-       efi_char16_t *c16;
-       u64 efi_desc_size;
-       char *cp, *end, vendor[100] = "unknown";
-       extern char saved_command_line[];
-       int i;
-
-       /* it's too early to be able to use the standard kernel command line support... */
-       for (cp = saved_command_line; *cp; ) {
-               if (memcmp(cp, "mem=", 4) == 0) {
-                       cp += 4;
-                       mem_limit = memparse(cp, &end);
-                       if (end != cp)
-                               break;
-                       cp = end;
-               } else if (memcmp(cp, "max_addr=", 9) == 0) {
-                       cp += 9;
-                       max_addr = GRANULEROUNDDOWN(memparse(cp, &end));
-                       if (end != cp)
-                               break;
-                       cp = end;
-               } else {
-                       while (*cp != ' ' && *cp)
-                               ++cp;
-                       while (*cp == ' ')
-                               ++cp;
-               }
-       }
-       if (max_addr != ~0UL)
-               printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
-
-       efi.systab = __va(ia64_boot_param->efi_systab);
-
-       /*
-        * Verify the EFI Table
-        */
-       if (efi.systab == NULL)
-               panic("Woah! Can't find EFI system table.\n");
-       if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
-               panic("Woah! EFI system table signature incorrect\n");
-       if ((efi.systab->hdr.revision ^ EFI_SYSTEM_TABLE_REVISION) >> 16 != 0)
-               printk(KERN_WARNING "Warning: EFI system table major version mismatch: "
-                      "got %d.%02d, expected %d.%02d\n",
-                      efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff,
-                      EFI_SYSTEM_TABLE_REVISION >> 16, EFI_SYSTEM_TABLE_REVISION & 0xffff);
-
-       config_tables = __va(efi.systab->tables);
-
-       /* Show what we know for posterity */
-       c16 = __va(efi.systab->fw_vendor);
-       if (c16) {
-               for (i = 0;i < (int) sizeof(vendor) && *c16; ++i)
-                       vendor[i] = *c16++;
-               vendor[i] = '\0';
-       }
-
-       printk(KERN_INFO "EFI v%u.%.02u by %s:",
-              efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
-
-       for (i = 0; i < (int) efi.systab->nr_tables; i++) {
-               if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
-                       efi.mps = __va(config_tables[i].table);
-                       printk(" MPS=0x%lx", config_tables[i].table);
-               } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
-                       efi.acpi20 = __va(config_tables[i].table);
-                       printk(" ACPI 2.0=0x%lx", config_tables[i].table);
-               } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
-                       efi.acpi = __va(config_tables[i].table);
-                       printk(" ACPI=0x%lx", config_tables[i].table);
-               } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
-                       efi.smbios = __va(config_tables[i].table);
-                       printk(" SMBIOS=0x%lx", config_tables[i].table);
-               } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
-                       efi.sal_systab = __va(config_tables[i].table);
-                       printk(" SALsystab=0x%lx", config_tables[i].table);
-               } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
-                       efi.hcdp = __va(config_tables[i].table);
-                       printk(" HCDP=0x%lx", config_tables[i].table);
-               }
-       }
-       printk("\n");
-
-       runtime = __va(efi.systab->runtime);
-       efi.get_time = phys_get_time;
-       efi.set_time = phys_set_time;
-       efi.get_wakeup_time = phys_get_wakeup_time;
-       efi.set_wakeup_time = phys_set_wakeup_time;
-       efi.get_variable = phys_get_variable;
-       efi.get_next_variable = phys_get_next_variable;
-       efi.set_variable = phys_set_variable;
-       efi.get_next_high_mono_count = phys_get_next_high_mono_count;
-       efi.reset_system = phys_reset_system;
-
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-#if EFI_DEBUG
-       /* print EFI memory map: */
-       {
-               efi_memory_desc_t *md;
-               void *p;
-
-               for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
-                       md = p;
-                       printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
-                              i, md->type, md->attribute, md->phys_addr,
-                              md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
-                              md->num_pages >> (20 - EFI_PAGE_SHIFT));
-               }
-       }
-#endif
-
-       efi_map_pal_code();
-       efi_enter_virtual_mode();
-}
-
-void
-efi_enter_virtual_mode (void)
-{
-       void *efi_map_start, *efi_map_end, *p;
-       efi_memory_desc_t *md;
-       efi_status_t status;
-       u64 efi_desc_size;
-
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-               md = p;
-               if (md->attribute & EFI_MEMORY_RUNTIME) {
-                       /*
-                        * Some descriptors have multiple bits set, so the order of
-                        * the tests is relevant.
-                        */
-                       if (md->attribute & EFI_MEMORY_WB) {
-                               md->virt_addr = (u64) __va(md->phys_addr);
-                       } else if (md->attribute & EFI_MEMORY_UC) {
-                               md->virt_addr = (u64) ioremap(md->phys_addr, 0);
-                       } else if (md->attribute & EFI_MEMORY_WC) {
-#if 0
-                               md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
-                                                                          | _PAGE_D
-                                                                          | _PAGE_MA_WC
-                                                                          | _PAGE_PL_0
-                                                                          | _PAGE_AR_RW));
-#else
-                               printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
-                               md->virt_addr = (u64) ioremap(md->phys_addr, 0);
-#endif
-                       } else if (md->attribute & EFI_MEMORY_WT) {
-#if 0
-                               md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
-                                                                          | _PAGE_D | _PAGE_MA_WT
-                                                                          | _PAGE_PL_0
-                                                                          | _PAGE_AR_RW));
-#else
-                               printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
-                               md->virt_addr = (u64) ioremap(md->phys_addr, 0);
-#endif
-                       }
-               }
-       }
-
-       status = efi_call_phys(__va(runtime->set_virtual_address_map),
-                              ia64_boot_param->efi_memmap_size,
-                              efi_desc_size, ia64_boot_param->efi_memdesc_version,
-                              ia64_boot_param->efi_memmap);
-       if (status != EFI_SUCCESS) {
-               printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
-                      "(status=%lu)\n", status);
-               return;
-       }
-
-       /*
-        * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
-        */
-       efi.get_time = virt_get_time;
-       efi.set_time = virt_set_time;
-       efi.get_wakeup_time = virt_get_wakeup_time;
-       efi.set_wakeup_time = virt_set_wakeup_time;
-       efi.get_variable = virt_get_variable;
-       efi.get_next_variable = virt_get_next_variable;
-       efi.set_variable = virt_set_variable;
-       efi.get_next_high_mono_count = virt_get_next_high_mono_count;
-       efi.reset_system = virt_reset_system;
-}
-
-/*
- * Walk the EFI memory map looking for the I/O port range.  There can only be one entry of
- * this type, other I/O port ranges should be described via ACPI.
- */
-u64
-efi_get_iobase (void)
-{
-       void *efi_map_start, *efi_map_end, *p;
-       efi_memory_desc_t *md;
-       u64 efi_desc_size;
-
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-               md = p;
-               if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
-                       if (md->attribute & EFI_MEMORY_UC)
-                               return md->phys_addr;
-               }
-       }
-       return 0;
-}
-
-#ifdef XEN
-// variation of efi_get_iobase which returns entire memory descriptor
-efi_memory_desc_t *
-efi_get_io_md (void)
-{
-       void *efi_map_start, *efi_map_end, *p;
-       efi_memory_desc_t *md;
-       u64 efi_desc_size;
-
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-               md = p;
-               if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
-                       if (md->attribute & EFI_MEMORY_UC)
-                               return md;
-               }
-       }
-       return 0;
-}
-#endif
-
-u32
-efi_mem_type (unsigned long phys_addr)
-{
-       void *efi_map_start, *efi_map_end, *p;
-       efi_memory_desc_t *md;
-       u64 efi_desc_size;
-
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-               md = p;
-
-               if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
-                        return md->type;
-       }
-       return 0;
-}
-
-u64
-efi_mem_attributes (unsigned long phys_addr)
-{
-       void *efi_map_start, *efi_map_end, *p;
-       efi_memory_desc_t *md;
-       u64 efi_desc_size;
-
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-               md = p;
-
-               if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
-                       return md->attribute;
-       }
-       return 0;
-}
-EXPORT_SYMBOL(efi_mem_attributes);
-
-int
-valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
-{
-       void *efi_map_start, *efi_map_end, *p;
-       efi_memory_desc_t *md;
-       u64 efi_desc_size;
-
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-               md = p;
-
-               if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
-                       if (!(md->attribute & EFI_MEMORY_WB))
-                               return 0;
-
-                       if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
-                               *size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-int __init
-efi_uart_console_only(void)
-{
-       efi_status_t status;
-       char *s, name[] = "ConOut";
-       efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
-       efi_char16_t *utf16, name_utf16[32];
-       unsigned char data[1024];
-       unsigned long size = sizeof(data);
-       struct efi_generic_dev_path *hdr, *end_addr;
-       int uart = 0;
-
-       /* Convert to UTF-16 */
-       utf16 = name_utf16;
-       s = name;
-       while (*s)
-               *utf16++ = *s++ & 0x7f;
-       *utf16 = 0;
-
-       status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
-       if (status != EFI_SUCCESS) {
-               printk(KERN_ERR "No EFI %s variable?\n", name);
-               return 0;
-       }
-
-       hdr = (struct efi_generic_dev_path *) data;
-       end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
-       while (hdr < end_addr) {
-               if (hdr->type == EFI_DEV_MSG &&
-                   hdr->sub_type == EFI_DEV_MSG_UART)
-                       uart = 1;
-               else if (hdr->type == EFI_DEV_END_PATH ||
-                         hdr->type == EFI_DEV_END_PATH2) {
-                       if (!uart)
-                               return 0;
-                       if (hdr->sub_type == EFI_DEV_END_ENTIRE)
-                               return 1;
-                       uart = 0;
-               }
-               hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
-       }
-       printk(KERN_ERR "Malformed %s value\n", name);
-       return 0;
-}
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/entry.S
--- a/xen/arch/ia64/entry.S     Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,1653 +0,0 @@
-/*
- * ia64/kernel/entry.S
- *
- * Kernel entry points.
- *
- * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 1999, 2002-2003
- *     Asit Mallick <Asit.K.Mallick@xxxxxxxxx>
- *     Don Dugger <Don.Dugger@xxxxxxxxx>
- *     Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
- *     Fenghua Yu <fenghua.yu@xxxxxxxxx>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- */
-/*
- * ia64_switch_to now places the correct virtual mapping in TR2 for the
- * kernel stack. This allows us to handle interrupts without changing
- * to physical mode.
- *
- * Jonathan Nicklin    <nicklin@xxxxxxxxxxxxxxxxxxxxxxxx>
- * Patrick O'Rourke    <orourke@xxxxxxxxxxxxxxxxxxxxxxxx>
- * 11/07/2000
- */
-/*
- * Global (preserved) predicate usage on syscall entry/exit path:
- *
- *     pKStk:          See entry.h.
- *     pUStk:          See entry.h.
- *     pSys:           See entry.h.
- *     pNonSys:        !pSys
- */
-
-#include <linux/config.h>
-
-#include <asm/asmmacro.h>
-#include <asm/cache.h>
-#include <asm/errno.h>
-#include <asm/kregs.h>
-#include <asm/offsets.h>
-#include <asm/pgtable.h>
-#include <asm/percpu.h>
-#include <asm/processor.h>
-#include <asm/thread_info.h>
-#include <asm/unistd.h>
-
-#include "minstate.h"
-
-#ifndef XEN
-       /*
-        * execve() is special because in case of success, we need to
-        * setup a null register window frame.
-        */
-ENTRY(ia64_execve)
-       /*
-        * Allocate 8 input registers since ptrace() may clobber them
-        */
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-       alloc loc1=ar.pfs,8,2,4,0
-       mov loc0=rp
-       .body
-       mov out0=in0                    // filename
-       ;;                              // stop bit between alloc and call
-       mov out1=in1                    // argv
-       mov out2=in2                    // envp
-       add out3=16,sp                  // regs
-       br.call.sptk.many rp=sys_execve
-.ret0:
-#ifdef CONFIG_IA32_SUPPORT
-       /*
-        * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
-        * from pt_regs.
-        */
-       adds r16=PT(CR_IPSR)+16,sp
-       ;;
-       ld8 r16=[r16]
-#endif
-       cmp4.ge p6,p7=r8,r0
-       mov ar.pfs=loc1                 // restore ar.pfs
-       sxt4 r8=r8                      // return 64-bit result
-       ;;
-       stf.spill [sp]=f0
-(p6)   cmp.ne pKStk,pUStk=r0,r0        // a successful execve() lands us in user-mode...
-       mov rp=loc0
-(p6)   mov ar.pfs=r0                   // clear ar.pfs on success
-(p7)   br.ret.sptk.many rp
-
-       /*
-        * In theory, we'd have to zap this state only to prevent leaking of
-        * security sensitive state (e.g., if current->mm->dumpable is zero).  However,
-        * this executes in less than 20 cycles even on Itanium, so it's not worth
-        * optimizing for...).
-        */
-       mov ar.unat=0;          mov ar.lc=0
-       mov r4=0;               mov f2=f0;              mov b1=r0
-       mov r5=0;               mov f3=f0;              mov b2=r0
-       mov r6=0;               mov f4=f0;              mov b3=r0
-       mov r7=0;               mov f5=f0;              mov b4=r0
-       ldf.fill f12=[sp];      mov f13=f0;             mov b5=r0
-       ldf.fill f14=[sp];      ldf.fill f15=[sp];      mov f16=f0
-       ldf.fill f17=[sp];      ldf.fill f18=[sp];      mov f19=f0
-       ldf.fill f20=[sp];      ldf.fill f21=[sp];      mov f22=f0
-       ldf.fill f23=[sp];      ldf.fill f24=[sp];      mov f25=f0
-       ldf.fill f26=[sp];      ldf.fill f27=[sp];      mov f28=f0
-       ldf.fill f29=[sp];      ldf.fill f30=[sp];      mov f31=f0
-#ifdef CONFIG_IA32_SUPPORT
-       tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
-       movl loc0=ia64_ret_from_ia32_execve
-       ;;
-(p6)   mov rp=loc0
-#endif
-       br.ret.sptk.many rp
-END(ia64_execve)
-
-/*
- * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
- *           u64 tls)
- */
-GLOBAL_ENTRY(sys_clone2)
-       /*
-        * Allocate 8 input registers since ptrace() may clobber them
-        */
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-       alloc r16=ar.pfs,8,2,6,0
-       DO_SAVE_SWITCH_STACK
-       adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
-       mov loc0=rp
-       mov loc1=r16                            // save ar.pfs across do_fork
-       .body
-       mov out1=in1
-       mov out3=in2
-       tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
-       mov out4=in3    // parent_tidptr: valid only w/CLONE_PARENT_SETTID
-       ;;
-(p6)   st8 [r2]=in5                            // store TLS in r16 for copy_thread()
-       mov out5=in4    // child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
-       adds out2=IA64_SWITCH_STACK_SIZE+16,sp  // out2 = &regs
-       mov out0=in0                            // out0 = clone_flags
-       br.call.sptk.many rp=do_fork
-.ret1: .restore sp
-       adds sp=IA64_SWITCH_STACK_SIZE,sp       // pop the switch stack
-       mov ar.pfs=loc1
-       mov rp=loc0
-       br.ret.sptk.many rp
-END(sys_clone2)
-
-/*
- * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
- *     Deprecated.  Use sys_clone2() instead.
- */
-GLOBAL_ENTRY(sys_clone)
-       /*
-        * Allocate 8 input registers since ptrace() may clobber them
-        */
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-       alloc r16=ar.pfs,8,2,6,0
-       DO_SAVE_SWITCH_STACK
-       adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
-       mov loc0=rp
-       mov loc1=r16                            // save ar.pfs across do_fork
-       .body
-       mov out1=in1
-       mov out3=16                             // stacksize (compensates for 16-byte scratch area)
-       tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
-       mov out4=in2    // parent_tidptr: valid only w/CLONE_PARENT_SETTID
-       ;;
-(p6)   st8 [r2]=in4                            // store TLS in r13 (tp)
-       mov out5=in3    // child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
-       adds out2=IA64_SWITCH_STACK_SIZE+16,sp  // out2 = &regs
-       mov out0=in0                            // out0 = clone_flags
-       br.call.sptk.many rp=do_fork
-.ret2: .restore sp
-       adds sp=IA64_SWITCH_STACK_SIZE,sp       // pop the switch stack
-       mov ar.pfs=loc1
-       mov rp=loc0
-       br.ret.sptk.many rp
-END(sys_clone)
-#endif /* !XEN */
-
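For readers tracing the register shuffling in the two clone wrappers above:
out0..out5 are simply the six arguments of do_fork().  A rough C rendering,
assuming the 2.6-era do_fork() prototype (sys_clone passes a fixed stacksize
of 16 to compensate for the scratch area; sys_clone2 passes its ustack_size):

    /* Sketch only -- do_fork() and CLONE_SETTLS come from kernel headers.
     * Assumed prototype:
     *   long do_fork(unsigned long clone_flags, unsigned long stack_start,
     *                struct pt_regs *regs, unsigned long stack_size,
     *                int *parent_tidptr, int *child_tidptr);
     */
    static long sys_clone_in_c(unsigned long flags, unsigned long ustack_base,
                               int *parent_tidptr, int *child_tidptr,
                               unsigned long tls, struct pt_regs *regs)
    {
            if (flags & CLONE_SETTLS)       /* tbit.nz p6,p0=in0,CLONE_SETTLS_BIT */
                    regs->r16 = tls;        /* (p6) st8 [r2]=...: picked up by
                                               copy_thread() and installed as tp */
            return do_fork(flags, ustack_base, regs, 16,
                           parent_tidptr, child_tidptr);
    }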
-/*
- * prev_task <- ia64_switch_to(struct task_struct *next)
- *     With Ingo's new scheduler, interrupts are disabled when this routine gets
- *     called.  The code starting at .map relies on this.  The rest of the code
- *     doesn't care about the interrupt masking status.
- */
-GLOBAL_ENTRY(ia64_switch_to)
-       .prologue
-       alloc r16=ar.pfs,1,0,0,0
-       DO_SAVE_SWITCH_STACK
-       .body
-
-       adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
-       movl r25=init_task
-       mov r27=IA64_KR(CURRENT_STACK)
-       adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
-#ifdef XEN
-       dep r20=0,in0,60,4              // physical address of "next"
-#else
-       dep r20=0,in0,61,3              // physical address of "next"
-#endif
-       ;;
-       st8 [r22]=sp                    // save kernel stack pointer of old task
-       shr.u r26=r20,IA64_GRANULE_SHIFT
-       cmp.eq p7,p6=r25,in0
-       ;;
-       /*
-        * If we've already mapped this task's page, we can skip doing it again.
-        */
-(p6)   cmp.eq p7,p6=r26,r27
-(p6)   br.cond.dpnt .map
-       ;;
-.done:
-(p6)   ssm psr.ic                      // if we had to map, reenable the psr.ic bit FIRST!!!
-       ;;
-(p6)   srlz.d
-       ld8 sp=[r21]                    // load kernel stack pointer of new task
-       mov IA64_KR(CURRENT)=in0        // update "current" application register
-       mov r8=r13                      // return pointer to previously running task
-       mov r13=in0                     // set "current" pointer
-       ;;
-       DO_LOAD_SWITCH_STACK
-
-#ifdef CONFIG_SMP
-       sync.i                          // ensure "fc"s done by this CPU are visible on other CPUs
-#endif
-       br.ret.sptk.many rp             // boogie on out in new context
-
-.map:
-#ifdef XEN
-       // avoid overlapping with kernel TR
-       movl r25=KERNEL_START
-       dep  r23=0,in0,0,KERNEL_TR_PAGE_SHIFT
-       ;;
-       cmp.eq p7,p0=r25,r23
-       ;;
-(p7)   mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
-(p7)   br.cond.sptk .done
-#endif
-       rsm psr.ic                      // interrupts (psr.i) are already disabled here
-       movl r25=PAGE_KERNEL
-       ;;
-       srlz.d
-       or r23=r25,r20                  // construct PA | page properties
-       mov r25=IA64_GRANULE_SHIFT<<2
-       ;;
-       mov cr.itir=r25
-       mov cr.ifa=in0                  // VA of next task...
-       ;;
-       mov r25=IA64_TR_CURRENT_STACK
-       mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
-       ;;
-       itr.d dtr[r25]=r23              // wire in new mapping...
-       br.cond.sptk .done
-END(ia64_switch_to)
-
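In C terms, the context switch above does roughly the following; granule_of(),
map_task_stack() and the kr accessors are invented stand-ins for the
IA64_KR() moves and the itr.d sequence at .map, so treat this as a
control-flow sketch rather than compilable kernel code:

    struct task_struct *ia64_switch_to_sketch(struct task_struct *next)
    {
            struct task_struct *prev = current;          /* r13 on entry */

            prev->thread.ksp = kernel_sp();              /* st8 [r22]=sp */
            if (next != &init_task &&
                granule_of(next) != read_kr_current_stack())
                    map_task_stack(next);  /* itr.d dtr[...]: wire next's stack;
                                              psr.ic is turned off around it */
            set_kernel_sp(next->thread.ksp);             /* ld8 sp=[r21] */
            write_kr_current(next);                      /* mov IA64_KR(CURRENT)=in0 */
            set_current(next);                           /* mov r13=in0 */
            return prev;                                 /* mov r8=old r13 */
    }

Under XEN the .map path additionally skips the re-mapping when the new stack
would overlap the kernel TR, per the #ifdef XEN block above.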
-/*
- * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
- * means that we may get an interrupt with "sp" pointing to the new kernel stack while
- * ar.bspstore is still pointing to the old kernel backing store area.  Since ar.rsc,
- * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
- * problem.  Also, we don't need to specify unwind information for preserved registers
- * that are not modified in save_switch_stack as the right unwind information is already
- * specified at the call-site of save_switch_stack.
- */
-
-/*
- * save_switch_stack:
- *     - r16 holds ar.pfs
- *     - b7 holds address to return to
- *     - rp (b0) holds return address to save
- */
-GLOBAL_ENTRY(save_switch_stack)
-       .prologue
-       .altrp b7
-       flushrs                 // flush dirty regs to backing store (must be first in insn group)
-       .save @priunat,r17
-       mov r17=ar.unat         // preserve caller's
-       .body
-#ifdef CONFIG_ITANIUM
-       adds r2=16+128,sp
-       adds r3=16+64,sp
-       adds r14=SW(R4)+16,sp
-       ;;
-       st8.spill [r14]=r4,16           // spill r4
-       lfetch.fault.excl.nt1 [r3],128
-       ;;
-       lfetch.fault.excl.nt1 [r2],128
-       lfetch.fault.excl.nt1 [r3],128
-       ;;
-       lfetch.fault.excl [r2]
-       lfetch.fault.excl [r3]
-       adds r15=SW(R5)+16,sp
-#else
-       add r2=16+3*128,sp
-       add r3=16,sp
-       add r14=SW(R4)+16,sp
-       ;;
-       st8.spill [r14]=r4,SW(R6)-SW(R4)        // spill r4 and prefetch offset 0x1c0
-       lfetch.fault.excl.nt1 [r3],128  //              prefetch offset 0x010
-       ;;
-       lfetch.fault.excl.nt1 [r3],128  //              prefetch offset 0x090
-       lfetch.fault.excl.nt1 [r2],128  //              prefetch offset 0x190
-       ;;
-       lfetch.fault.excl.nt1 [r3]      //              prefetch offset 0x110
-       lfetch.fault.excl.nt1 [r2]      //              prefetch offset 0x210
-       adds r15=SW(R5)+16,sp
-#endif
-       ;;
-       st8.spill [r15]=r5,SW(R7)-SW(R5)        // spill r5
-       mov.m ar.rsc=0                  // put RSE in mode: enforced lazy, little endian, pl 0
-       add r2=SW(F2)+16,sp             // r2 = &sw->f2
-       ;;
-       st8.spill [r14]=r6,SW(B0)-SW(R6)        // spill r6
-       mov.m r18=ar.fpsr               // preserve fpsr
-       add r3=SW(F3)+16,sp             // r3 = &sw->f3
-       ;;
-       stf.spill [r2]=f2,32
-       mov.m r19=ar.rnat
-       mov r21=b0
-
-       stf.spill [r3]=f3,32
-       st8.spill [r15]=r7,SW(B2)-SW(R7)        // spill r7
-       mov r22=b1
-       ;;
-       // since we're done with the spills, read and save ar.unat:
-       mov.m r29=ar.unat
-       mov.m r20=ar.bspstore
-       mov r23=b2
-       stf.spill [r2]=f4,32
-       stf.spill [r3]=f5,32
-       mov r24=b3
-       ;;
-       st8 [r14]=r21,SW(B1)-SW(B0)             // save b0
-       st8 [r15]=r23,SW(B3)-SW(B2)             // save b2
-       mov r25=b4
-       mov r26=b5
-       ;;
-       st8 [r14]=r22,SW(B4)-SW(B1)             // save b1
-       st8 [r15]=r24,SW(AR_PFS)-SW(B3)         // save b3
-       mov r21=ar.lc           // I-unit
-       stf.spill [r2]=f12,32
-       stf.spill [r3]=f13,32
-       ;;
-       st8 [r14]=r25,SW(B5)-SW(B4)             // save b4
-       st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)      // save ar.pfs
-       stf.spill [r2]=f14,32
-       stf.spill [r3]=f15,32
-       ;;
-       st8 [r14]=r26                           // save b5
-       st8 [r15]=r21                           // save ar.lc
-       stf.spill [r2]=f16,32
-       stf.spill [r3]=f17,32
-       ;;
-       stf.spill [r2]=f18,32
-       stf.spill [r3]=f19,32
-       ;;
-       stf.spill [r2]=f20,32
-       stf.spill [r3]=f21,32
-       ;;
-       stf.spill [r2]=f22,32
-       stf.spill [r3]=f23,32
-       ;;
-       stf.spill [r2]=f24,32
-       stf.spill [r3]=f25,32
-       ;;
-       stf.spill [r2]=f26,32
-       stf.spill [r3]=f27,32
-       ;;
-       stf.spill [r2]=f28,32
-       stf.spill [r3]=f29,32
-       ;;
-       stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
-       stf.spill [r3]=f31,SW(PR)-SW(F31)
-       add r14=SW(CALLER_UNAT)+16,sp
-       ;;
-       st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)    // save ar.unat
-       st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
-       mov r21=pr
-       ;;
-       st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
-       st8 [r3]=r21                            // save predicate registers
-       ;;
-       st8 [r2]=r20                            // save ar.bspstore
-       st8 [r14]=r18                           // save fpsr
-       mov ar.rsc=3            // put RSE back into eager mode, pl 0
-       br.cond.sptk.many b7
-END(save_switch_stack)
-
-/*
- * load_switch_stack:
- *     - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
- *     - b7 holds address to return to
- *     - must not touch r8-r11
- */
-#ifdef XEN
-GLOBAL_ENTRY(load_switch_stack)
-#else
-ENTRY(load_switch_stack)
-#endif
-       .prologue
-       .altrp b7
-
-       .body
-       lfetch.fault.nt1 [sp]
-       adds r2=SW(AR_BSPSTORE)+16,sp
-       adds r3=SW(AR_UNAT)+16,sp
-       mov ar.rsc=0                                            // put RSE into enforced lazy mode
-       adds r14=SW(CALLER_UNAT)+16,sp
-       adds r15=SW(AR_FPSR)+16,sp
-       ;;
-       ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))   // bspstore
-       ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))       // unat
-       ;;
-       ld8 r21=[r2],16         // restore b0
-       ld8 r22=[r3],16         // restore b1
-       ;;
-       ld8 r23=[r2],16         // restore b2
-       ld8 r24=[r3],16         // restore b3
-       ;;
-       ld8 r25=[r2],16         // restore b4
-       ld8 r26=[r3],16         // restore b5
-       ;;
-       ld8 r16=[r2],(SW(PR)-SW(AR_PFS))        // ar.pfs
-       ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))    // ar.lc
-       ;;
-       ld8 r28=[r2]            // restore pr
-       ld8 r30=[r3]            // restore rnat
-       ;;
-       ld8 r18=[r14],16        // restore caller's unat
-       ld8 r19=[r15],24        // restore fpsr
-       ;;
-       ldf.fill f2=[r14],32
-       ldf.fill f3=[r15],32
-       ;;
-       ldf.fill f4=[r14],32
-       ldf.fill f5=[r15],32
-       ;;
-       ldf.fill f12=[r14],32
-       ldf.fill f13=[r15],32
-       ;;
-       ldf.fill f14=[r14],32
-       ldf.fill f15=[r15],32
-       ;;
-       ldf.fill f16=[r14],32
-       ldf.fill f17=[r15],32
-       ;;
-       ldf.fill f18=[r14],32
-       ldf.fill f19=[r15],32
-       mov b0=r21
-       ;;
-       ldf.fill f20=[r14],32
-       ldf.fill f21=[r15],32
-       mov b1=r22
-       ;;
-       ldf.fill f22=[r14],32
-       ldf.fill f23=[r15],32
-       mov b2=r23
-       ;;
-       mov ar.bspstore=r27
-       mov ar.unat=r29         // establish unat holding the NaT bits for r4-r7
-       mov b3=r24
-       ;;
-       ldf.fill f24=[r14],32
-       ldf.fill f25=[r15],32
-       mov b4=r25
-       ;;
-       ldf.fill f26=[r14],32
-       ldf.fill f27=[r15],32
-       mov b5=r26
-       ;;
-       ldf.fill f28=[r14],32
-       ldf.fill f29=[r15],32
-       mov ar.pfs=r16
-       ;;
-       ldf.fill f30=[r14],32
-       ldf.fill f31=[r15],24
-       mov ar.lc=r17
-       ;;
-       ld8.fill r4=[r14],16
-       ld8.fill r5=[r15],16
-       mov pr=r28,-1
-       ;;
-       ld8.fill r6=[r14],16
-       ld8.fill r7=[r15],16
-
-       mov ar.unat=r18                         // restore caller's unat
-       mov ar.rnat=r30                         // must restore after bspstore but before rsc!
-       mov ar.fpsr=r19                         // restore fpsr
-       mov ar.rsc=3                            // put RSE back into eager mode, pl 0
-       br.cond.sptk.many b7
-END(load_switch_stack)
-
-#ifndef XEN
-GLOBAL_ENTRY(__ia64_syscall)
-       .regstk 6,0,0,0
-       mov r15=in5                             // put syscall number in place
-       break __BREAK_SYSCALL
-       movl r2=errno
-       cmp.eq p6,p7=-1,r10
-       ;;
-(p6)   st4 [r2]=r8
-(p6)   mov r8=-1
-       br.ret.sptk.many rp
-END(__ia64_syscall)
-
-GLOBAL_ENTRY(execve)
-       mov r15=__NR_execve                     // put syscall number in place
-       break __BREAK_SYSCALL
-       br.ret.sptk.many rp
-END(execve)
-
-GLOBAL_ENTRY(clone)
-       mov r15=__NR_clone                      // put syscall number in place
-       break __BREAK_SYSCALL
-       br.ret.sptk.many rp
-END(clone)
-
-       /*
-        * Invoke a system call, but do some tracing before and after the call.
-        * We MUST preserve the current register frame throughout this routine
-        * because some system calls (such as ia64_execve) directly
-        * manipulate ar.pfs.
-        */
-GLOBAL_ENTRY(ia64_trace_syscall)
-       PT_REGS_UNWIND_INFO(0)
-       /*
-        * We need to preserve the scratch registers f6-f11 in case the system
-        * call is sigreturn.
-        */
-       adds r16=PT(F6)+16,sp
-       adds r17=PT(F7)+16,sp
-       ;;
-       stf.spill [r16]=f6,32
-       stf.spill [r17]=f7,32
-       ;;
-       stf.spill [r16]=f8,32
-       stf.spill [r17]=f9,32
-       ;;
-       stf.spill [r16]=f10
-       stf.spill [r17]=f11
-       br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
-       adds r16=PT(F6)+16,sp
-       adds r17=PT(F7)+16,sp
-       ;;
-       ldf.fill f6=[r16],32
-       ldf.fill f7=[r17],32
-       ;;
-       ldf.fill f8=[r16],32
-       ldf.fill f9=[r17],32
-       ;;
-       ldf.fill f10=[r16]
-       ldf.fill f11=[r17]
-       // the syscall number may have changed, so re-load it and re-calculate the
-       // syscall entry-point:
-       adds r15=PT(R15)+16,sp                  // r15 = &pt_regs.r15 (syscall #)
-       ;;
-       ld8 r15=[r15]
-       mov r3=NR_syscalls - 1
-       ;;
-       adds r15=-1024,r15
-       movl r16=sys_call_table
-       ;;
-       shladd r20=r15,3,r16                    // r20 = sys_call_table + 8*(syscall-1024)
-       cmp.leu p6,p7=r15,r3
-       ;;
-(p6)   ld8 r20=[r20]                           // load address of syscall entry point
-(p7)   movl r20=sys_ni_syscall
-       ;;
-       mov b6=r20
-       br.call.sptk.many rp=b6                 // do the syscall
-.strace_check_retval:
-       cmp.lt p6,p0=r8,r0                      // syscall failed?
-       adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
-       adds r3=PT(R10)+16,sp                   // r3 = &pt_regs.r10
-       mov r10=0
-(p6)   br.cond.sptk strace_error               // syscall failed ->
-       ;;                                      // avoid RAW on r10
-.strace_save_retval:
-.mem.offset 0,0; st8.spill [r2]=r8             // store return value in slot for r8
-.mem.offset 8,0; st8.spill [r3]=r10            // clear error indication in slot for r10
-       br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-.ret3: br.cond.sptk .work_pending_syscall_end
-
-strace_error:
-       ld8 r3=[r2]                             // load pt_regs.r8
-       sub r9=0,r8                             // negate return value to get errno value
-       ;;
-       cmp.ne p6,p0=r3,r0                      // is pt_regs.r8!=0?
-       adds r3=16,r2                           // r3=&pt_regs.r10
-       ;;
-(p6)   mov r10=-1
-(p6)   mov r8=r9
-       br.cond.sptk .strace_save_retval
-END(ia64_trace_syscall)
-
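The re-dispatch in the middle of ia64_trace_syscall (syscall_trace_enter may
have rewritten r15) is a bounds-checked table lookup.  IA-64 syscall numbers
start at 1024, and the unsigned compare (cmp.leu) folds the below-range case
into the miss path.  As a C sketch -- syscall_fn_t is invented for clarity,
while sys_call_table, sys_ni_syscall and NR_syscalls are the symbols used by
this file:

    typedef long (*syscall_fn_t)(long, long, long, long, long, long, long, long);

    static syscall_fn_t pick_syscall(unsigned long r15)
    {
            unsigned long nr = r15 - 1024;     /* adds r15=-1024,r15 */

            /* unsigned compare: r15 < 1024 wraps to a huge value and
             * fails the bound too, landing in sys_ni_syscall */
            if (nr <= NR_syscalls - 1)
                    return sys_call_table[nr]; /* shladd: table + 8*nr */
            return (syscall_fn_t) sys_ni_syscall;
    }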
-       /*
-        * When traced and returning from sigreturn, we invoke syscall_trace but then
-        * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
-        */
-GLOBAL_ENTRY(ia64_strace_leave_kernel)
-       PT_REGS_UNWIND_INFO(0)
-{      /*
-        * Some versions of gas generate bad unwind info if the first instruction of a
-        * procedure doesn't go into the first slot of a bundle.  This is a workaround.
-        */
-       nop.m 0
-       nop.i 0
-       br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-}
-.ret4: br.cond.sptk ia64_leave_kernel
-END(ia64_strace_leave_kernel)
-#endif
-
-GLOBAL_ENTRY(ia64_ret_from_clone)
-       PT_REGS_UNWIND_INFO(0)
-{      /*
-        * Some versions of gas generate bad unwind info if the first instruction of a
-        * procedure doesn't go into the first slot of a bundle.  This is a workaround.
-        */
-       nop.m 0
-       nop.i 0
-       /*
-        * We need to call schedule_tail() to complete the scheduling process.
-        * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
-        * address of the previously executing task.
-        */
-       br.call.sptk.many rp=ia64_invoke_schedule_tail
-}
-#ifdef XEN
-       // new domains are cloned but not exec'ed so switch to user mode here
-       cmp.ne pKStk,pUStk=r0,r0
-#ifdef CONFIG_VTI
-       br.cond.spnt ia64_leave_hypervisor
-#else // CONFIG_VTI
-       br.cond.spnt ia64_leave_kernel
-#endif // CONFIG_VTI
-#else
-.ret8:
-       adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
-       ;;
-       ld4 r2=[r2]
-       ;;
-       mov r8=0
-       and r2=_TIF_SYSCALL_TRACEAUDIT,r2
-       ;;
-       cmp.ne p6,p0=r2,r0
-(p6)   br.cond.spnt .strace_check_retval
-#endif
-       ;;                                      // added stop bits to prevent r8 dependency
-END(ia64_ret_from_clone)
-       // fall through
-GLOBAL_ENTRY(ia64_ret_from_syscall)
-       PT_REGS_UNWIND_INFO(0)
-       cmp.ge p6,p7=r8,r0                      // syscall executed successfully?
-       adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
-       mov r10=r0                              // clear error indication in r10
-(p7)   br.cond.spnt handle_syscall_error       // handle potential syscall failure
-END(ia64_ret_from_syscall)
-       // fall through
-/*
- * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
- *     need to switch to bank 0 and doesn't restore the scratch registers.
- *     To avoid leaking kernel bits, the scratch registers are set to
- *     the following known-to-be-safe values:
- *
- *               r1: restored (global pointer)
- *               r2: cleared
- *               r3: 1 (when returning to user-level)
- *           r8-r11: restored (syscall return value(s))
- *              r12: restored (user-level stack pointer)
- *              r13: restored (user-level thread pointer)
- *              r14: cleared
- *              r15: restored (syscall #)
- *          r16-r17: cleared
- *              r18: user-level b6
- *              r19: cleared
- *              r20: user-level ar.fpsr
- *              r21: user-level b0
- *              r22: cleared
- *              r23: user-level ar.bspstore
- *              r24: user-level ar.rnat
- *              r25: user-level ar.unat
- *              r26: user-level ar.pfs
- *              r27: user-level ar.rsc
- *              r28: user-level ip
- *              r29: user-level psr
- *              r30: user-level cfm
- *              r31: user-level pr
- *           f6-f11: cleared
- *               pr: restored (user-level pr)
- *               b0: restored (user-level rp)
- *               b6: restored
- *               b7: cleared
- *          ar.unat: restored (user-level ar.unat)
- *           ar.pfs: restored (user-level ar.pfs)
- *           ar.rsc: restored (user-level ar.rsc)
- *          ar.rnat: restored (user-level ar.rnat)
- *      ar.bspstore: restored (user-level ar.bspstore)
- *          ar.fpsr: restored (user-level ar.fpsr)
- *           ar.ccv: cleared
- *           ar.csd: cleared
- *           ar.ssd: cleared
- */
-ENTRY(ia64_leave_syscall)
-       PT_REGS_UNWIND_INFO(0)
-       /*
-        * work.need_resched etc. mustn't get changed by this CPU before it returns to
-        * user- or fsys-mode, hence we disable interrupts early on.
-        *
-        * p6 controls whether current_thread_info()->flags needs to be checked for
-        * extra work.  We always check for extra work when returning to user-level.
-        * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
-        * is 0.  After extra work processing has been completed, execution
-        * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
-        * needs to be redone.
-        */
-#ifdef CONFIG_PREEMPT
-       rsm psr.i                               // disable interrupts
-       cmp.eq pLvSys,p0=r0,r0                  // pLvSys=1: leave from syscall
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-       ;;
-       .pred.rel.mutex pUStk,pKStk
-(pKStk) ld4 r21=[r20]                  // r21 <- preempt_count
-(pUStk)        mov r21=0                       // r21 <- 0
-       ;;
-       cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
-#else /* !CONFIG_PREEMPT */
-(pUStk)        rsm psr.i
-       cmp.eq pLvSys,p0=r0,r0          // pLvSys=1: leave from syscall
-(pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
-#endif
-.work_processed_syscall:
-       adds r2=PT(LOADRS)+16,r12
-       adds r3=PT(AR_BSPSTORE)+16,r12
-#ifdef XEN
-       ;;
-#else
-       adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
-       ;;
-(p6)   ld4 r31=[r18]                           // load current_thread_info()->flags
-#endif
-       ld8 r19=[r2],PT(B6)-PT(LOADRS)          // load ar.rsc value for "loadrs"
-       mov b7=r0               // clear b7
-       ;;
-       ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)    // load ar.bspstore (may be garbage)
-       ld8 r18=[r2],PT(R9)-PT(B6)              // load b6
-#ifndef XEN
-(p6)   and r15=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
-#endif
-       ;;
-       mov r16=ar.bsp                          // M2  get existing backing store pointer
-#ifndef XEN
-(p6)   cmp4.ne.unc p6,p0=r15, r0               // any special work pending?
-(p6)   br.cond.spnt .work_pending_syscall
-#endif
-       ;;
-       // start restoring the state saved on the kernel stack (struct pt_regs):
-       ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
-       ld8 r11=[r3],PT(CR_IIP)-PT(R11)
-       mov f6=f0               // clear f6
-       ;;
-       invala                  // M0|1 invalidate ALAT
-       rsm psr.i | psr.ic      // M2 initiate turning off of interrupt and interruption collection
-       mov f9=f0               // clear f9
-
-       ld8 r29=[r2],16         // load cr.ipsr
-       ld8 r28=[r3],16                 // load cr.iip
-       mov f8=f0               // clear f8
-       ;;
-       ld8 r30=[r2],16         // M0|1 load cr.ifs
-       mov.m ar.ssd=r0         // M2 clear ar.ssd
-       cmp.eq p9,p0=r0,r0      // set p9 to indicate that we should restore cr.ifs
-       ;;
-       ld8 r25=[r3],16         // M0|1 load ar.unat
-       mov.m ar.csd=r0         // M2 clear ar.csd
-       mov r22=r0              // clear r22
-       ;;
-       ld8 r26=[r2],PT(B0)-PT(AR_PFS)  // M0|1 load ar.pfs
-(pKStk)        mov r22=psr             // M2 read PSR now that interrupts are disabled
-       mov f10=f0              // clear f10
-       ;;
-       ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
-       ld8 r27=[r3],PT(PR)-PT(AR_RSC)  // load ar.rsc
-       mov f11=f0              // clear f11
-       ;;
-       ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)    // load ar.rnat (may be garbage)
-       ld8 r31=[r3],PT(R1)-PT(PR)              // load predicates
-(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
-       ;;
-       ld8 r20=[r2],PT(R12)-PT(AR_FPSR)        // load ar.fpsr
-       ld8.fill r1=[r3],16     // load r1
-(pUStk) mov r17=1
-       ;;
-       srlz.d                  // M0  ensure interruption collection is off
-       ld8.fill r13=[r3],16
-       mov f7=f0               // clear f7
-       ;;
-       ld8.fill r12=[r2]       // restore r12 (sp)
-       ld8.fill r15=[r3]       // restore r15
-#ifdef XEN
-       movl r3=THIS_CPU(ia64_phys_stacked_size_p8)
-#else
-       addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
-#endif
-       ;;
-(pUStk)        ld4 r3=[r3]             // r3 = cpu_data->phys_stacked_size_p8
-(pUStk) st1 [r14]=r17
-       mov b6=r18              // I0  restore b6
-       ;;
-       mov r14=r0              // clear r14
-       shr.u r18=r19,16        // I0|1 get byte size of existing "dirty" partition
-(pKStk) br.cond.dpnt.many skip_rbs_switch
-
-       mov.m ar.ccv=r0         // clear ar.ccv
-(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
-       br.cond.sptk.many rbs_switch
-END(ia64_leave_syscall)
-
-#ifdef CONFIG_IA32_SUPPORT
-GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
-       PT_REGS_UNWIND_INFO(0)
-       adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
-       adds r3=PT(R10)+16,sp                   // r3 = &pt_regs.r10
-       ;;
-       .mem.offset 0,0
-       st8.spill [r2]=r8       // store return value in slot for r8 and set unat bit
-       .mem.offset 8,0
-       st8.spill [r3]=r0       // clear error indication in slot for r10 and set unat bit
-END(ia64_ret_from_ia32_execve)
-       // fall through
-#endif /* CONFIG_IA32_SUPPORT */
-GLOBAL_ENTRY(ia64_leave_kernel)
-       PT_REGS_UNWIND_INFO(0)
-       /*
-        * work.need_resched etc. mustn't get changed by this CPU before it returns to
-        * user- or fsys-mode, hence we disable interrupts early on.
-        *
-        * p6 controls whether current_thread_info()->flags needs to be checked for
-        * extra work.  We always check for extra work when returning to user-level.
-        * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
-        * is 0.  After extra work processing has been completed, execution
-        * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check
-        * needs to be redone.
-        */
-#ifdef CONFIG_PREEMPT
-       rsm psr.i                               // disable interrupts
-       cmp.eq p0,pLvSys=r0,r0                  // pLvSys=0: leave from kernel
-(pKStk)        adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-       ;;
-       .pred.rel.mutex pUStk,pKStk
-(pKStk)        ld4 r21=[r20]                   // r21 <- preempt_count
-(pUStk)        mov r21=0                       // r21 <- 0
-       ;;
-       cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
-#else
-(pUStk)        rsm psr.i
-       cmp.eq p0,pLvSys=r0,r0          // pLvSys=0: leave from kernel
-(pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
-#endif
-.work_processed_kernel:
-#ifdef XEN
-       alloc loc0=ar.pfs,0,1,1,0
-       adds out0=16,r12
-       ;;
-(p6)   br.call.sptk.many b0=deliver_pending_interrupt
-       mov ar.pfs=loc0
-       mov r31=r0
-#else
-       adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
-       ;;
-(p6)   ld4 r31=[r17]                           // load current_thread_info()->flags
-#endif
-       adds r21=PT(PR)+16,r12
-       ;;
-
-       lfetch [r21],PT(CR_IPSR)-PT(PR)
-       adds r2=PT(B6)+16,r12
-       adds r3=PT(R16)+16,r12
-       ;;
-       lfetch [r21]
-       ld8 r28=[r2],8          // load b6
-       adds r29=PT(R24)+16,r12
-
-       ld8.fill r16=[r3]
-       adds r30=PT(AR_CCV)+16,r12
-(p6)   and r19=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
-       ;;
-       adds r3=PT(AR_CSD)-PT(R16),r3
-       ld8.fill r24=[r29]
-       ld8 r15=[r30]           // load ar.ccv
-(p6)   cmp4.ne.unc p6,p0=r19, r0               // any special work pending?
-       ;;
-       ld8 r29=[r2],16         // load b7
-       ld8 r30=[r3],16         // load ar.csd
-#ifndef XEN
-(p6)   br.cond.spnt .work_pending
-#endif
-       ;;
-       ld8 r31=[r2],16         // load ar.ssd
-       ld8.fill r8=[r3],16
-       ;;
-       ld8.fill r9=[r2],16
-       ld8.fill r10=[r3],PT(R17)-PT(R10)
-       ;;
-       ld8.fill r11=[r2],PT(R18)-PT(R11)
-       ld8.fill r17=[r3],16
-       ;;
-       ld8.fill r18=[r2],16
-       ld8.fill r19=[r3],16
-       ;;
-       ld8.fill r20=[r2],16
-       ld8.fill r21=[r3],16
-       mov ar.csd=r30
-       mov ar.ssd=r31
-       ;;
-       rsm psr.i | psr.ic      // initiate turning off of interrupt and interruption collection
-       invala                  // invalidate ALAT
-       ;;
-       ld8.fill r22=[r2],24
-       ld8.fill r23=[r3],24
-       mov b6=r28
-       ;;
-       ld8.fill r25=[r2],16
-       ld8.fill r26=[r3],16
-       mov b7=r29
-       ;;
-       ld8.fill r27=[r2],16
-       ld8.fill r28=[r3],16
-       ;;
-       ld8.fill r29=[r2],16
-       ld8.fill r30=[r3],24
-       ;;
-       ld8.fill r31=[r2],PT(F9)-PT(R31)
-       adds r3=PT(F10)-PT(F6),r3
-       ;;
-       ldf.fill f9=[r2],PT(F6)-PT(F9)
-       ldf.fill f10=[r3],PT(F8)-PT(F10)
-       ;;
-       ldf.fill f6=[r2],PT(F7)-PT(F6)
-       ;;
-       ldf.fill f7=[r2],PT(F11)-PT(F7)
-       ldf.fill f8=[r3],32
-       ;;
-       srlz.i                  // ensure interruption collection is off
-       mov ar.ccv=r15
-       ;;
-       ldf.fill f11=[r2]
-       bsw.0                   // switch back to bank 0 (no stop bit required beforehand...)
-       ;;
-(pUStk)        mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
-       adds r16=PT(CR_IPSR)+16,r12
-       adds r17=PT(CR_IIP)+16,r12
-
-(pKStk)        mov r22=psr             // M2 read PSR now that interrupts are disabled
-       nop.i 0
-       nop.i 0
-       ;;
-       ld8 r29=[r16],16        // load cr.ipsr
-       ld8 r28=[r17],16        // load cr.iip
-       ;;
-       ld8 r30=[r16],16        // load cr.ifs
-       ld8 r25=[r17],16        // load ar.unat
-       ;;
-       ld8 r26=[r16],16        // load ar.pfs
-       ld8 r27=[r17],16        // load ar.rsc
-       cmp.eq p9,p0=r0,r0      // set p9 to indicate that we should restore cr.ifs
-       ;;
-       ld8 r24=[r16],16        // load ar.rnat (may be garbage)
-       ld8 r23=[r17],16        // load ar.bspstore (may be garbage)
-       ;;
-       ld8 r31=[r16],16        // load predicates
-       ld8 r21=[r17],16        // load b0
-       ;;
-       ld8 r19=[r16],16        // load ar.rsc value for "loadrs"
-       ld8.fill r1=[r17],16    // load r1
-       ;;
-       ld8.fill r12=[r16],16
-       ld8.fill r13=[r17],16
-(pUStk)        adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
-       ;;
-       ld8 r20=[r16],16        // ar.fpsr
-       ld8.fill r15=[r17],16
-       ;;
-       ld8.fill r14=[r16],16
-       ld8.fill r2=[r17]
-(pUStk)        mov r17=1
-       ;;
-       ld8.fill r3=[r16]
-(pUStk)        st1 [r18]=r17           // restore current->thread.on_ustack
-       shr.u r18=r19,16        // get byte size of existing "dirty" partition
-       ;;
-       mov r16=ar.bsp          // get existing backing store pointer
-#ifdef XEN
-       movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
-#else
-       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
-#endif
-       ;;
-       ld4 r17=[r17]           // r17 = cpu_data->phys_stacked_size_p8
-(pKStk)        br.cond.dpnt skip_rbs_switch
-
-       /*
-        * Restore user backing store.
-        *
-        * NOTE: alloc, loadrs, and cover can't be predicated.
-        */
-(pNonSys) br.cond.dpnt dont_preserve_current_frame
-
-rbs_switch:
-       cover                           // add current frame into dirty partition and set cr.ifs
-       ;;
-       mov r19=ar.bsp                  // get new backing store pointer
-       sub r16=r16,r18                 // krbs = old bsp - size of dirty partition
-       cmp.ne p9,p0=r0,r0              // clear p9 to skip restore of cr.ifs
-       ;;
-       sub r19=r19,r16                 // calculate total byte size of dirty partition
-       add r18=64,r18                  // don't force in0-in7 into memory...
-       ;;
-       shl r19=r19,16                  // shift size of dirty partition into loadrs position
-       ;;
-dont_preserve_current_frame:
-       /*
-        * To prevent leaking bits between the kernel and user-space,
-        * we must clear the stacked registers in the "invalid" partition here.
-        * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
-        * 5 registers/cycle on McKinley).
-        */
-#      define pRecurse p6
-#      define pReturn  p7
-#ifdef CONFIG_ITANIUM
-#      define Nregs    10
-#else
-#      define Nregs    14
-#endif
-       alloc loc0=ar.pfs,2,Nregs-2,2,0
-       shr.u loc1=r18,9                // RNaTslots <= floor(dirtySize / (64*8))
-       sub r17=r17,r18                 // r17 = (physStackedSize + 8) - dirtySize
-       ;;
-       mov ar.rsc=r19                  // load ar.rsc to be used for "loadrs"
-       shladd in0=loc1,3,r17
-       mov in1=0
-       ;;
-       TEXT_ALIGN(32)
-rse_clear_invalid:
-#ifdef CONFIG_ITANIUM
-       // cycle 0
- { .mii
-       alloc loc0=ar.pfs,2,Nregs-2,2,0
-       cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
-       add out0=-Nregs*8,in0
-}{ .mfb
-       add out1=1,in1                  // increment recursion count
-       nop.f 0
-       nop.b 0                         // can't do br.call here because of alloc (WAW on CFM)
-       ;;
-}{ .mfi        // cycle 1
-       mov loc1=0
-       nop.f 0
-       mov loc2=0
-}{ .mib
-       mov loc3=0
-       mov loc4=0
-(pRecurse) br.call.sptk.many b0=rse_clear_invalid
-
-}{ .mfi        // cycle 2
-       mov loc5=0
-       nop.f 0
-       cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
-}{ .mib
-       mov loc6=0
-       mov loc7=0
-(pReturn) br.ret.sptk.many b0
-}
-#else /* !CONFIG_ITANIUM */
-       alloc loc0=ar.pfs,2,Nregs-2,2,0
-       cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
-       add out0=-Nregs*8,in0
-       add out1=1,in1                  // increment recursion count
-       mov loc1=0
-       mov loc2=0
-       ;;
-       mov loc3=0
-       mov loc4=0
-       mov loc5=0
-       mov loc6=0
-       mov loc7=0
-(pRecurse) br.call.sptk.few b0=rse_clear_invalid
-       ;;
-       mov loc8=0
-       mov loc9=0
-       cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
-       mov loc10=0
-       mov loc11=0
-(pReturn) br.ret.sptk.many b0
-#endif /* !CONFIG_ITANIUM */
-#      undef pRecurse
-#      undef pReturn
-       ;;
-       alloc r17=ar.pfs,0,0,0,0        // drop current register frame
-       ;;
-       loadrs
-       ;;
-skip_rbs_switch:
-       mov ar.unat=r25         // M2
-(pKStk)        extr.u r22=r22,21,1     // I0 extract current value of psr.pp from r22
-(pLvSys)mov r19=r0             // A  clear r19 for leave_syscall, no-op otherwise
-       ;;
-(pUStk)        mov ar.bspstore=r23     // M2
-(pKStk)        dep r29=r22,r29,21,1    // I0 update ipsr.pp with psr.pp
-(pLvSys)mov r16=r0             // A  clear r16 for leave_syscall, no-op otherwise
-       ;;
-       mov cr.ipsr=r29         // M2
-       mov ar.pfs=r26          // I0
-(pLvSys)mov r17=r0             // A  clear r17 for leave_syscall, no-op otherwise
-
-(p9)   mov cr.ifs=r30          // M2
-       mov b0=r21              // I0
-(pLvSys)mov r18=r0             // A  clear r18 for leave_syscall, no-op otherwise
-
-       mov ar.fpsr=r20         // M2
-       mov cr.iip=r28          // M2
-       nop 0
-       ;;
-(pUStk)        mov ar.rnat=r24         // M2 must happen with RSE in lazy mode
-       nop 0
-(pLvSys)mov r2=r0
-
-       mov ar.rsc=r27          // M2
-       mov pr=r31,-1           // I0
-       rfi                     // B
-
-#ifndef XEN
-       /*
-        * On entry:
-        *      r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
-        *      r31 = current->thread_info->flags
-        * On exit:
-        *      p6 = TRUE if work-pending-check needs to be redone
-        */
-.work_pending_syscall:
-       add r2=-8,r2
-       add r3=-8,r3
-       ;;
-       st8 [r2]=r8
-       st8 [r3]=r10
-.work_pending:
-       tbit.nz p6,p0=r31,TIF_SIGDELAYED                // signal delayed from MCA/INIT/NMI/PMI context?
-(p6)   br.cond.sptk.few .sigdelayed
-       ;;
-       tbit.z p6,p0=r31,TIF_NEED_RESCHED               // current_thread_info()->need_resched==0?
-(p6)   br.cond.sptk.few .notify
-#ifdef CONFIG_PREEMPT
-(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
-       ;;
-(pKStk) st4 [r20]=r21
-       ssm psr.i               // enable interrupts
-#endif
-       br.call.spnt.many rp=schedule
-.ret9: cmp.eq p6,p0=r0,r0                              // p6 <- 1
-       rsm psr.i               // disable interrupts
-       ;;
-#ifdef CONFIG_PREEMPT
-(pKStk)        adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-       ;;
-(pKStk)        st4 [r20]=r0            // preempt_count() <- 0
-#endif
-(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
-       br.cond.sptk.many .work_processed_kernel        // re-check
-
-.notify:
-(pUStk)        br.call.spnt.many rp=notify_resume_user
-.ret10:        cmp.ne p6,p0=r0,r0                              // p6 <- 0
-(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
-       br.cond.sptk.many .work_processed_kernel        // don't re-check
-
-// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
-// it could not be delivered.  Deliver it now.  The signal might be for us and
-// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
-// signal.
-
-.sigdelayed:
-       br.call.sptk.many rp=do_sigdelayed
-       cmp.eq p6,p0=r0,r0                              // p6 <- 1, always re-check
-(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
-       br.cond.sptk.many .work_processed_kernel        // re-check
-
-.work_pending_syscall_end:
-       adds r2=PT(R8)+16,r12
-       adds r3=PT(R10)+16,r12
-       ;;
-       ld8 r8=[r2]
-       ld8 r10=[r3]
-       br.cond.sptk.many .work_processed_syscall       // re-check
-#endif
-
-END(ia64_leave_kernel)
-
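Condensed into C, the non-XEN work-pending machinery above is a simple loop
over current_thread_info()->flags (a control-flow sketch only; the _sketch
helper and the returning_to_user flag stand in for the assembly labels and
the pUStk predicate):

    for (;;) {
            unsigned long flags = read_thread_flags_sketch();

            if (flags & _TIF_SIGDELAYED) {
                    do_sigdelayed();   /* deliver the MCA/INIT/NMI/PMI-delayed
                                          signal, then always re-check */
                    continue;
            }
            if (flags & _TIF_NEED_RESCHED) {
                    schedule();        /* interrupts re-enabled around the call */
                    continue;          /* re-check */
            }
            if (returning_to_user)
                    notify_resume_user();   /* signals etc.; no re-check */
            break;
    }

Under XEN none of this exists: the #ifdef XEN branch of .work_processed_kernel
just calls deliver_pending_interrupt and falls through to the restore path.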
-ENTRY(handle_syscall_error)
-       /*
-        * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
-        * lead us to mistake a negative return value as a failed syscall.  Those syscalls
-        * must deposit a non-zero value in pt_regs.r8 to indicate an error.  If
-        * pt_regs.r8 is zero, we assume that the call completed successfully.
-        */
-       PT_REGS_UNWIND_INFO(0)
-       ld8 r3=[r2]             // load pt_regs.r8
-       ;;
-       cmp.eq p6,p7=r3,r0      // is pt_regs.r8==0?
-       ;;
-(p7)   mov r10=-1
-(p7)   sub r8=0,r8             // negate return value to get errno
-       br.cond.sptk ia64_leave_syscall
-END(handle_syscall_error)
-
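The convention being enforced here: syscalls that may legitimately return
negative values flag real failures by storing a non-zero value into their
pt_regs.r8 slot; only then does the exit path convert the return value into
the -1/errno form userland expects.  In C:

    /* Sketch of handle_syscall_error: 'ret' is the live (negative) return
     * value in r8, regs->r8 is the saved slot the syscall may have set. */
    if (regs->r8 != 0) {       /* the syscall really did fail */
            r10 = -1;          /* error indication for userland */
            ret = -ret;        /* hand back the positive errno */
    }
    /* regs->r8 == 0: the negative value was a genuine result,
       so ret and r10 are left alone */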
-       /*
-        * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
-        * in case a system call gets restarted.
-        */
-GLOBAL_ENTRY(ia64_invoke_schedule_tail)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-       alloc loc1=ar.pfs,8,2,1,0
-       mov loc0=rp
-       mov out0=r8                             // Address of previous task
-       ;;
-       br.call.sptk.many rp=schedule_tail
-.ret11:        mov ar.pfs=loc1
-       mov rp=loc0
-       br.ret.sptk.many rp
-END(ia64_invoke_schedule_tail)
-
-#ifndef XEN
-       /*
-        * Setup stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
-        * be set up by the caller.  We declare 8 input registers so the system call
-        * args get preserved, in case we need to restart a system call.
-        */
-ENTRY(notify_resume_user)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-       alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
-       mov r9=ar.unat
-       mov loc0=rp                             // save return address
-       mov out0=0                              // there is no "oldset"
-       adds out1=8,sp                          // out1=&sigscratch->ar_pfs
-(pSys) mov out2=1                              // out2==1 => we're in a syscall
-       ;;
-(pNonSys) mov out2=0                           // out2==0 => not a syscall
-       .fframe 16
-       .spillpsp ar.unat, 16                   // (note that offset is relative to psp+0x10!)
-       st8 [sp]=r9,-16                         // allocate space for ar.unat and save it
-       st8 [out1]=loc1,-8                      // save ar.pfs, out1=&sigscratch
-       .body
-       br.call.sptk.many rp=do_notify_resume_user
-.ret15:        .restore sp
-       adds sp=16,sp                           // pop scratch stack space
-       ;;
-       ld8 r9=[sp]                             // load new unat from sigscratch->scratch_unat
-       mov rp=loc0
-       ;;
-       mov ar.unat=r9
-       mov ar.pfs=loc1
-       br.ret.sptk.many rp
-END(notify_resume_user)
-
-GLOBAL_ENTRY(sys_rt_sigsuspend)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-       alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
-       mov r9=ar.unat
-       mov loc0=rp                             // save return address
-       mov out0=in0                            // mask
-       mov out1=in1                            // sigsetsize
-       adds out2=8,sp                          // out2=&sigscratch->ar_pfs
-       ;;
-       .fframe 16
-       .spillpsp ar.unat, 16                   // (note that offset is relative to psp+0x10!)
-       st8 [sp]=r9,-16                         // allocate space for ar.unat and save it
-       st8 [out2]=loc1,-8                      // save ar.pfs, out2=&sigscratch
-       .body
-       br.call.sptk.many rp=ia64_rt_sigsuspend
-.ret17:        .restore sp
-       adds sp=16,sp                           // pop scratch stack space
-       ;;
-       ld8 r9=[sp]                             // load new unat from sw->caller_unat
-       mov rp=loc0
-       ;;
-       mov ar.unat=r9
-       mov ar.pfs=loc1
-       br.ret.sptk.many rp
-END(sys_rt_sigsuspend)
-
-ENTRY(sys_rt_sigreturn)
-       PT_REGS_UNWIND_INFO(0)
-       /*
-        * Allocate 8 input registers since ptrace() may clobber them
-        */
-       alloc r2=ar.pfs,8,0,1,0
-       .prologue
-       PT_REGS_SAVES(16)
-       adds sp=-16,sp
-       .body
-       cmp.eq pNonSys,pSys=r0,r0               // sigreturn isn't a normal syscall...
-       ;;
-       /*
-        * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
-        * syscall-entry path does not save them we save them here instead.  Note: we
-        * don't need to save any other registers that are not saved by the stream-lined
-        * syscall path, because restore_sigcontext() restores them.
-        */
-       adds r16=PT(F6)+32,sp
-       adds r17=PT(F7)+32,sp
-       ;;
-       stf.spill [r16]=f6,32
-       stf.spill [r17]=f7,32
-       ;;
-       stf.spill [r16]=f8,32
-       stf.spill [r17]=f9,32
-       ;;
-       stf.spill [r16]=f10
-       stf.spill [r17]=f11
-       adds out0=16,sp                         // out0 = &sigscratch
-       br.call.sptk.many rp=ia64_rt_sigreturn
-.ret19:        .restore sp 0
-       adds sp=16,sp
-       ;;
-       ld8 r9=[sp]                             // load new ar.unat
-       mov.sptk b7=r8,ia64_leave_kernel
-       ;;
-       mov ar.unat=r9
-       br.many b7
-END(sys_rt_sigreturn)
-#endif
-
-GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
-       .prologue
-       /*
-        * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
-        */
-       mov r16=r0
-       DO_SAVE_SWITCH_STACK
-       br.call.sptk.many rp=ia64_handle_unaligned      // stack frame setup in ivt
-.ret21:        .body
-       DO_LOAD_SWITCH_STACK
-       br.cond.sptk.many rp                            // goes to ia64_leave_kernel
-END(ia64_prepare_handle_unaligned)
-
-#ifndef XEN
-       //
-       // unw_init_running(void (*callback)(info, arg), void *arg)
-       //
-#      define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)
-
-GLOBAL_ENTRY(unw_init_running)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
-       alloc loc1=ar.pfs,2,3,3,0
-       ;;
-       ld8 loc2=[in0],8
-       mov loc0=rp
-       mov r16=loc1
-       DO_SAVE_SWITCH_STACK
-       .body
-
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
-       .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
-       SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
-       adds sp=-EXTRA_FRAME_SIZE,sp
-       .body
-       ;;
-       adds out0=16,sp                         // &info
-       mov out1=r13                            // current
-       adds out2=16+EXTRA_FRAME_SIZE,sp        // &switch_stack
-       br.call.sptk.many rp=unw_init_frame_info
-1:     adds out0=16,sp                         // &info
-       mov b6=loc2
-       mov loc2=gp                             // save gp across indirect function call
-       ;;
-       ld8 gp=[in0]
-       mov out1=in1                            // arg
-       br.call.sptk.many rp=b6                 // invoke the callback function
-1:     mov gp=loc2                             // restore gp
-
-       // For now, we don't allow changing registers from within
-       // unw_init_running; if we ever want to allow that, we'd
-       // have to do a load_switch_stack here:
-       .restore sp
-       adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp
-
-       mov ar.pfs=loc1
-       mov rp=loc0
-       br.ret.sptk.many rp
-END(unw_init_running)
-
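unw_init_running() saves a full switch_stack so the unwinder can see the
caller's preserved registers, builds an unw_frame_info on the memory stack,
and hands it to the callback; the out0..out2 setup above corresponds to:

    /* Sketch; 'sw' names the switch_stack the asm just saved (out2). */
    void unw_init_running_sketch(void (*callback)(struct unw_frame_info *, void *),
                                 void *arg)
    {
            struct unw_frame_info info;

            unw_init_frame_info(&info, current, sw);
            (*callback)(&info, arg);
            /* register changes made through the unwinder are deliberately
             * not reloaded -- see the load_switch_stack remark above */
    }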
-       .rodata
-       .align 8
-       .globl sys_call_table
-sys_call_table:
-       data8 sys_ni_syscall            //  This must be sys_ni_syscall!  See ivt.S.
-       data8 sys_exit                          // 1025
-       data8 sys_read
-       data8 sys_write
-       data8 sys_open
-       data8 sys_close
-       data8 sys_creat                         // 1030
-       data8 sys_link
-       data8 sys_unlink
-       data8 ia64_execve
-       data8 sys_chdir
-       data8 sys_fchdir                        // 1035
-       data8 sys_utimes
-       data8 sys_mknod
-       data8 sys_chmod
-       data8 sys_chown
-       data8 sys_lseek                         // 1040
-       data8 sys_getpid
-       data8 sys_getppid
-       data8 sys_mount
-       data8 sys_umount
-       data8 sys_setuid                        // 1045
-       data8 sys_getuid
-       data8 sys_geteuid
-       data8 sys_ptrace
-       data8 sys_access
-       data8 sys_sync                          // 1050
-       data8 sys_fsync
-       data8 sys_fdatasync
-       data8 sys_kill
-       data8 sys_rename
-       data8 sys_mkdir                         // 1055
-       data8 sys_rmdir
-       data8 sys_dup
-       data8 sys_pipe
-       data8 sys_times
-       data8 ia64_brk                          // 1060
-       data8 sys_setgid
-       data8 sys_getgid
-       data8 sys_getegid
-       data8 sys_acct
-       data8 sys_ioctl                         // 1065
-       data8 sys_fcntl
-       data8 sys_umask
-       data8 sys_chroot
-       data8 sys_ustat
-       data8 sys_dup2                          // 1070
-       data8 sys_setreuid
-       data8 sys_setregid
-       data8 sys_getresuid
-       data8 sys_setresuid
-       data8 sys_getresgid                     // 1075
-       data8 sys_setresgid
-       data8 sys_getgroups
-       data8 sys_setgroups
-       data8 sys_getpgid
-       data8 sys_setpgid                       // 1080
-       data8 sys_setsid
-       data8 sys_getsid
-       data8 sys_sethostname
-       data8 sys_setrlimit
-       data8 sys_getrlimit                     // 1085
-       data8 sys_getrusage
-       data8 sys_gettimeofday
-       data8 sys_settimeofday
-       data8 sys_select
-       data8 sys_poll                          // 1090
-       data8 sys_symlink
-       data8 sys_readlink
-       data8 sys_uselib
-       data8 sys_swapon
-       data8 sys_swapoff                       // 1095
-       data8 sys_reboot
-       data8 sys_truncate
-       data8 sys_ftruncate
-       data8 sys_fchmod
-       data8 sys_fchown                        // 1100
-       data8 ia64_getpriority
-       data8 sys_setpriority
-       data8 sys_statfs
-       data8 sys_fstatfs
-       data8 sys_gettid                        // 1105
-       data8 sys_semget
-       data8 sys_semop
-       data8 sys_semctl
-       data8 sys_msgget
-       data8 sys_msgsnd                        // 1110
-       data8 sys_msgrcv
-       data8 sys_msgctl
-       data8 sys_shmget
-       data8 ia64_shmat
-       data8 sys_shmdt                         // 1115
-       data8 sys_shmctl
-       data8 sys_syslog
-       data8 sys_setitimer
-       data8 sys_getitimer
-       data8 sys_ni_syscall                    // 1120         /* was: ia64_oldstat */
-       data8 sys_ni_syscall                                    /* was: ia64_oldlstat */
-       data8 sys_ni_syscall                                    /* was: ia64_oldfstat */
-       data8 sys_vhangup
-       data8 sys_lchown
-       data8 sys_remap_file_pages              // 1125
-       data8 sys_wait4
-       data8 sys_sysinfo
-       data8 sys_clone
-       data8 sys_setdomainname
-       data8 sys_newuname                      // 1130
-       data8 sys_adjtimex
-       data8 sys_ni_syscall                                    /* was: ia64_create_module */
-       data8 sys_init_module
-       data8 sys_delete_module
-       data8 sys_ni_syscall                    // 1135         /* was: sys_get_kernel_syms */
-       data8 sys_ni_syscall                                    /* was: sys_query_module */
-       data8 sys_quotactl
-       data8 sys_bdflush
-       data8 sys_sysfs
-       data8 sys_personality                   // 1140
-       data8 sys_ni_syscall            // sys_afs_syscall
-       data8 sys_setfsuid
-       data8 sys_setfsgid
-       data8 sys_getdents
-       data8 sys_flock                         // 1145
-       data8 sys_readv
-       data8 sys_writev
-       data8 sys_pread64
-       data8 sys_pwrite64
-       data8 sys_sysctl                        // 1150
-       data8 sys_mmap
-       data8 sys_munmap
-       data8 sys_mlock
-       data8 sys_mlockall
-       data8 sys_mprotect                      // 1155
-       data8 ia64_mremap
-       data8 sys_msync
-       data8 sys_munlock
-       data8 sys_munlockall
-       data8 sys_sched_getparam                // 1160
-       data8 sys_sched_setparam
-       data8 sys_sched_getscheduler
-       data8 sys_sched_setscheduler
-       data8 sys_sched_yield
-       data8 sys_sched_get_priority_max        // 1165
-       data8 sys_sched_get_priority_min
-       data8 sys_sched_rr_get_interval
-       data8 sys_nanosleep
-       data8 sys_nfsservctl
-       data8 sys_prctl                         // 1170
-       data8 sys_getpagesize
-       data8 sys_mmap2
-       data8 sys_pciconfig_read
-       data8 sys_pciconfig_write
-       data8 sys_perfmonctl                    // 1175
-       data8 sys_sigaltstack
-       data8 sys_rt_sigaction
-       data8 sys_rt_sigpending
-       data8 sys_rt_sigprocmask
-       data8 sys_rt_sigqueueinfo               // 1180
-       data8 sys_rt_sigreturn
-       data8 sys_rt_sigsuspend
-       data8 sys_rt_sigtimedwait
-       data8 sys_getcwd
-       data8 sys_capget                        // 1185
-       data8 sys_capset
-       data8 sys_sendfile64
-       data8 sys_ni_syscall            // sys_getpmsg (STREAMS)
-       data8 sys_ni_syscall            // sys_putpmsg (STREAMS)
-       data8 sys_socket                        // 1190
-       data8 sys_bind
-       data8 sys_connect
-       data8 sys_listen
-       data8 sys_accept
-       data8 sys_getsockname                   // 1195
-       data8 sys_getpeername
-       data8 sys_socketpair
-       data8 sys_send
-       data8 sys_sendto
-       data8 sys_recv                          // 1200
-       data8 sys_recvfrom
-       data8 sys_shutdown
-       data8 sys_setsockopt
-       data8 sys_getsockopt
-       data8 sys_sendmsg                       // 1205
-       data8 sys_recvmsg
-       data8 sys_pivot_root
-       data8 sys_mincore
-       data8 sys_madvise
-       data8 sys_newstat                       // 1210
-       data8 sys_newlstat
-       data8 sys_newfstat
-       data8 sys_clone2
-       data8 sys_getdents64
-       data8 sys_getunwind                     // 1215
-       data8 sys_readahead
-       data8 sys_setxattr
-       data8 sys_lsetxattr
-       data8 sys_fsetxattr
-       data8 sys_getxattr                      // 1220
-       data8 sys_lgetxattr
-       data8 sys_fgetxattr
-       data8 sys_listxattr
-       data8 sys_llistxattr
-       data8 sys_flistxattr                    // 1225
-       data8 sys_removexattr
-       data8 sys_lremovexattr
-       data8 sys_fremovexattr
-       data8 sys_tkill
-       data8 sys_futex                         // 1230
-       data8 sys_sched_setaffinity
-       data8 sys_sched_getaffinity
-       data8 sys_set_tid_address
-       data8 sys_fadvise64_64
-       data8 sys_tgkill                        // 1235
-       data8 sys_exit_group
-       data8 sys_lookup_dcookie
-       data8 sys_io_setup
-       data8 sys_io_destroy
-       data8 sys_io_getevents                  // 1240
-       data8 sys_io_submit
-       data8 sys_io_cancel
-       data8 sys_epoll_create
-       data8 sys_epoll_ctl
-       data8 sys_epoll_wait                    // 1245
-       data8 sys_restart_syscall
-       data8 sys_semtimedop
-       data8 sys_timer_create
-       data8 sys_timer_settime
-       data8 sys_timer_gettime                 // 1250
-       data8 sys_timer_getoverrun
-       data8 sys_timer_delete
-       data8 sys_clock_settime
-       data8 sys_clock_gettime
-       data8 sys_clock_getres                  // 1255
-       data8 sys_clock_nanosleep
-       data8 sys_fstatfs64
-       data8 sys_statfs64
-       data8 sys_mbind
-       data8 sys_get_mempolicy                 // 1260
-       data8 sys_set_mempolicy
-       data8 sys_mq_open
-       data8 sys_mq_unlink
-       data8 sys_mq_timedsend
-       data8 sys_mq_timedreceive               // 1265
-       data8 sys_mq_notify
-       data8 sys_mq_getsetattr
-       data8 sys_ni_syscall                    // reserved for kexec_load
-       data8 sys_ni_syscall                    // reserved for vserver
-       data8 sys_waitid                        // 1270
-       data8 sys_add_key
-       data8 sys_request_key
-       data8 sys_keyctl
-       data8 sys_ni_syscall
-       data8 sys_ni_syscall                    // 1275
-       data8 sys_ni_syscall
-       data8 sys_ni_syscall
-       data8 sys_ni_syscall
-       data8 sys_ni_syscall
-
-       .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
-#endif
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/entry.h
--- a/xen/arch/ia64/entry.h     Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,97 +0,0 @@
-#include <linux/config.h>
-
-/*
- * Preserved registers that are shared between code in ivt.S and
- * entry.S.  Be careful not to step on these!
- */
-#define PRED_LEAVE_SYSCALL     1 /* TRUE iff leave from syscall */
-#define PRED_KERNEL_STACK      2 /* returning to kernel-stacks? */
-#define PRED_USER_STACK                3 /* returning to user-stacks? */
-#ifdef CONFIG_VTI
-#define PRED_EMUL              2 /* Need to save r4-r7 for inst emulation */
-#define PRED_NON_EMUL          3 /* No need to save r4-r7 for normal path */
-#define PRED_BN0               6 /* Guest is in bank 0 */
-#define PRED_BN1               7 /* Guest is in bank 1 */
-#endif // CONFIG_VTI
-#define PRED_SYSCALL           4 /* inside a system call? */
-#define PRED_NON_SYSCALL       5 /* complement of PRED_SYSCALL */
-
-#ifdef __ASSEMBLY__
-# define PASTE2(x,y)   x##y
-# define PASTE(x,y)    PASTE2(x,y)
-
-# define pLvSys                PASTE(p,PRED_LEAVE_SYSCALL)
-# define pKStk         PASTE(p,PRED_KERNEL_STACK)
-# define pUStk         PASTE(p,PRED_USER_STACK)
-#ifdef CONFIG_VTI
-# define pEml          PASTE(p,PRED_EMUL)
-# define pNonEml       PASTE(p,PRED_NON_EMUL)
-# define pBN0          PASTE(p,PRED_BN0)
-# define pBN1          PASTE(p,PRED_BN1)
-#endif // CONFIG_VTI
-# define pSys          PASTE(p,PRED_SYSCALL)
-# define pNonSys       PASTE(p,PRED_NON_SYSCALL)
-#endif
-
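The PASTE/PASTE2 pair below is the usual two-level token-pasting idiom: the
indirection forces the numeric PRED_* macro to expand before ## glues it to
the 'p' prefix.  For example:

    #define PASTE2(x,y) x##y
    #define PASTE(x,y)  PASTE2(x,y)

    /* With PRED_LEAVE_SYSCALL defined as 1:
     *   PASTE(p, PRED_LEAVE_SYSCALL)
     *     -> PASTE2(p, 1)   (the argument expands at the outer level)
     *     -> p1             (the predicate register behind pLvSys)
     * A single-level p##PRED_LEAVE_SYSCALL would paste the unexpanded
     * name and yield pPRED_LEAVE_SYSCALL instead. */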
-#define PT(f)          (IA64_PT_REGS_##f##_OFFSET)
-#define SW(f)          (IA64_SWITCH_STACK_##f##_OFFSET)
-#ifdef CONFIG_VTI
-#define VPD(f)      (VPD_##f##_START_OFFSET)
-#endif // CONFIG_VTI
-
-#define PT_REGS_SAVES(off)                     \
-       .unwabi 3, 'i';                         \
-       .fframe IA64_PT_REGS_SIZE+16+(off);     \
-       .spillsp rp, PT(CR_IIP)+16+(off);       \
-       .spillsp ar.pfs, PT(CR_IFS)+16+(off);   \
-       .spillsp ar.unat, PT(AR_UNAT)+16+(off); \
-       .spillsp ar.fpsr, PT(AR_FPSR)+16+(off); \
-       .spillsp pr, PT(PR)+16+(off);
-
-#define PT_REGS_UNWIND_INFO(off)               \
-       .prologue;                              \
-       PT_REGS_SAVES(off);                     \
-       .body
-
-#define SWITCH_STACK_SAVES(off)                                                \
-       .savesp ar.unat,SW(CALLER_UNAT)+16+(off);                               \
-       .savesp ar.fpsr,SW(AR_FPSR)+16+(off);                                   \
-       .spillsp f2,SW(F2)+16+(off); .spillsp f3,SW(F3)+16+(off);               \
-       .spillsp f4,SW(F4)+16+(off); .spillsp f5,SW(F5)+16+(off);               \
-       .spillsp f16,SW(F16)+16+(off); .spillsp f17,SW(F17)+16+(off);           \
-       .spillsp f18,SW(F18)+16+(off); .spillsp f19,SW(F19)+16+(off);           \
-       .spillsp f20,SW(F20)+16+(off); .spillsp f21,SW(F21)+16+(off);           \
-       .spillsp f22,SW(F22)+16+(off); .spillsp f23,SW(F23)+16+(off);           \
-       .spillsp f24,SW(F24)+16+(off); .spillsp f25,SW(F25)+16+(off);           \
-       .spillsp f26,SW(F26)+16+(off); .spillsp f27,SW(F27)+16+(off);           \
-       .spillsp f28,SW(F28)+16+(off); .spillsp f29,SW(F29)+16+(off);           \
-       .spillsp f30,SW(F30)+16+(off); .spillsp f31,SW(F31)+16+(off);           \
-       .spillsp r4,SW(R4)+16+(off); .spillsp r5,SW(R5)+16+(off);               \
-       .spillsp r6,SW(R6)+16+(off); .spillsp r7,SW(R7)+16+(off);               \
-       .spillsp b0,SW(B0)+16+(off); .spillsp b1,SW(B1)+16+(off);               \
-       .spillsp b2,SW(B2)+16+(off); .spillsp b3,SW(B3)+16+(off);               \
-       .spillsp b4,SW(B4)+16+(off); .spillsp b5,SW(B5)+16+(off);               \
-       .spillsp ar.pfs,SW(AR_PFS)+16+(off); .spillsp ar.lc,SW(AR_LC)+16+(off); \
-       .spillsp @priunat,SW(AR_UNAT)+16+(off);                                 \
-       .spillsp ar.rnat,SW(AR_RNAT)+16+(off);                                  \
-       .spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off);                          \
-       .spillsp pr,SW(PR)+16+(off))
-
-#define DO_SAVE_SWITCH_STACK                   \
-       movl r28=1f;                            \
-       ;;                                      \
-       .fframe IA64_SWITCH_STACK_SIZE;         \
-       adds sp=-IA64_SWITCH_STACK_SIZE,sp;     \
-       mov.ret.sptk b7=r28,1f;                 \
-       SWITCH_STACK_SAVES(0);                  \
-       br.cond.sptk.many save_switch_stack;    \
-1:
-
-#define DO_LOAD_SWITCH_STACK                   \
-       movl r28=1f;                            \
-       ;;                                      \
-       invala;                                 \
-       mov.ret.sptk b7=r28,1f;                 \
-       br.cond.sptk.many load_switch_stack;    \
-1:     .restore sp;                            \
-       adds sp=IA64_SWITCH_STACK_SIZE,sp
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/head.S
--- a/xen/arch/ia64/head.S      Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,1026 +0,0 @@
-/*
- * Here is where the ball gets rolling as far as the kernel is concerned.
- * When control is transferred to _start, the bootload has already
- * loaded us to the correct address.  All that's left to do here is
- * to set up the kernel's global pointer and jump to the kernel
- * entry point.
- *
- * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- * Copyright (C) 1999 Intel Corp.
- * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@xxxxxxxxx>
- * Copyright (C) 1999 Don Dugger <Don.Dugger@xxxxxxxxx>
- * Copyright (C) 2002 Fenghua Yu <fenghua.yu@xxxxxxxxx>
- *   -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
- */
-
-#include <linux/config.h>
-
-#include <asm/asmmacro.h>
-#include <asm/fpu.h>
-#include <asm/kregs.h>
-#include <asm/mmu_context.h>
-#include <asm/offsets.h>
-#include <asm/pal.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-
-       .section __special_page_section,"ax"
-
-       .global empty_zero_page
-empty_zero_page:
-       .skip PAGE_SIZE
-
-       .global swapper_pg_dir
-swapper_pg_dir:
-       .skip PAGE_SIZE
-
-       .rodata
-halt_msg:
-       stringz "Halting kernel\n"
-
-       .text
-
-       .global start_ap
-
-       /*
-        * Start the kernel.  When the bootloader passes control to _start(), r28
-        * points to the address of the boot parameter area.  Execution reaches
-        * here in physical mode.
-        */
-GLOBAL_ENTRY(_start)
-start_ap:
-       .prologue
-       .save rp, r0            // terminate unwind chain with a NULL rp
-       .body
-
-       rsm psr.i | psr.ic
-       ;;
-       srlz.i
-       ;;
-       /*
-        * Initialize kernel region registers:
-        *      rr[0]: VHPT enabled, page size = PAGE_SHIFT
-        *      rr[1]: VHPT enabled, page size = PAGE_SHIFT
-        *      rr[2]: VHPT enabled, page size = PAGE_SHIFT
-        *      rr[3]: VHPT enabled, page size = PAGE_SHIFT
-        *      rr[4]: VHPT enabled, page size = PAGE_SHIFT
-        *      rr[5]: VHPT enabled, page size = PAGE_SHIFT
-        *      rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
-        *      rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
-        * We initialize all of them to prevent inadvertently assuming
-        * something about the state of address translation early in boot.
-        */
-       movl r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r7=(0<<61)
-       movl r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r9=(1<<61)
-       movl r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r11=(2<<61)
-       movl r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r13=(3<<61)
-       movl r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r15=(4<<61)
-       movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r17=(5<<61)
-       movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
-       movl r19=(6<<61)
-       movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
-       movl r21=(7<<61)
-       ;;
-       mov rr[r7]=r6
-       mov rr[r9]=r8
-       mov rr[r11]=r10
-       mov rr[r13]=r12
-       mov rr[r15]=r14
-       mov rr[r17]=r16
-       mov rr[r19]=r18
-       mov rr[r21]=r20
-       ;;
-       /*
-        * Now pin mappings into the TLB for kernel text and data
-        */
-       mov r18=KERNEL_TR_PAGE_SHIFT<<2
-       movl r17=KERNEL_START
-       ;;
-       mov cr.itir=r18
-       mov cr.ifa=r17
-       mov r16=IA64_TR_KERNEL
-       mov r3=ip
-       movl r18=PAGE_KERNEL
-       ;;
-       dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
-       ;;
-       or r18=r2,r18
-       ;;
-       srlz.i
-       ;;
-       itr.i itr[r16]=r18
-       ;;
-       itr.d dtr[r16]=r18
-       ;;
-       srlz.i
-
-       /*
-        * Switch into virtual mode:
-        */
-#ifdef CONFIG_VTI
-       movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH \
-                 |IA64_PSR_DI)
-#else // CONFIG_VTI
-       movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
-                 |IA64_PSR_DI)
-#endif // CONFIG_VTI
-       ;;
-       mov cr.ipsr=r16
-       movl r17=1f
-       ;;
-       mov cr.iip=r17
-       mov cr.ifs=r0
-       ;;
-       rfi
-       ;;
-1:     // now we are in virtual mode
-
-       // set IVT entry point---can't access I/O ports without it
-#ifdef CONFIG_VTI
-    movl r3=vmx_ia64_ivt
-#else // CONFIG_VTI
-       movl r3=ia64_ivt
-#endif // CONFIG_VTI
-       ;;
-       mov cr.iva=r3
-       movl r2=FPSR_DEFAULT
-       ;;
-       srlz.i
-       movl gp=__gp
-
-       mov ar.fpsr=r2
-       ;;
-
-#define isAP   p2      // are we an Application Processor?
-#define isBP   p3      // are we the Bootstrap Processor?
-
-#ifdef CONFIG_SMP
-       /*
-        * Find the init_task for the currently booting CPU.  At poweron, and in
-        * UP mode, task_for_booting_cpu is NULL.
-        */
-       movl r3=task_for_booting_cpu
-       ;;
-       ld8 r3=[r3]
-       movl r2=init_task
-       ;;
-       cmp.eq isBP,isAP=r3,r0
-       ;;
-(isAP) mov r2=r3
-#else
-       movl r2=init_task
-       cmp.eq isBP,isAP=r0,r0
-#endif
-       ;;
-       tpa r3=r2               // r3 == phys addr of task struct
-       mov r16=-1
-(isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
-
-       // load mapping for stack (virtaddr in r2, physaddr in r3)
-       rsm psr.ic
-       movl r17=PAGE_KERNEL
-       ;;
-       srlz.d
-       dep r18=0,r3,0,12
-       ;;
-       or r18=r17,r18
-#ifdef XEN
-       dep r2=-1,r3,60,4       // IMVA of task
-#else
-       dep r2=-1,r3,61,3       // IMVA of task
-#endif
-       ;;
-       mov r17=rr[r2]
-       shr.u r16=r3,IA64_GRANULE_SHIFT
-       ;;
-       dep r17=0,r17,8,24
-       ;;
-       mov cr.itir=r17
-       mov cr.ifa=r2
-
-       mov r19=IA64_TR_CURRENT_STACK
-       ;;
-       itr.d dtr[r19]=r18
-       ;;
-       ssm psr.ic
-       srlz.d
-       ;;
-
-.load_current:
-       // load the "current" pointer (r13) and ar.k6 with the current task
-#ifdef CONFIG_VTI
-       mov r21=r2              // virtual address
-       ;;
-       bsw.1
-       ;;
-#else // CONFIG_VTI
-       mov IA64_KR(CURRENT)=r2         // virtual address
-       mov IA64_KR(CURRENT_STACK)=r16
-#endif // CONFIG_VTI
-       mov r13=r2
-       /*
-        * Reserve space at the top of the stack for "struct pt_regs".  Kernel 
threads
-        * don't store interesting values in that structure, but the space 
still needs
-        * to be there because time-critical stuff such as the context 
switching can
-        * be implemented more efficiently (for example, __switch_to()
-        * always sets the psr.dfh bit of the task it is switching to).
-        */
-       addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
-       addl r2=IA64_RBS_OFFSET,r2      // initialize the RSE
-       mov ar.rsc=0            // place RSE in enforced lazy mode
-       ;;
-       loadrs                  // clear the dirty partition
-       ;;
-       mov ar.bspstore=r2      // establish the new RSE stack
-       ;;
-       mov ar.rsc=0x3          // place RSE in eager mode
-
-#ifdef XEN
-(isBP) dep r28=-1,r28,60,4     // make address virtual
-#else
-(isBP) dep r28=-1,r28,61,3     // make address virtual
-#endif
-(isBP) movl r2=ia64_boot_param
-       ;;
-(isBP) st8 [r2]=r28            // save the address of the boot param area passed by the bootloader
-
-#ifdef CONFIG_SMP
-(isAP) br.call.sptk.many rp=start_secondary
-.ret0:
-(isAP) br.cond.sptk self
-#endif
-
-       // This is executed by the bootstrap processor (bsp) only:
-
-#ifdef CONFIG_IA64_FW_EMU
-       // initialize PAL & SAL emulator:
-       br.call.sptk.many rp=sys_fw_init
-.ret1:
-#endif
-       br.call.sptk.many rp=start_kernel
-.ret2: addl r3=@ltoff(halt_msg),gp
-       ;;
-       alloc r2=ar.pfs,8,0,2,0
-       ;;
-       ld8 out0=[r3]
-       br.call.sptk.many b0=console_print
-
-self:  hint @pause
-       ;;
-       br.sptk.many self               // endless loop
-       ;;
-END(_start)
-
-GLOBAL_ENTRY(ia64_save_debug_regs)
-       alloc r16=ar.pfs,1,0,0,0
-       mov r20=ar.lc                   // preserve ar.lc
-       mov ar.lc=IA64_NUM_DBG_REGS-1
-       mov r18=0
-       add r19=IA64_NUM_DBG_REGS*8,in0
-       ;;
-1:     mov r16=dbr[r18]
-#ifdef CONFIG_ITANIUM
-       ;;
-       srlz.d
-#endif
-       mov r17=ibr[r18]
-       add r18=1,r18
-       ;;
-       st8.nta [in0]=r16,8
-       st8.nta [r19]=r17,8
-       br.cloop.sptk.many 1b
-       ;;
-       mov ar.lc=r20                   // restore ar.lc
-       br.ret.sptk.many rp
-END(ia64_save_debug_regs)
-
-GLOBAL_ENTRY(ia64_load_debug_regs)
-       alloc r16=ar.pfs,1,0,0,0
-       lfetch.nta [in0]
-       mov r20=ar.lc                   // preserve ar.lc
-       add r19=IA64_NUM_DBG_REGS*8,in0
-       mov ar.lc=IA64_NUM_DBG_REGS-1
-       mov r18=-1
-       ;;
-1:     ld8.nta r16=[in0],8
-       ld8.nta r17=[r19],8
-       add r18=1,r18
-       ;;
-       mov dbr[r18]=r16
-#ifdef CONFIG_ITANIUM
-       ;;
-       srlz.d                          // Errata 132 (NoFix status)
-#endif
-       mov ibr[r18]=r17
-       br.cloop.sptk.many 1b
-       ;;
-       mov ar.lc=r20                   // restore ar.lc
-       br.ret.sptk.many rp
-END(ia64_load_debug_regs)
-
-GLOBAL_ENTRY(__ia64_save_fpu)
-       alloc r2=ar.pfs,1,4,0,0
-       adds loc0=96*16-16,in0
-       adds loc1=96*16-16-128,in0
-       ;;
-       stf.spill.nta [loc0]=f127,-256
-       stf.spill.nta [loc1]=f119,-256
-       ;;
-       stf.spill.nta [loc0]=f111,-256
-       stf.spill.nta [loc1]=f103,-256
-       ;;
-       stf.spill.nta [loc0]=f95,-256
-       stf.spill.nta [loc1]=f87,-256
-       ;;
-       stf.spill.nta [loc0]=f79,-256
-       stf.spill.nta [loc1]=f71,-256
-       ;;
-       stf.spill.nta [loc0]=f63,-256
-       stf.spill.nta [loc1]=f55,-256
-       adds loc2=96*16-32,in0
-       ;;
-       stf.spill.nta [loc0]=f47,-256
-       stf.spill.nta [loc1]=f39,-256
-       adds loc3=96*16-32-128,in0
-       ;;
-       stf.spill.nta [loc2]=f126,-256
-       stf.spill.nta [loc3]=f118,-256
-       ;;
-       stf.spill.nta [loc2]=f110,-256
-       stf.spill.nta [loc3]=f102,-256
-       ;;
-       stf.spill.nta [loc2]=f94,-256
-       stf.spill.nta [loc3]=f86,-256
-       ;;
-       stf.spill.nta [loc2]=f78,-256
-       stf.spill.nta [loc3]=f70,-256
-       ;;
-       stf.spill.nta [loc2]=f62,-256
-       stf.spill.nta [loc3]=f54,-256
-       adds loc0=96*16-48,in0
-       ;;
-       stf.spill.nta [loc2]=f46,-256
-       stf.spill.nta [loc3]=f38,-256
-       adds loc1=96*16-48-128,in0
-       ;;
-       stf.spill.nta [loc0]=f125,-256
-       stf.spill.nta [loc1]=f117,-256
-       ;;
-       stf.spill.nta [loc0]=f109,-256
-       stf.spill.nta [loc1]=f101,-256
-       ;;
-       stf.spill.nta [loc0]=f93,-256
-       stf.spill.nta [loc1]=f85,-256
-       ;;
-       stf.spill.nta [loc0]=f77,-256
-       stf.spill.nta [loc1]=f69,-256
-       ;;
-       stf.spill.nta [loc0]=f61,-256
-       stf.spill.nta [loc1]=f53,-256
-       adds loc2=96*16-64,in0
-       ;;
-       stf.spill.nta [loc0]=f45,-256
-       stf.spill.nta [loc1]=f37,-256
-       adds loc3=96*16-64-128,in0
-       ;;
-       stf.spill.nta [loc2]=f124,-256
-       stf.spill.nta [loc3]=f116,-256
-       ;;
-       stf.spill.nta [loc2]=f108,-256
-       stf.spill.nta [loc3]=f100,-256
-       ;;
-       stf.spill.nta [loc2]=f92,-256
-       stf.spill.nta [loc3]=f84,-256
-       ;;
-       stf.spill.nta [loc2]=f76,-256
-       stf.spill.nta [loc3]=f68,-256
-       ;;
-       stf.spill.nta [loc2]=f60,-256
-       stf.spill.nta [loc3]=f52,-256
-       adds loc0=96*16-80,in0
-       ;;
-       stf.spill.nta [loc2]=f44,-256
-       stf.spill.nta [loc3]=f36,-256
-       adds loc1=96*16-80-128,in0
-       ;;
-       stf.spill.nta [loc0]=f123,-256
-       stf.spill.nta [loc1]=f115,-256
-       ;;
-       stf.spill.nta [loc0]=f107,-256
-       stf.spill.nta [loc1]=f99,-256
-       ;;
-       stf.spill.nta [loc0]=f91,-256
-       stf.spill.nta [loc1]=f83,-256
-       ;;
-       stf.spill.nta [loc0]=f75,-256
-       stf.spill.nta [loc1]=f67,-256
-       ;;
-       stf.spill.nta [loc0]=f59,-256
-       stf.spill.nta [loc1]=f51,-256
-       adds loc2=96*16-96,in0
-       ;;
-       stf.spill.nta [loc0]=f43,-256
-       stf.spill.nta [loc1]=f35,-256
-       adds loc3=96*16-96-128,in0
-       ;;
-       stf.spill.nta [loc2]=f122,-256
-       stf.spill.nta [loc3]=f114,-256
-       ;;
-       stf.spill.nta [loc2]=f106,-256
-       stf.spill.nta [loc3]=f98,-256
-       ;;
-       stf.spill.nta [loc2]=f90,-256
-       stf.spill.nta [loc3]=f82,-256
-       ;;
-       stf.spill.nta [loc2]=f74,-256
-       stf.spill.nta [loc3]=f66,-256
-       ;;
-       stf.spill.nta [loc2]=f58,-256
-       stf.spill.nta [loc3]=f50,-256
-       adds loc0=96*16-112,in0
-       ;;
-       stf.spill.nta [loc2]=f42,-256
-       stf.spill.nta [loc3]=f34,-256
-       adds loc1=96*16-112-128,in0
-       ;;
-       stf.spill.nta [loc0]=f121,-256
-       stf.spill.nta [loc1]=f113,-256
-       ;;
-       stf.spill.nta [loc0]=f105,-256
-       stf.spill.nta [loc1]=f97,-256
-       ;;
-       stf.spill.nta [loc0]=f89,-256
-       stf.spill.nta [loc1]=f81,-256
-       ;;
-       stf.spill.nta [loc0]=f73,-256
-       stf.spill.nta [loc1]=f65,-256
-       ;;
-       stf.spill.nta [loc0]=f57,-256
-       stf.spill.nta [loc1]=f49,-256
-       adds loc2=96*16-128,in0
-       ;;
-       stf.spill.nta [loc0]=f41,-256
-       stf.spill.nta [loc1]=f33,-256
-       adds loc3=96*16-128-128,in0
-       ;;
-       stf.spill.nta [loc2]=f120,-256
-       stf.spill.nta [loc3]=f112,-256
-       ;;
-       stf.spill.nta [loc2]=f104,-256
-       stf.spill.nta [loc3]=f96,-256
-       ;;
-       stf.spill.nta [loc2]=f88,-256
-       stf.spill.nta [loc3]=f80,-256
-       ;;
-       stf.spill.nta [loc2]=f72,-256
-       stf.spill.nta [loc3]=f64,-256
-       ;;
-       stf.spill.nta [loc2]=f56,-256
-       stf.spill.nta [loc3]=f48,-256
-       ;;
-       stf.spill.nta [loc2]=f40
-       stf.spill.nta [loc3]=f32
-       br.ret.sptk.many rp
-END(__ia64_save_fpu)
-
-GLOBAL_ENTRY(__ia64_load_fpu)
-       alloc r2=ar.pfs,1,2,0,0
-       adds r3=128,in0
-       adds r14=256,in0
-       adds r15=384,in0
-       mov loc0=512
-       mov loc1=-1024+16
-       ;;
-       ldf.fill.nta f32=[in0],loc0
-       ldf.fill.nta f40=[ r3],loc0
-       ldf.fill.nta f48=[r14],loc0
-       ldf.fill.nta f56=[r15],loc0
-       ;;
-       ldf.fill.nta f64=[in0],loc0
-       ldf.fill.nta f72=[ r3],loc0
-       ldf.fill.nta f80=[r14],loc0
-       ldf.fill.nta f88=[r15],loc0
-       ;;
-       ldf.fill.nta f96=[in0],loc1
-       ldf.fill.nta f104=[ r3],loc1
-       ldf.fill.nta f112=[r14],loc1
-       ldf.fill.nta f120=[r15],loc1
-       ;;
-       ldf.fill.nta f33=[in0],loc0
-       ldf.fill.nta f41=[ r3],loc0
-       ldf.fill.nta f49=[r14],loc0
-       ldf.fill.nta f57=[r15],loc0
-       ;;
-       ldf.fill.nta f65=[in0],loc0
-       ldf.fill.nta f73=[ r3],loc0
-       ldf.fill.nta f81=[r14],loc0
-       ldf.fill.nta f89=[r15],loc0
-       ;;
-       ldf.fill.nta f97=[in0],loc1
-       ldf.fill.nta f105=[ r3],loc1
-       ldf.fill.nta f113=[r14],loc1
-       ldf.fill.nta f121=[r15],loc1
-       ;;
-       ldf.fill.nta f34=[in0],loc0
-       ldf.fill.nta f42=[ r3],loc0
-       ldf.fill.nta f50=[r14],loc0
-       ldf.fill.nta f58=[r15],loc0
-       ;;
-       ldf.fill.nta f66=[in0],loc0
-       ldf.fill.nta f74=[ r3],loc0
-       ldf.fill.nta f82=[r14],loc0
-       ldf.fill.nta f90=[r15],loc0
-       ;;
-       ldf.fill.nta f98=[in0],loc1
-       ldf.fill.nta f106=[ r3],loc1
-       ldf.fill.nta f114=[r14],loc1
-       ldf.fill.nta f122=[r15],loc1
-       ;;
-       ldf.fill.nta f35=[in0],loc0
-       ldf.fill.nta f43=[ r3],loc0
-       ldf.fill.nta f51=[r14],loc0
-       ldf.fill.nta f59=[r15],loc0
-       ;;
-       ldf.fill.nta f67=[in0],loc0
-       ldf.fill.nta f75=[ r3],loc0
-       ldf.fill.nta f83=[r14],loc0
-       ldf.fill.nta f91=[r15],loc0
-       ;;
-       ldf.fill.nta f99=[in0],loc1
-       ldf.fill.nta f107=[ r3],loc1
-       ldf.fill.nta f115=[r14],loc1
-       ldf.fill.nta f123=[r15],loc1
-       ;;
-       ldf.fill.nta f36=[in0],loc0
-       ldf.fill.nta f44=[ r3],loc0
-       ldf.fill.nta f52=[r14],loc0
-       ldf.fill.nta f60=[r15],loc0
-       ;;
-       ldf.fill.nta f68=[in0],loc0
-       ldf.fill.nta f76=[ r3],loc0
-       ldf.fill.nta f84=[r14],loc0
-       ldf.fill.nta f92=[r15],loc0
-       ;;
-       ldf.fill.nta f100=[in0],loc1
-       ldf.fill.nta f108=[ r3],loc1
-       ldf.fill.nta f116=[r14],loc1
-       ldf.fill.nta f124=[r15],loc1
-       ;;
-       ldf.fill.nta f37=[in0],loc0
-       ldf.fill.nta f45=[ r3],loc0
-       ldf.fill.nta f53=[r14],loc0
-       ldf.fill.nta f61=[r15],loc0
-       ;;
-       ldf.fill.nta f69=[in0],loc0
-       ldf.fill.nta f77=[ r3],loc0
-       ldf.fill.nta f85=[r14],loc0
-       ldf.fill.nta f93=[r15],loc0
-       ;;
-       ldf.fill.nta f101=[in0],loc1
-       ldf.fill.nta f109=[ r3],loc1
-       ldf.fill.nta f117=[r14],loc1
-       ldf.fill.nta f125=[r15],loc1
-       ;;
-       ldf.fill.nta f38 =[in0],loc0
-       ldf.fill.nta f46 =[ r3],loc0
-       ldf.fill.nta f54 =[r14],loc0
-       ldf.fill.nta f62 =[r15],loc0
-       ;;
-       ldf.fill.nta f70 =[in0],loc0
-       ldf.fill.nta f78 =[ r3],loc0
-       ldf.fill.nta f86 =[r14],loc0
-       ldf.fill.nta f94 =[r15],loc0
-       ;;
-       ldf.fill.nta f102=[in0],loc1
-       ldf.fill.nta f110=[ r3],loc1
-       ldf.fill.nta f118=[r14],loc1
-       ldf.fill.nta f126=[r15],loc1
-       ;;
-       ldf.fill.nta f39 =[in0],loc0
-       ldf.fill.nta f47 =[ r3],loc0
-       ldf.fill.nta f55 =[r14],loc0
-       ldf.fill.nta f63 =[r15],loc0
-       ;;
-       ldf.fill.nta f71 =[in0],loc0
-       ldf.fill.nta f79 =[ r3],loc0
-       ldf.fill.nta f87 =[r14],loc0
-       ldf.fill.nta f95 =[r15],loc0
-       ;;
-       ldf.fill.nta f103=[in0]
-       ldf.fill.nta f111=[ r3]
-       ldf.fill.nta f119=[r14]
-       ldf.fill.nta f127=[r15]
-       br.ret.sptk.many rp
-END(__ia64_load_fpu)
-
-GLOBAL_ENTRY(__ia64_init_fpu)
-       stf.spill [sp]=f0               // M3
-       mov      f32=f0                 // F
-       nop.b    0
-
-       ldfps    f33,f34=[sp]           // M0
-       ldfps    f35,f36=[sp]           // M1
-       mov      f37=f0                 // F
-       ;;
-
-       setf.s   f38=r0                 // M2
-       setf.s   f39=r0                 // M3
-       mov      f40=f0                 // F
-
-       ldfps    f41,f42=[sp]           // M0
-       ldfps    f43,f44=[sp]           // M1
-       mov      f45=f0                 // F
-
-       setf.s   f46=r0                 // M2
-       setf.s   f47=r0                 // M3
-       mov      f48=f0                 // F
-
-       ldfps    f49,f50=[sp]           // M0
-       ldfps    f51,f52=[sp]           // M1
-       mov      f53=f0                 // F
-
-       setf.s   f54=r0                 // M2
-       setf.s   f55=r0                 // M3
-       mov      f56=f0                 // F
-
-       ldfps    f57,f58=[sp]           // M0
-       ldfps    f59,f60=[sp]           // M1
-       mov      f61=f0                 // F
-
-       setf.s   f62=r0                 // M2
-       setf.s   f63=r0                 // M3
-       mov      f64=f0                 // F
-
-       ldfps    f65,f66=[sp]           // M0
-       ldfps    f67,f68=[sp]           // M1
-       mov      f69=f0                 // F
-
-       setf.s   f70=r0                 // M2
-       setf.s   f71=r0                 // M3
-       mov      f72=f0                 // F
-
-       ldfps    f73,f74=[sp]           // M0
-       ldfps    f75,f76=[sp]           // M1
-       mov      f77=f0                 // F
-
-       setf.s   f78=r0                 // M2
-       setf.s   f79=r0                 // M3
-       mov      f80=f0                 // F
-
-       ldfps    f81,f82=[sp]           // M0
-       ldfps    f83,f84=[sp]           // M1
-       mov      f85=f0                 // F
-
-       setf.s   f86=r0                 // M2
-       setf.s   f87=r0                 // M3
-       mov      f88=f0                 // F
-
-       /*
-        * When the instructions are cached, it would be faster to initialize
-        * the remaining registers with simply mov instructions (F-unit).
-        * This gets the time down to ~29 cycles.  However, this would use up
-        * 33 bundles, whereas continuing with the above pattern yields
-        * 10 bundles and ~30 cycles.
-        */
-
-       ldfps    f89,f90=[sp]           // M0
-       ldfps    f91,f92=[sp]           // M1
-       mov      f93=f0                 // F
-
-       setf.s   f94=r0                 // M2
-       setf.s   f95=r0                 // M3
-       mov      f96=f0                 // F
-
-       ldfps    f97,f98=[sp]           // M0
-       ldfps    f99,f100=[sp]          // M1
-       mov      f101=f0                // F
-
-       setf.s   f102=r0                // M2
-       setf.s   f103=r0                // M3
-       mov      f104=f0                // F
-
-       ldfps    f105,f106=[sp]         // M0
-       ldfps    f107,f108=[sp]         // M1
-       mov      f109=f0                // F
-
-       setf.s   f110=r0                // M2
-       setf.s   f111=r0                // M3
-       mov      f112=f0                // F
-
-       ldfps    f113,f114=[sp]         // M0
-       ldfps    f115,f116=[sp]         // M1
-       mov      f117=f0                // F
-
-       setf.s   f118=r0                // M2
-       setf.s   f119=r0                // M3
-       mov      f120=f0                // F
-
-       ldfps    f121,f122=[sp]         // M0
-       ldfps    f123,f124=[sp]         // M1
-       mov      f125=f0                // F
-
-       setf.s   f126=r0                // M2
-       setf.s   f127=r0                // M3
-       br.ret.sptk.many rp             // F
-END(__ia64_init_fpu)
-
-/*
- * Switch execution mode from virtual to physical
- *
- * Inputs:
- *     r16 = new psr to establish
- * Output:
- *     r19 = old virtual address of ar.bsp
- *     r20 = old virtual address of sp
- *
- * Note: RSE must already be in enforced lazy mode
- */
-GLOBAL_ENTRY(ia64_switch_mode_phys)
- {
-       alloc r2=ar.pfs,0,0,0,0
-       rsm psr.i | psr.ic              // disable interrupts and interrupt collection
-       mov r15=ip
- }
-       ;;
- {
-       flushrs                         // must be first insn in group
-       srlz.i
- }
-       ;;
-       mov cr.ipsr=r16                 // set new PSR
-       add r3=1f-ia64_switch_mode_phys,r15
-
-       mov r19=ar.bsp
-       mov r20=sp
-       mov r14=rp                      // get return address into a general register
-       ;;
-
-       // going to physical mode, use tpa to translate virt->phys
-       tpa r17=r19
-       tpa r3=r3
-       tpa sp=sp
-       tpa r14=r14
-       ;;
-
-       mov r18=ar.rnat                 // save ar.rnat
-       mov ar.bspstore=r17             // this steps on ar.rnat
-       mov cr.iip=r3
-       mov cr.ifs=r0
-       ;;
-       mov ar.rnat=r18                 // restore ar.rnat
-       rfi                             // must be last insn in group
-       ;;
-1:     mov rp=r14
-       br.ret.sptk.many rp
-END(ia64_switch_mode_phys)
-
-/*
- * Switch execution mode from physical to virtual
- *
- * Inputs:
- *     r16 = new psr to establish
- *     r19 = new bspstore to establish
- *     r20 = new sp to establish
- *
- * Note: RSE must already be in enforced lazy mode
- */
-GLOBAL_ENTRY(ia64_switch_mode_virt)
- {
-       alloc r2=ar.pfs,0,0,0,0
-       rsm psr.i | psr.ic              // disable interrupts and interrupt collection
-       mov r15=ip
- }
-       ;;
- {
-       flushrs                         // must be first insn in group
-       srlz.i
- }
-       ;;
-       mov cr.ipsr=r16                 // set new PSR
-       add r3=1f-ia64_switch_mode_virt,r15
-
-       mov r14=rp                      // get return address into a general register
-       ;;
-
-       // going to virtual
-       //   - for code addresses, set upper bits of addr to KERNEL_START
-       //   - for stack addresses, copy from input argument
-       movl r18=KERNEL_START
-       dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
-       dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
-       mov sp=r20
-       ;;
-       or r3=r3,r18
-       or r14=r14,r18
-       ;;
-
-       mov r18=ar.rnat                 // save ar.rnat
-       mov ar.bspstore=r19             // this steps on ar.rnat
-       mov cr.iip=r3
-       mov cr.ifs=r0
-       ;;
-       mov ar.rnat=r18                 // restore ar.rnat
-       rfi                             // must be last insn in group
-       ;;
-1:     mov rp=r14
-       br.ret.sptk.many rp
-END(ia64_switch_mode_virt)
-
-GLOBAL_ENTRY(ia64_delay_loop)
-       .prologue
-{      nop 0                   // work around GAS unwind info generation bug...
-       .save ar.lc,r2
-       mov r2=ar.lc
-       .body
-       ;;
-       mov ar.lc=r32
-}
-       ;;
-       // force loop to be 32-byte aligned (GAS bug means we cannot use .align
-       // inside function body without corrupting unwind info).
-{      nop 0 }
-1:     br.cloop.sptk.few 1b
-       ;;
-       mov ar.lc=r2
-       br.ret.sptk.many rp
-END(ia64_delay_loop)
-
-/*
- * Return a CPU-local timestamp in nano-seconds.  This timestamp is
- * NOT synchronized across CPUs its return value must never be
- * compared against the values returned on another CPU.  The usage in
- * kernel/sched.c ensures that.
- *
- * The return-value of sched_clock() is NOT supposed to wrap-around.
- * If it did, it would cause some scheduling hiccups (at the worst).
- * Fortunately, with a 64-bit cycle-counter ticking at 100GHz, even
- * that would happen only once every 5+ years.
- *
- * The code below basically calculates:
- *
- *   (ia64_get_itc() * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT
- *
- * except that the multiplication and the shift are done with 128-bit
- * intermediate precision so that we can produce a full 64-bit result.
- */
-GLOBAL_ENTRY(sched_clock)
-#ifdef XEN
-       movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
-#else
-       addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
-#endif
-       mov.m r9=ar.itc         // fetch cycle-counter                          (35 cyc)
-       ;;
-       ldf8 f8=[r8]
-       ;;
-       setf.sig f9=r9          // certain to stall, so issue it _after_ ldf8...
-       ;;
-       xmpy.lu f10=f9,f8       // calculate low 64 bits of 128-bit product     (4 cyc)
-       xmpy.hu f11=f9,f8       // calculate high 64 bits of 128-bit product
-       ;;
-       getf.sig r8=f10         //                                              (5 cyc)
-       getf.sig r9=f11
-       ;;
-       shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
-       br.ret.sptk.many rp
-END(sched_clock)
-
-GLOBAL_ENTRY(start_kernel_thread)
-       .prologue
-       .save rp, r0                            // this is the end of the call-chain
-       .body
-       alloc r2 = ar.pfs, 0, 0, 2, 0
-       mov out0 = r9
-       mov out1 = r11;;
-       br.call.sptk.many rp = kernel_thread_helper;;
-       mov out0 = r8
-       br.call.sptk.many rp = sys_exit;;
-1:     br.sptk.few 1b                          // not reached
-END(start_kernel_thread)
-
-#ifdef CONFIG_IA64_BRL_EMU
-
-/*
- *  Assembly routines used by brl_emu.c to set preserved register state.
- */
-
-#define SET_REG(reg)                           \
- GLOBAL_ENTRY(ia64_set_##reg);                 \
-       alloc r16=ar.pfs,1,0,0,0;               \
-       mov reg=r32;                            \
-       ;;                                      \
-       br.ret.sptk.many rp;                    \
- END(ia64_set_##reg)
-
-SET_REG(b1);
-SET_REG(b2);
-SET_REG(b3);
-SET_REG(b4);
-SET_REG(b5);
-
-#endif /* CONFIG_IA64_BRL_EMU */
-
-#ifdef CONFIG_SMP
-       /*
-        * This routine handles spinlock contention.  It uses a non-standard calling
-        * convention to avoid converting leaf routines into interior routines.  Because
-        * of this special convention, there are several restrictions:
-        *
-        * - do not use gp relative variables, this code is called from the kernel
-        *   and from modules, r1 is undefined.
-        * - do not use stacked registers, the caller owns them.
-        * - do not use the scratch stack space, the caller owns it.
-        * - do not use any registers other than the ones listed below
-        *
-        * Inputs:
-        *   ar.pfs - saved CFM of caller
-        *   ar.ccv - 0 (and available for use)
-        *   r27    - flags from spin_lock_irqsave or 0.  Must be preserved.
-        *   r28    - available for use.
-        *   r29    - available for use.
-        *   r30    - available for use.
-        *   r31    - address of lock, available for use.
-        *   b6     - return address
-        *   p14    - available for use.
-        *   p15    - used to track flag status.
-        *
-        * If you patch this code to use more registers, do not forget to update
-        * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
-        */
-
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-
-GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
-       .prologue
-       .save ar.pfs, r0        // this code effectively has a zero frame size
-       .save rp, r28
-       .body
-       nop 0
-       tbit.nz p15,p0=r27,IA64_PSR_I_BIT
-       .restore sp             // pop existing prologue after next insn
-       mov b6 = r28
-       .prologue
-       .save ar.pfs, r0
-       .altrp b6
-       .body
-       ;;
-(p15)  ssm psr.i               // reenable interrupts if they were on
-                               // DavidM says that srlz.d is slow and is not required in this case
-.wait:
-       // exponential backoff, kdb, lockmeter etc. go in here
-       hint @pause
-       ld4 r30=[r31]           // don't use ld4.bias; if it's contended, we won't write the word
-       nop 0
-       ;;
-       cmp4.ne p14,p0=r30,r0
-(p14)  br.cond.sptk.few .wait
-(p15)  rsm psr.i               // disable interrupts if we reenabled them
-       br.cond.sptk.few b6     // lock is now free, try to acquire
-       .global ia64_spinlock_contention_pre3_4_end     // for kernprof
-ia64_spinlock_contention_pre3_4_end:
-END(ia64_spinlock_contention_pre3_4)
-
-#else
-
-GLOBAL_ENTRY(ia64_spinlock_contention)
-       .prologue
-       .altrp b6
-       .body
-       tbit.nz p15,p0=r27,IA64_PSR_I_BIT
-       ;;
-.wait:
-(p15)  ssm psr.i               // reenable interrupts if they were on
-                               // DavidM says that srlz.d is slow and is not required in this case
-.wait2:
-       // exponential backoff, kdb, lockmeter etc. go in here
-       hint @pause
-       ld4 r30=[r31]           // don't use ld4.bias; if it's contended, we won't write the word
-       ;;
-       cmp4.ne p14,p0=r30,r0
-       mov r30 = 1
-(p14)  br.cond.sptk.few .wait2
-(p15)  rsm psr.i               // disable interrupts if we reenabled them
-       ;;
-       cmpxchg4.acq r30=[r31], r30, ar.ccv
-       ;;
-       cmp4.ne p14,p0=r0,r30
-(p14)  br.cond.sptk.few .wait
-
-       br.ret.sptk.many b6     // lock is now taken
-END(ia64_spinlock_contention)
-
-#endif
-
-#endif /* CONFIG_SMP */
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/irq_ia64.c
--- a/xen/arch/ia64/irq_ia64.c  Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,381 +0,0 @@
-/*
- * linux/arch/ia64/kernel/irq.c
- *
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *
- *  6/10/99: Updated to bring in sync with x86 version to facilitate
- *          support for SMP and different interrupt controllers.
- *
- * 09/15/00 Goutham Rao <goutham.rao@xxxxxxxxx> Implemented pci_irq_to_vector
- *                      PCI to vector allocation routine.
- * 04/14/2004 Ashok Raj <ashok.raj@xxxxxxxxx>
- *                                             Added CPU Hotplug handling for IPF.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-
-#include <linux/jiffies.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel_stat.h>
-#include <linux/slab.h>
-#include <linux/ptrace.h>
-#include <linux/random.h>      /* for rand_initialize_irq() */
-#include <linux/signal.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/threads.h>
-#include <linux/bitops.h>
-
-#include <asm/delay.h>
-#include <asm/intrinsics.h>
-#include <asm/io.h>
-#include <asm/hw_irq.h>
-#include <asm/machvec.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_PERFMON
-# include <asm/perfmon.h>
-#endif
-
-#define IRQ_DEBUG      0
-
-/* default base addr of IPI table */
-void __iomem *ipi_base_addr = ((void __iomem *)
-                              (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
-
-/*
- * Legacy IRQ to IA-64 vector translation table.
- */
-__u8 isa_irq_to_vector_map[16] = {
-       /* 8259 IRQ translation, first 16 entries */
-       0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
-       0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
-};
-EXPORT_SYMBOL(isa_irq_to_vector_map);
-
-static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
-
-int
-assign_irq_vector (int irq)
-{
-       int pos, vector;
- again:
-       pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
-       vector = IA64_FIRST_DEVICE_VECTOR + pos;
-       if (vector > IA64_LAST_DEVICE_VECTOR)
-               /* XXX could look for sharable vectors instead of panic'ing... */
-               panic("assign_irq_vector: out of interrupt vectors!");
-       if (test_and_set_bit(pos, ia64_vector_mask))
-               goto again;
-       return vector;
-}
-
-void
-free_irq_vector (int vector)
-{
-       int pos;
-
-       if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
-               return;
-
-       pos = vector - IA64_FIRST_DEVICE_VECTOR;
-       if (!test_and_clear_bit(pos, ia64_vector_mask))
-               printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
-}
-
-#ifdef CONFIG_SMP
-#      define IS_RESCHEDULE(vec)       (vec == IA64_IPI_RESCHEDULE)
-#else
-#      define IS_RESCHEDULE(vec)       (0)
-#endif
-/*
- * That's where the IVT branches when we get an external
- * interrupt. This branches to the correct hardware IRQ handler via
- * function ptr.
- */
-void
-ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
-{
-       unsigned long saved_tpr;
-
-#if IRQ_DEBUG
-#ifdef XEN
-       xen_debug_irq(vector, regs);
-#endif
-       {
-               unsigned long bsp, sp;
-
-               /*
-                * Note: if the interrupt happened while executing in
-                * the context switch routine (ia64_switch_to), we may
-                * get a spurious stack overflow here.  This is
-                * because the register and the memory stack are not
-                * switched atomically.
-                */
-               bsp = ia64_getreg(_IA64_REG_AR_BSP);
-               sp = ia64_getreg(_IA64_REG_SP);
-
-               if ((sp - bsp) < 1024) {
-                       static unsigned char count;
-                       static long last_time;
-
-                       if (jiffies - last_time > 5*HZ)
-                               count = 0;
-                       if (++count < 5) {
-                               last_time = jiffies;
-                               printk("ia64_handle_irq: DANGER: less than "
-                                      "1KB of free stack space!!\n"
-                                      "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
-                       }
-               }
-       }
-#endif /* IRQ_DEBUG */
-
-       /*
-        * Always set TPR to limit maximum interrupt nesting depth to
-        * 16 (without this, it would be ~240, which could easily lead
-        * to kernel stack overflows).
-        */
-       irq_enter();
-       saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
-       ia64_srlz_d();
-       while (vector != IA64_SPURIOUS_INT_VECTOR) {
-               if (!IS_RESCHEDULE(vector)) {
-                       ia64_setreg(_IA64_REG_CR_TPR, vector);
-                       ia64_srlz_d();
-
-#ifdef XEN
-                       if (!xen_do_IRQ(vector))
-#endif
-                       __do_IRQ(local_vector_to_irq(vector), regs);
-
-                       /*
-                        * Disable interrupts and send EOI:
-                        */
-                       local_irq_disable();
-                       ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
-               }
-               ia64_eoi();
-               vector = ia64_get_ivr();
-       }
-       /*
-        * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
-        * handler needs to be able to wait for further keyboard interrupts, which can't
-        * come through until ia64_eoi() has been done.
-        */
-       irq_exit();
-}
-
-#ifdef  CONFIG_VTI
-#define vmx_irq_enter()                \
-       add_preempt_count(HARDIRQ_OFFSET);
-
-/* Now softirq will be checked when leaving hypervisor, or else
- * scheduler irq will be executed too early.
- */
-#define vmx_irq_exit(void)     \
-       sub_preempt_count(HARDIRQ_OFFSET);
-/*
- * That's where the IVT branches when we get an external
- * interrupt. This branches to the correct hardware IRQ handler via
- * function ptr.
- */
-void
-vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
-{
-       unsigned long saved_tpr;
-       int     wake_dom0 = 0;
-
-
-#if IRQ_DEBUG
-       {
-               unsigned long bsp, sp;
-
-               /*
-                * Note: if the interrupt happened while executing in
-                * the context switch routine (ia64_switch_to), we may
-                * get a spurious stack overflow here.  This is
-                * because the register and the memory stack are not
-                * switched atomically.
-                */
-               bsp = ia64_getreg(_IA64_REG_AR_BSP);
-               sp = ia64_getreg(_IA64_REG_AR_SP);
-
-               if ((sp - bsp) < 1024) {
-                       static unsigned char count;
-                       static long last_time;
-
-                       if (jiffies - last_time > 5*HZ)
-                               count = 0;
-                       if (++count < 5) {
-                               last_time = jiffies;
-                               printk("ia64_handle_irq: DANGER: less than "
-                                      "1KB of free stack space!!\n"
-                                      "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
-                       }
-               }
-       }
-#endif /* IRQ_DEBUG */
-
-       /*
-        * Always set TPR to limit maximum interrupt nesting depth to
-        * 16 (without this, it would be ~240, which could easily lead
-        * to kernel stack overflows).
-        */
-       vmx_irq_enter();
-       saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
-       ia64_srlz_d();
-       while (vector != IA64_SPURIOUS_INT_VECTOR) {
-           if (!IS_RESCHEDULE(vector)) {
-               ia64_setreg(_IA64_REG_CR_TPR, vector);
-               ia64_srlz_d();
-
-               if (vector != IA64_TIMER_VECTOR) {
-                       /* FIXME: Leave IRQ re-route later */
-                       vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
-                       wake_dom0 = 1;
-               }
-               else {  // FIXME: Handle Timer only now
-                       __do_IRQ(local_vector_to_irq(vector), regs);
-               }
-               
-               /*
-                * Disable interrupts and send EOI:
-                */
-               local_irq_disable();
-               ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
-           }
-           else {
-                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
-            }
-           ia64_eoi();
-           vector = ia64_get_ivr();
-       }
-       /*
-        * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
-        * handler needs to be able to wait for further keyboard interrupts, which can't
-        * come through until ia64_eoi() has been done.
-        */
-       vmx_irq_exit();
-       if ( wake_dom0 && current != dom0 ) 
-               domain_wake(dom0->vcpu[0]);
-}
-#endif
-
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * This function emulates a interrupt processing when a cpu is about to be
- * brought down.
- */
-void ia64_process_pending_intr(void)
-{
-       ia64_vector vector;
-       unsigned long saved_tpr;
-       extern unsigned int vectors_in_migration[NR_IRQS];
-
-       vector = ia64_get_ivr();
-
-        irq_enter();
-        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
-        ia64_srlz_d();
-
-        /*
-         * Perform normal interrupt style processing
-         */
-       while (vector != IA64_SPURIOUS_INT_VECTOR) {
-               if (!IS_RESCHEDULE(vector)) {
-                       ia64_setreg(_IA64_REG_CR_TPR, vector);
-                       ia64_srlz_d();
-
-                       /*
-                        * Now try calling normal ia64_handle_irq as it would have got called
-                        * from a real intr handler. Try passing null for pt_regs, hopefully
-                        * it will work. I hope it works!.
-                        * Probably could shared code.
-                        */
-                       vectors_in_migration[local_vector_to_irq(vector)]=0;
-                       __do_IRQ(local_vector_to_irq(vector), NULL);
-
-                       /*
-                        * Disable interrupts and send EOI
-                        */
-                       local_irq_disable();
-                       ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
-               }
-               ia64_eoi();
-               vector = ia64_get_ivr();
-       }
-       irq_exit();
-}
-#endif
-
-
-#ifdef CONFIG_SMP
-extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
-
-static struct irqaction ipi_irqaction = {
-       .handler =      handle_IPI,
-       .flags =        SA_INTERRUPT,
-       .name =         "IPI"
-};
-#endif
-
-void
-register_percpu_irq (ia64_vector vec, struct irqaction *action)
-{
-       irq_desc_t *desc;
-       unsigned int irq;
-
-       for (irq = 0; irq < NR_IRQS; ++irq)
-               if (irq_to_vector(irq) == vec) {
-                       desc = irq_descp(irq);
-                       desc->status |= IRQ_PER_CPU;
-                       desc->handler = &irq_type_ia64_lsapic;
-                       if (action)
-                               setup_irq(irq, action);
-               }
-}
-
-void __init
-init_IRQ (void)
-{
-       register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
-#ifdef CONFIG_SMP
-       register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
-#endif
-#ifdef CONFIG_PERFMON
-       pfm_init_percpu();
-#endif
-       platform_irq_init();
-}
-
-void
-ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
-{
-       void __iomem *ipi_addr;
-       unsigned long ipi_data;
-       unsigned long phys_cpu_id;
-
-#ifdef CONFIG_SMP
-       phys_cpu_id = cpu_physical_id(cpu);
-#else
-       phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
-#endif
-
-       /*
-        * cpu number is in 8bit ID and 8bit EID
-        */
-
-       ipi_data = (delivery_mode << 8) | (vector & 0xff);
-       ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
-
-       writeq(ipi_data, ipi_addr);
-}
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/mm_contig.c
--- a/xen/arch/ia64/mm_contig.c Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,305 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- * Copyright (C) 2000, Rohit Seth <rohit.seth@xxxxxxxxx>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
- *
- * Routines used by ia64 machines with contiguous (or virtually contiguous)
- * memory.
- */
-#include <linux/config.h>
-#include <linux/bootmem.h>
-#include <linux/efi.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-
-#include <asm/meminit.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-#include <asm/mca.h>
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long num_dma_physpages;
-#endif
-
-/**
- * show_mem - display a memory statistics summary
- *
- * Just walks the pages in the system and describes where they're allocated.
- */
-#ifndef XEN
-void
-show_mem (void)
-{
-       int i, total = 0, reserved = 0;
-       int shared = 0, cached = 0;
-
-       printk("Mem-info:\n");
-       show_free_areas();
-
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-       i = max_mapnr;
-       while (i-- > 0) {
-               if (!pfn_valid(i))
-                       continue;
-               total++;
-               if (PageReserved(mem_map+i))
-                       reserved++;
-               else if (PageSwapCache(mem_map+i))
-                       cached++;
-               else if (page_count(mem_map + i))
-                       shared += page_count(mem_map + i) - 1;
-       }
-       printk("%d pages of RAM\n", total);
-       printk("%d reserved pages\n", reserved);
-       printk("%d pages shared\n", shared);
-       printk("%d pages swap cached\n", cached);
-       printk("%ld pages in page table cache\n", pgtable_cache_size);
-}
-#endif
-
-/* physical address where the bootmem map is located */
-unsigned long bootmap_start;
-
-/**
- * find_max_pfn - adjust the maximum page number callback
- * @start: start of range
- * @end: end of range
- * @arg: address of pointer to global max_pfn variable
- *
- * Passed as a callback function to efi_memmap_walk() to determine the highest
- * available page frame number in the system.
- */
-int
-find_max_pfn (unsigned long start, unsigned long end, void *arg)
-{
-       unsigned long *max_pfnp = arg, pfn;
-
-       pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
-       if (pfn > *max_pfnp)
-               *max_pfnp = pfn;
-       return 0;
-}
-
-/**
- * find_bootmap_location - callback to find a memory area for the bootmap
- * @start: start of region
- * @end: end of region
- * @arg: unused callback data
- *
- * Find a place to put the bootmap and return its starting address in
- * bootmap_start.  This address must be page-aligned.
- */
-int
-find_bootmap_location (unsigned long start, unsigned long end, void *arg)
-{
-       unsigned long needed = *(unsigned long *)arg;
-       unsigned long range_start, range_end, free_start;
-       int i;
-
-#if IGNORE_PFN0
-       if (start == PAGE_OFFSET) {
-               start += PAGE_SIZE;
-               if (start >= end)
-                       return 0;
-       }
-#endif
-
-       free_start = PAGE_OFFSET;
-
-       for (i = 0; i < num_rsvd_regions; i++) {
-               range_start = max(start, free_start);
-               range_end   = min(end, rsvd_region[i].start & PAGE_MASK);
-
-               free_start = PAGE_ALIGN(rsvd_region[i].end);
-
-               if (range_end <= range_start)
-                       continue; /* skip over empty range */
-
-               if (range_end - range_start >= needed) {
-                       bootmap_start = __pa(range_start);
-                       return -1;      /* done */
-               }
-
-               /* nothing more available in this segment */
-               if (range_end == end)
-                       return 0;
-       }
-       return 0;
-}
-
-/**
- * find_memory - setup memory map
- *
- * Walk the EFI memory map and find usable memory for the system, taking
- * into account reserved areas.
- */
-#ifndef XEN
-void
-find_memory (void)
-{
-       unsigned long bootmap_size;
-
-       reserve_memory();
-
-       /* first find highest page frame number */
-       max_pfn = 0;
-       efi_memmap_walk(find_max_pfn, &max_pfn);
-
-       /* how many bytes to cover all the pages */
-       bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
-
-       /* look for a location to hold the bootmap */
-       bootmap_start = ~0UL;
-       efi_memmap_walk(find_bootmap_location, &bootmap_size);
-       if (bootmap_start == ~0UL)
-               panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
-
-       bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
-
-       /* Free all available memory, then mark bootmem-map as being in use. */
-       efi_memmap_walk(filter_rsvd_memory, free_bootmem);
-       reserve_bootmem(bootmap_start, bootmap_size);
-
-       find_initrd();
-}
-#endif
-
-#ifdef CONFIG_SMP
-/**
- * per_cpu_init - setup per-cpu variables
- *
- * Allocate and setup per-cpu data areas.
- */
-void *
-per_cpu_init (void)
-{
-       void *cpu_data;
-       int cpu;
-
-       /*
-        * get_free_pages() cannot be used before cpu_init() done.  BSP
-        * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-        * get_zeroed_page().
-        */
-       if (smp_processor_id() == 0) {
-               cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
-                                          PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-               for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                       memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-                       __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-                       cpu_data += PERCPU_PAGE_SIZE;
-                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-               }
-       }
-       return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-}
-#endif /* CONFIG_SMP */
-
-static int
-count_pages (u64 start, u64 end, void *arg)
-{
-       unsigned long *count = arg;
-
-       *count += (end - start) >> PAGE_SHIFT;
-       return 0;
-}
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static int
-count_dma_pages (u64 start, u64 end, void *arg)
-{
-       unsigned long *count = arg;
-
-       if (start < MAX_DMA_ADDRESS)
-               *count += (min(end, MAX_DMA_ADDRESS) - start) >> PAGE_SHIFT;
-       return 0;
-}
-#endif
-
-/*
- * Set up the page tables.
- */
-
-#ifndef XEN
-void
-paging_init (void)
-{
-       unsigned long max_dma;
-       unsigned long zones_size[MAX_NR_ZONES];
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-       unsigned long zholes_size[MAX_NR_ZONES];
-       unsigned long max_gap;
-#endif
-
-       /* initialize mem_map[] */
-
-       memset(zones_size, 0, sizeof(zones_size));
-
-       num_physpages = 0;
-       efi_memmap_walk(count_pages, &num_physpages);
-
-       max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-       memset(zholes_size, 0, sizeof(zholes_size));
-
-       num_dma_physpages = 0;
-       efi_memmap_walk(count_dma_pages, &num_dma_physpages);
-
-       if (max_low_pfn < max_dma) {
-               zones_size[ZONE_DMA] = max_low_pfn;
-               zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
-       } else {
-               zones_size[ZONE_DMA] = max_dma;
-               zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
-               if (num_physpages > num_dma_physpages) {
-                       zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
-                       zholes_size[ZONE_NORMAL] =
-                               ((max_low_pfn - max_dma) -
-                                (num_physpages - num_dma_physpages));
-               }
-       }
-
-       max_gap = 0;
-       efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
-       if (max_gap < LARGE_GAP) {
-               vmem_map = (struct page *) 0;
-               free_area_init_node(0, &contig_page_data, zones_size, 0,
-                                   zholes_size);
-       } else {
-               unsigned long map_size;
-
-               /* allocate virtual_mem_map */
-
-               map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
-               vmalloc_end -= map_size;
-               vmem_map = (struct page *) vmalloc_end;
-               efi_memmap_walk(create_mem_map_page_table, NULL);
-
-               mem_map = contig_page_data.node_mem_map = vmem_map;
-               free_area_init_node(0, &contig_page_data, zones_size,
-                                   0, zholes_size);
-
-               printk("Virtual mem_map starts at 0x%p\n", mem_map);
-       }
-#else /* !CONFIG_VIRTUAL_MEM_MAP */
-       if (max_low_pfn < max_dma)
-               zones_size[ZONE_DMA] = max_low_pfn;
-       else {
-               zones_size[ZONE_DMA] = max_dma;
-               zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
-       }
-       free_area_init(zones_size);
-#endif /* !CONFIG_VIRTUAL_MEM_MAP */
-       zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
-}
-#endif /* !CONFIG_XEN */
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/pal.S
--- a/xen/arch/ia64/pal.S       Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,310 +0,0 @@
-/*
- * PAL Firmware support
- * IA-64 Processor Programmers Reference Vol 2
- *
- * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co
- *     David Mosberger <davidm@xxxxxxxxxx>
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- *
- * 05/22/2000 eranian Added support for stacked register calls
- * 05/24/2000 eranian Added support for physical mode static calls
- */
-
-#include <asm/asmmacro.h>
-#include <asm/processor.h>
-
-       .data
-pal_entry_point:
-       data8 ia64_pal_default_handler
-       .text
-
-/*
- * Set the PAL entry point address.  This could be written in C code, but we do it here
- * to keep it all in one module (besides, it's so trivial that it's
- * not a big deal).
- *
- * in0         Address of the PAL entry point (text address, NOT a function descriptor).
- */
-GLOBAL_ENTRY(ia64_pal_handler_init)
-       alloc r3=ar.pfs,1,0,0,0
-       movl r2=pal_entry_point
-       ;;
-       st8 [r2]=in0
-       br.ret.sptk.many rp
-END(ia64_pal_handler_init)
-
-/*
- * Default PAL call handler.  This needs to be coded in assembly because it uses
- * the static calling convention, i.e., the RSE may not be used and calls are
- * done via "br.cond" (not "br.call").
- */
-GLOBAL_ENTRY(ia64_pal_default_handler)
-       mov r8=-1
-       br.cond.sptk.many rp
-END(ia64_pal_default_handler)
-
-/*
- * Make a PAL call using the static calling convention.
- *
- * in0         Index of PAL service
- * in1 - in3   Remaining PAL arguments
- * in4        1 ==> clear psr.ic,  0 ==> don't clear psr.ic
- *
- */
-GLOBAL_ENTRY(ia64_pal_call_static)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
-       alloc loc1 = ar.pfs,5,5,0,0
-       movl loc2 = pal_entry_point
-1:     {
-         mov r28 = in0
-         mov r29 = in1
-         mov r8 = ip
-       }
-       ;;
-       ld8 loc2 = [loc2]               // loc2 <- entry point
-       tbit.nz p6,p7 = in4, 0
-       adds r8 = 1f-1b,r8
-       mov loc4=ar.rsc                 // save RSE configuration
-       ;;
-       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
-       mov loc3 = psr
-       mov loc0 = rp
-       .body
-       mov r30 = in2
-
-(p6)   rsm psr.i | psr.ic
-       mov r31 = in3
-       mov b7 = loc2
-
-(p7)   rsm psr.i
-       ;;
-(p6)   srlz.i
-       mov rp = r8
-       br.cond.sptk.many b7
-1:     mov psr.l = loc3
-       mov ar.rsc = loc4               // restore RSE configuration
-       mov ar.pfs = loc1
-       mov rp = loc0
-       ;;
-       srlz.d                          // serialize restoration of psr.l
-       br.ret.sptk.many b0
-END(ia64_pal_call_static)
-
-/*
- * Make a PAL call using the stacked registers calling convention.
- *
- * Inputs:
- *     in0         Index of PAL service
- *     in2 - in3   Remaining PAL arguments
- */
-GLOBAL_ENTRY(ia64_pal_call_stacked)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
-       alloc loc1 = ar.pfs,4,4,4,0
-       movl loc2 = pal_entry_point
-
-       mov r28  = in0                  // Index MUST be copied to r28
-       mov out0 = in0                  // AND in0 of PAL function
-       mov loc0 = rp
-       .body
-       ;;
-       ld8 loc2 = [loc2]               // loc2 <- entry point
-       mov out1 = in1
-       mov out2 = in2
-       mov out3 = in3
-       mov loc3 = psr
-       ;;
-       rsm psr.i
-       mov b7 = loc2
-       ;;
-       br.call.sptk.many rp=b7         // now make the call
-.ret0: mov psr.l  = loc3
-       mov ar.pfs = loc1
-       mov rp = loc0
-       ;;
-       srlz.d                          // serialize restoration of psr.l
-       br.ret.sptk.many b0
-END(ia64_pal_call_stacked)
-
-/*
- * Make a physical mode PAL call using the static registers calling convention.
- *
- * Inputs:
- *     in0         Index of PAL service
- *     in2 - in3   Remaning PAL arguments
- *
- * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
- * So we don't need to clear them.
- */
-#define PAL_PSR_BITS_TO_CLEAR                                                 \
-       (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT  | IA64_PSR_DB | IA64_PSR_RT | \
-        IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |               \
-        IA64_PSR_DFL | IA64_PSR_DFH)
-
-#define PAL_PSR_BITS_TO_SET                                                   \
-       (IA64_PSR_BN)
-
-
-GLOBAL_ENTRY(ia64_pal_call_phys_static)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
-       alloc loc1 = ar.pfs,4,7,0,0
-       movl loc2 = pal_entry_point
-1:     {
-         mov r28  = in0                // copy procedure index
-         mov r8   = ip                 // save ip to compute branch
-         mov loc0 = rp                 // save rp
-       }
-       .body
-       ;;
-       ld8 loc2 = [loc2]               // loc2 <- entry point
-       mov r29  = in1                  // first argument
-       mov r30  = in2                  // copy arg2
-       mov r31  = in3                  // copy arg3
-       ;;
-       mov loc3 = psr                  // save psr
-       adds r8  = 1f-1b,r8             // calculate return address for call
-       ;;
-       mov loc4=ar.rsc                 // save RSE configuration
-#ifdef XEN
-       dep.z loc2=loc2,0,60            // convert pal entry point to physical
-#else // XEN
-       dep.z loc2=loc2,0,61            // convert pal entry point to physical
-#endif // XEN
-       tpa r8=r8                       // convert rp to physical
-       ;;
-       mov b7 = loc2                   // install target to branch reg
-       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
-       movl r16=PAL_PSR_BITS_TO_CLEAR
-       movl r17=PAL_PSR_BITS_TO_SET
-       ;;
-       or loc3=loc3,r17                // add in psr the bits to set
-       ;;
-       andcm r16=loc3,r16              // removes bits to clear from psr
-       br.call.sptk.many rp=ia64_switch_mode_phys
-.ret1: mov rp = r8                     // install return address (physical)
-       mov loc5 = r19
-       mov loc6 = r20
-       br.cond.sptk.many b7
-1:
-       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
-       mov r16=loc3                    // r16= original psr
-       mov r19=loc5
-       mov r20=loc6
-       br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-.ret2:
-       mov psr.l = loc3                // restore init PSR
-
-       mov ar.pfs = loc1
-       mov rp = loc0
-       ;;
-       mov ar.rsc=loc4                 // restore RSE configuration
-       srlz.d                          // serialize restoration of psr.l
-       br.ret.sptk.many b0
-END(ia64_pal_call_phys_static)
-
-/*
- * Make a PAL call using the stacked registers in physical mode.
- *
- * Inputs:
- *     in0         Index of PAL service
- *     in2 - in3   Remaining PAL arguments
- */
-GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
-       .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
-       alloc   loc1 = ar.pfs,5,7,4,0
-       movl    loc2 = pal_entry_point
-1:     {
-         mov r28  = in0                // copy procedure index
-         mov loc0 = rp         // save rp
-       }
-       .body
-       ;;
-       ld8 loc2 = [loc2]               // loc2 <- entry point
-       mov out0 = in0          // first argument
-       mov out1 = in1          // copy arg2
-       mov out2 = in2          // copy arg3
-       mov out3 = in3          // copy arg3
-       ;;
-       mov loc3 = psr          // save psr
-       ;;
-       mov loc4=ar.rsc                 // save RSE configuration
-#ifdef XEN
-       dep.z loc2=loc2,0,60            // convert pal entry point to physical
-#else // XEN
-       dep.z loc2=loc2,0,61            // convert pal entry point to physical
-#endif // XEN
-       ;;
-       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
-       movl r16=PAL_PSR_BITS_TO_CLEAR
-       movl r17=PAL_PSR_BITS_TO_SET
-       ;;
-       or loc3=loc3,r17                // add in psr the bits to set
-       mov b7 = loc2                   // install target to branch reg
-       ;;
-       andcm r16=loc3,r16              // removes bits to clear from psr
-       br.call.sptk.many rp=ia64_switch_mode_phys
-.ret6:
-       mov loc5 = r19
-       mov loc6 = r20
-       br.call.sptk.many rp=b7         // now make the call
-.ret7:
-       mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
-       mov r16=loc3                    // r16= original psr
-       mov r19=loc5
-       mov r20=loc6
-       br.call.sptk.many rp=ia64_switch_mode_virt      // return to virtual mode
-
-.ret8: mov psr.l  = loc3               // restore init PSR
-       mov ar.pfs = loc1
-       mov rp = loc0
-       ;;
-       mov ar.rsc=loc4                 // restore RSE configuration
-       srlz.d                          // serialize restoration of psr.l
-       br.ret.sptk.many b0
-END(ia64_pal_call_phys_stacked)
-
-/*
- * Save scratch fp regs which aren't saved in pt_regs already (fp10-fp15).
- *
- * NOTE: We need to do this since firmware (SAL and PAL) may use any of the scratch
- * regs in the fp-low partition.
- *
- * Inputs:
- *      in0    Address of stack storage for fp regs
- */
-GLOBAL_ENTRY(ia64_save_scratch_fpregs)
-       alloc r3=ar.pfs,1,0,0,0
-       add r2=16,in0
-       ;;
-       stf.spill [in0] = f10,32
-       stf.spill [r2]  = f11,32
-       ;;
-       stf.spill [in0] = f12,32
-       stf.spill [r2]  = f13,32
-       ;;
-       stf.spill [in0] = f14,32
-       stf.spill [r2]  = f15,32
-       br.ret.sptk.many rp
-END(ia64_save_scratch_fpregs)
-
-/*
- * Load scratch fp regs (fp10-fp15)
- *
- * Inputs:
- *      in0    Address of stack storage for fp regs
- */
-GLOBAL_ENTRY(ia64_load_scratch_fpregs)
-       alloc r3=ar.pfs,1,0,0,0
-       add r2=16,in0
-       ;;
-       ldf.fill  f10 = [in0],32
-       ldf.fill  f11 = [r2],32
-       ;;
-       ldf.fill  f12 = [in0],32
-       ldf.fill  f13 = [r2],32
-       ;;
-       ldf.fill  f14 = [in0],32
-       ldf.fill  f15 = [r2],32
-       br.ret.sptk.many rp
-END(ia64_load_scratch_fpregs)
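
The only Xen-specific change in the physical-mode paths above is the dep.z
width: 60 bits under XEN versus 61 otherwise.  A minimal C sketch (not from
the patch; the entry-point value is hypothetical) of what dep.z loc2=loc2,0,n
computes:

#include <stdio.h>

/* dep.z dst=src,0,n deposits the low n bits of src into a zeroed
 * destination, i.e. it strips the region bits off a virtual address. */
static unsigned long to_phys(unsigned long va, int keep_bits)
{
        return va & ((1UL << keep_bits) - 1);
}

int main(void)
{
        unsigned long pal_va = 0xe000000000123400UL;    /* hypothetical */

        printf("xen   (60 bits): %#lx\n", to_phys(pal_va, 60));
        printf("linux (61 bits): %#lx\n", to_phys(pal_va, 61));
        return 0;
}
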
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/setup.c
--- a/xen/arch/ia64/setup.c     Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,773 +0,0 @@
-/*
- * Architecture-specific setup.
- *
- * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- * Copyright (C) 2000, Rohit Seth <rohit.seth@xxxxxxxxx>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- *
- * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
- * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
- * 03/31/00 R.Seth     cpu_initialized and current->processor fixes
- * 02/04/00 D.Mosberger        some more get_cpuinfo fixes...
- * 02/01/00 R.Seth     fixed get_cpuinfo for SMP
- * 01/07/99 S.Eranian  added the support for command line argument
- * 06/24/99 W.Drummond added boot_cpu_data.
- */
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <linux/acpi.h>
-#include <linux/bootmem.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/reboot.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/threads.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/efi.h>
-#include <linux/initrd.h>
-
-#include <asm/ia32.h>
-#include <asm/machvec.h>
-#include <asm/mca.h>
-#include <asm/meminit.h>
-#include <asm/page.h>
-#include <asm/patch.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/sal.h>
-#include <asm/sections.h>
-#include <asm/serial.h>
-#include <asm/setup.h>
-#include <asm/smp.h>
-#include <asm/system.h>
-#include <asm/unistd.h>
-#ifdef CONFIG_VTI
-#include <asm/vmx.h>
-#endif // CONFIG_VTI
-#include <asm/io.h>
-
-#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
-# error "struct cpuinfo_ia64 too big!"
-#endif
-
-#ifdef CONFIG_SMP
-unsigned long __per_cpu_offset[NR_CPUS];
-EXPORT_SYMBOL(__per_cpu_offset);
-#endif
-
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
-DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
-DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
-unsigned long ia64_cycles_per_usec;
-struct ia64_boot_param *ia64_boot_param;
-struct screen_info screen_info;
-
-unsigned long ia64_max_cacheline_size;
-unsigned long ia64_iobase;     /* virtual address for I/O accesses */
-EXPORT_SYMBOL(ia64_iobase);
-struct io_space io_space[MAX_IO_SPACES];
-EXPORT_SYMBOL(io_space);
-unsigned int num_io_spaces;
-
-unsigned char aux_device_present = 0xaa;        /* XXX remove this when legacy I/O is gone */
-
-/*
- * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
- * mask specifies a mask of address bits that must be 0 in order for two buffers to be
- * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
- * address of the second buffer must be aligned to (merge_mask+1) in order to be
- * mergeable).  By default, we assume there is no I/O MMU which can merge physically
- * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
- * page-size of 2^64.
- */
-unsigned long ia64_max_iommu_merge_mask = ~0UL;
-EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
-
-/*
- * We use a special marker for the end of memory and it uses the extra (+1) slot
- */
-struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
-int num_rsvd_regions;
-
-
-/*
- * Filter incoming memory segments based on the primitive map created from the boot
- * parameters. Segments contained in the map are removed from the memory ranges. A
- * caller-specified function is called with the memory ranges that remain after filtering.
- * This routine does not assume the incoming segments are sorted.
- */
-int
-filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
-{
-       unsigned long range_start, range_end, prev_start;
-       void (*func)(unsigned long, unsigned long, int);
-       int i;
-
-#if IGNORE_PFN0
-       if (start == PAGE_OFFSET) {
-               printk(KERN_WARNING "warning: skipping physical page 0\n");
-               start += PAGE_SIZE;
-               if (start >= end) return 0;
-       }
-#endif
-       /*
-        * lowest possible address (walker uses virtual)
-        */
-       prev_start = PAGE_OFFSET;
-       func = arg;
-
-       for (i = 0; i < num_rsvd_regions; ++i) {
-               range_start = max(start, prev_start);
-               range_end   = min(end, rsvd_region[i].start);
-
-               if (range_start < range_end)
-#ifdef XEN
-               {
-               /* init_boot_pages requires "ps, pe" */
-                       printk("Init boot pages: 0x%lx -> 0x%lx.\n",
-                               __pa(range_start), __pa(range_end));
-                       (*func)(__pa(range_start), __pa(range_end), 0);
-               }
-#else
-                       call_pernode_memory(__pa(range_start), range_end - range_start, func);
-#endif
-
-               /* nothing more available in this segment */
-               if (range_end == end) return 0;
-
-               prev_start = rsvd_region[i].end;
-       }
-       /* end of memory marker allows full processing inside loop body */
-       return 0;
-}
-
-static void
-sort_regions (struct rsvd_region *rsvd_region, int max)
-{
-       int j;
-
-       /* simple bubble sorting */
-       while (max--) {
-               for (j = 0; j < max; ++j) {
-                       if (rsvd_region[j].start > rsvd_region[j+1].start) {
-                               struct rsvd_region tmp;
-                               tmp = rsvd_region[j];
-                               rsvd_region[j] = rsvd_region[j + 1];
-                               rsvd_region[j + 1] = tmp;
-                       }
-               }
-       }
-}
-
-/**
- * reserve_memory - setup reserved memory areas
- *
- * Setup the reserved memory areas set aside for the boot parameters,
- * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
- * see include/asm-ia64/meminit.h if you need to define more.
- */
-void
-reserve_memory (void)
-{
-       int n = 0;
-
-       /*
-        * none of the entries in this table overlap
-        */
-       rsvd_region[n].start = (unsigned long) ia64_boot_param;
-       rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
-       n++;
-
-       rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
-       rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
-       n++;
-
-       rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
-       rsvd_region[n].end   = (rsvd_region[n].start
-                               + strlen(__va(ia64_boot_param->command_line)) + 1);
-       n++;
-
-       rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
-#ifdef XEN
-       /* Reserve xen image/bitmap/xen-heap */
-       rsvd_region[n].end   = rsvd_region[n].start + xenheap_size;
-#else
-       rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
-#endif
-       n++;
-
-#ifdef CONFIG_BLK_DEV_INITRD
-       if (ia64_boot_param->initrd_start) {
-               rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
-               rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
-               n++;
-       }
-#endif
-
-       /* end of memory marker */
-       rsvd_region[n].start = ~0UL;
-       rsvd_region[n].end   = ~0UL;
-       n++;
-
-       num_rsvd_regions = n;
-
-       sort_regions(rsvd_region, num_rsvd_regions);
-}
-
-/**
- * find_initrd - get initrd parameters from the boot parameter structure
- *
- * Grab the initrd start and end from the boot parameter struct given us by
- * the boot loader.
- */
-void
-find_initrd (void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-       if (ia64_boot_param->initrd_start) {
-               initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
-               initrd_end   = initrd_start+ia64_boot_param->initrd_size;
-
-               printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
-                      initrd_start, ia64_boot_param->initrd_size);
-       }
-#endif
-}
-
-static void __init
-io_port_init (void)
-{
-       extern unsigned long ia64_iobase;
-       unsigned long phys_iobase;
-
-       /*
-        *  Set `iobase' to the appropriate address in region 6 (uncached access range).
-        *
-        *  The EFI memory map is the "preferred" location to get the I/O port space base,
-        *  rather than relying on AR.KR0. This should become more clear in future SAL
-        *  specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
-        *  found in the memory map.
-        */
-       phys_iobase = efi_get_iobase();
-       if (phys_iobase)
-               /* set AR.KR0 since this is all we use it for anyway */
-               ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
-       else {
-               phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
-               printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
-                      "to AR.KR0\n");
-               printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
-       }
-       ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
-
-       /* setup legacy IO port space */
-       io_space[0].mmio_base = ia64_iobase;
-       io_space[0].sparse = 1;
-       num_io_spaces = 1;
-}
-
-/**
- * early_console_setup - setup debugging console
- *
- * Consoles started here require little enough setup that we can start using
- * them very early in the boot process, either right after the machine
- * vector initialization, or even before if the drivers can detect their hw.
- *
- * Returns non-zero if a console couldn't be set up.
- */
-static inline int __init
-early_console_setup (char *cmdline)
-{
-#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
-       {
-               extern int sn_serial_console_early_setup(void);
-               if (!sn_serial_console_early_setup())
-                       return 0;
-       }
-#endif
-#ifdef CONFIG_EFI_PCDP
-       if (!efi_setup_pcdp_console(cmdline))
-               return 0;
-#endif
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-       if (!early_serial_console_init(cmdline))
-               return 0;
-#endif
-
-       return -1;
-}
-
-static inline void
-mark_bsp_online (void)
-{
-#ifdef CONFIG_SMP
-       /* If we register an early console, allow CPU 0 to printk */
-       cpu_set(smp_processor_id(), cpu_online_map);
-#endif
-}
-
-void __init
-#ifdef XEN
-early_setup_arch (char **cmdline_p)
-#else
-setup_arch (char **cmdline_p)
-#endif
-{
-       unw_init();
-
-       ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
-
-       *cmdline_p = __va(ia64_boot_param->command_line);
-#ifdef XEN
-       efi_init();
-#else
-       strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
-
-       efi_init();
-       io_port_init();
-#endif
-
-#ifdef CONFIG_IA64_GENERIC
-       {
-               const char *mvec_name = strstr (*cmdline_p, "machvec=");
-               char str[64];
-
-               if (mvec_name) {
-                       const char *end;
-                       size_t len;
-
-                       mvec_name += 8;
-                       end = strchr (mvec_name, ' ');
-                       if (end)
-                               len = end - mvec_name;
-                       else
-                               len = strlen (mvec_name);
-                       len = min(len, sizeof (str) - 1);
-                       strncpy (str, mvec_name, len);
-                       str[len] = '\0';
-                       mvec_name = str;
-               } else
-                       mvec_name = acpi_get_sysname();
-               machvec_init(mvec_name);
-       }
-#endif
-
-#ifdef XEN
-       early_cmdline_parse(cmdline_p);
-       cmdline_parse(*cmdline_p);
-#undef CONFIG_ACPI_BOOT
-#endif
-       if (early_console_setup(*cmdline_p) == 0)
-               mark_bsp_online();
-
-#ifdef CONFIG_ACPI_BOOT
-       /* Initialize the ACPI boot-time table parser */
-       acpi_table_init();
-# ifdef CONFIG_ACPI_NUMA
-       acpi_numa_init();
-# endif
-#else
-# ifdef CONFIG_SMP
-       smp_build_cpu_map();    /* happens, e.g., with the Ski simulator */
-# endif
-#endif /* CONFIG_ACPI_BOOT */
-
-#ifndef XEN
-       find_memory();
-#else
-       io_port_init();
-}
-
-void __init
-late_setup_arch (char **cmdline_p)
-{
-#undef CONFIG_ACPI_BOOT
-       acpi_table_init();
-#endif
-       /* process SAL system table: */
-       ia64_sal_init(efi.sal_systab);
-
-#ifdef CONFIG_SMP
-       cpu_physical_id(0) = hard_smp_processor_id();
-#endif
-
-#ifdef CONFIG_VTI
-       identify_vmx_feature();
-#endif // CONFIG_VTI
-
-       cpu_init();     /* initialize the bootstrap CPU */
-
-#ifdef CONFIG_ACPI_BOOT
-       acpi_boot_init();
-#endif
-
-#ifdef CONFIG_VT
-       if (!conswitchp) {
-# if defined(CONFIG_DUMMY_CONSOLE)
-               conswitchp = &dummy_con;
-# endif
-# if defined(CONFIG_VGA_CONSOLE)
-               /*
-                * Non-legacy systems may route legacy VGA MMIO range to system
-                * memory.  vga_con probes the MMIO hole, so memory looks like
-                * a VGA device to it.  The EFI memory map can tell us if it's
-                * memory so we can avoid this problem.
-                */
-               if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
-                       conswitchp = &vga_con;
-# endif
-       }
-#endif
-
-       /* enable IA-64 Machine Check Abort Handling unless disabled */
-       if (!strstr(saved_command_line, "nomca"))
-               ia64_mca_init();
-
-       platform_setup(cmdline_p);
-       paging_init();
-}
-
-/*
- * Display cpu info for all CPUs.
- */
-static int
-show_cpuinfo (struct seq_file *m, void *v)
-{
-#ifdef CONFIG_SMP
-#      define lpj      c->loops_per_jiffy
-#      define cpunum   c->cpu
-#else
-#      define lpj      loops_per_jiffy
-#      define cpunum   0
-#endif
-       static struct {
-               unsigned long mask;
-               const char *feature_name;
-       } feature_bits[] = {
-               { 1UL << 0, "branchlong" },
-               { 1UL << 1, "spontaneous deferral"},
-               { 1UL << 2, "16-byte atomic ops" }
-       };
-       char family[32], features[128], *cp, sep;
-       struct cpuinfo_ia64 *c = v;
-       unsigned long mask;
-       int i;
-
-       mask = c->features;
-
-       switch (c->family) {
-             case 0x07:        memcpy(family, "Itanium", 8); break;
-             case 0x1f:        memcpy(family, "Itanium 2", 10); break;
-             default:          sprintf(family, "%u", c->family); break;
-       }
-
-       /* build the feature string: */
-       memcpy(features, " standard", 10);
-       cp = features;
-       sep = 0;
-       for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
-               if (mask & feature_bits[i].mask) {
-                       if (sep)
-                               *cp++ = sep;
-                       sep = ',';
-                       *cp++ = ' ';
-                       strcpy(cp, feature_bits[i].feature_name);
-                       cp += strlen(feature_bits[i].feature_name);
-                       mask &= ~feature_bits[i].mask;
-               }
-       }
-       if (mask) {
-               /* print unknown features as a hex value: */
-               if (sep)
-                       *cp++ = sep;
-               sprintf(cp, " 0x%lx", mask);
-       }
-
-       seq_printf(m,
-                  "processor  : %d\n"
-                  "vendor     : %s\n"
-                  "arch       : IA-64\n"
-                  "family     : %s\n"
-                  "model      : %u\n"
-                  "revision   : %u\n"
-                  "archrev    : %u\n"
-                  "features   :%s\n"   /* don't change this---it _is_ right! */
-                  "cpu number : %lu\n"
-                  "cpu regs   : %u\n"
-                  "cpu MHz    : %lu.%06lu\n"
-                  "itc MHz    : %lu.%06lu\n"
-                  "BogoMIPS   : %lu.%02lu\n\n",
-                  cpunum, c->vendor, family, c->model, c->revision, c->archrev,
-                  features, c->ppn, c->number,
-                  c->proc_freq / 1000000, c->proc_freq % 1000000,
-                  c->itc_freq / 1000000, c->itc_freq % 1000000,
-                  lpj*HZ/500000, (lpj*HZ/5000) % 100);
-       return 0;
-}
-
-static void *
-c_start (struct seq_file *m, loff_t *pos)
-{
-#ifdef CONFIG_SMP
-       while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
-               ++*pos;
-#endif
-       return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
-}
-
-static void *
-c_next (struct seq_file *m, void *v, loff_t *pos)
-{
-       ++*pos;
-       return c_start(m, pos);
-}
-
-static void
-c_stop (struct seq_file *m, void *v)
-{
-}
-
-#ifndef XEN
-struct seq_operations cpuinfo_op = {
-       .start =        c_start,
-       .next =         c_next,
-       .stop =         c_stop,
-       .show =         show_cpuinfo
-};
-#endif
-
-void
-identify_cpu (struct cpuinfo_ia64 *c)
-{
-       union {
-               unsigned long bits[5];
-               struct {
-                       /* id 0 & 1: */
-                       char vendor[16];
-
-                       /* id 2 */
-                       u64 ppn;                /* processor serial number */
-
-                       /* id 3: */
-                       unsigned number         :  8;
-                       unsigned revision       :  8;
-                       unsigned model          :  8;
-                       unsigned family         :  8;
-                       unsigned archrev        :  8;
-                       unsigned reserved       : 24;
-
-                       /* id 4: */
-                       u64 features;
-               } field;
-       } cpuid;
-       pal_vm_info_1_u_t vm1;
-       pal_vm_info_2_u_t vm2;
-       pal_status_t status;
-       unsigned long impl_va_msb = 50, phys_addr_size = 44;    /* Itanium defaults */
-       int i;
-
-       for (i = 0; i < 5; ++i)
-               cpuid.bits[i] = ia64_get_cpuid(i);
-
-       memcpy(c->vendor, cpuid.field.vendor, 16);
-#ifdef CONFIG_SMP
-       c->cpu = smp_processor_id();
-#endif
-       c->ppn = cpuid.field.ppn;
-       c->number = cpuid.field.number;
-       c->revision = cpuid.field.revision;
-       c->model = cpuid.field.model;
-       c->family = cpuid.field.family;
-       c->archrev = cpuid.field.archrev;
-       c->features = cpuid.field.features;
-
-       status = ia64_pal_vm_summary(&vm1, &vm2);
-       if (status == PAL_STATUS_SUCCESS) {
-               impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
-               phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
-       }
-       c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
-       c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
-
-#ifdef CONFIG_VTI
-       /* If vmx feature is on, do necessary initialization for vmx */
-       if (vmx_enabled)
-               vmx_init_env();
-#endif
-}
-
-void
-setup_per_cpu_areas (void)
-{
-       /* start_kernel() requires this... */
-}
-
-static void
-get_max_cacheline_size (void)
-{
-       unsigned long line_size, max = 1;
-       u64 l, levels, unique_caches;
-        pal_cache_config_info_t cci;
-        s64 status;
-
-        status = ia64_pal_cache_summary(&levels, &unique_caches);
-        if (status != 0) {
-                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
-                       __FUNCTION__, status);
-                max = SMP_CACHE_BYTES;
-               goto out;
-        }
-
-       for (l = 0; l < levels; ++l) {
-               status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
-                                                   &cci);
-               if (status != 0) {
-                       printk(KERN_ERR
-                              "%s: ia64_pal_cache_config_info(l=%lu) failed (status=%ld)\n",
-                              __FUNCTION__, l, status);
-                       max = SMP_CACHE_BYTES;
-               }
-               line_size = 1 << cci.pcci_line_size;
-               if (line_size > max)
-                       max = line_size;
-        }
-  out:
-       if (max > ia64_max_cacheline_size)
-               ia64_max_cacheline_size = max;
-}
-
-/*
- * cpu_init() initializes state that is per-CPU.  This function acts
- * as a 'CPU state barrier', nothing should get across.
- */
-void
-cpu_init (void)
-{
-       extern void __devinit ia64_mmu_init (void *);
-       unsigned long num_phys_stacked;
-       pal_vm_info_2_u_t vmi;
-       unsigned int max_ctx;
-       struct cpuinfo_ia64 *cpu_info;
-       void *cpu_data;
-
-       cpu_data = per_cpu_init();
-
-       /*
-        * We set ar.k3 so that assembly code in MCA handler can compute
-        * physical addresses of per cpu variables with a simple:
-        *   phys = ar.k3 + &per_cpu_var
-        */
-       ia64_set_kr(IA64_KR_PER_CPU_DATA,
-                   ia64_tpa(cpu_data) - (long) __per_cpu_start);
-
-       get_max_cacheline_size();
-
-       /*
-        * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
-        * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
-        * depends on the data returned by identify_cpu().  We break the dependency by
-        * accessing cpu_data() through the canonical per-CPU address.
-        */
-       cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
-       identify_cpu(cpu_info);
-
-#ifdef CONFIG_MCKINLEY
-       {
-#              define FEATURE_SET 16
-               struct ia64_pal_retval iprv;
-
-               if (cpu_info->family == 0x1f) {
-                       PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
-                       if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
-                               PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
-                                             (iprv.v1 | 0x80), FEATURE_SET, 0);
-               }
-       }
-#endif
-
-       /* Clear the stack memory reserved for pt_regs: */
-       memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
-
-       ia64_set_kr(IA64_KR_FPU_OWNER, 0);
-
-       /*
-        * Initialize default control register to defer all speculative faults.  The
-        * kernel MUST NOT depend on a particular setting of these bits (in other words,
-        * the kernel must have recovery code for all speculative accesses).  Turn on
-        * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
-        * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
-        * be fine).
-        */
-       ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
-                                       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
-       atomic_inc(&init_mm.mm_count);
-       current->active_mm = &init_mm;
-#ifdef XEN
-       if (current->domain->arch.mm)
-#else
-       if (current->mm)
-#endif
-               BUG();
-
-       ia64_mmu_init(ia64_imva(cpu_data));
-       ia64_mca_cpu_init(ia64_imva(cpu_data));
-
-#ifdef CONFIG_IA32_SUPPORT
-       ia32_cpu_init();
-#endif
-
-       /* Clear ITC to eliminate sched_clock() overflows in human time.  */
-       ia64_set_itc(0);
-
-       /* disable all local interrupt sources: */
-       ia64_set_itv(1 << 16);
-       ia64_set_lrr0(1 << 16);
-       ia64_set_lrr1(1 << 16);
-       ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
-       ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
-
-       /* clear TPR & XTP to enable all interrupt classes: */
-       ia64_setreg(_IA64_REG_CR_TPR, 0);
-#ifdef CONFIG_SMP
-       normal_xtp();
-#endif
-
-       /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
-       if (ia64_pal_vm_summary(NULL, &vmi) == 0)
-               max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
-       else {
-               printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
-               max_ctx = (1U << 15) - 1;       /* use architected minimum */
-       }
-       while (max_ctx < ia64_ctx.max_ctx) {
-               unsigned int old = ia64_ctx.max_ctx;
-               if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
-                       break;
-       }
-
-       if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
-               printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
-                      "stacked regs\n");
-               num_phys_stacked = 96;
-       }
-       /* size of physical stacked register partition plus 8 bytes: */
-       __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
-       platform_cpu_init();
-}
-
-void
-check_bugs (void)
-{
-       ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
-                              (unsigned long) __end___mckinley_e9_bundles);
-}
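
The interval walk in filter_rsvd_memory() above subtracts the sorted reserved
regions from each incoming segment.  A self-contained sketch of the same
logic (the helper names and region values are hypothetical, not from the
patch):

#include <stdio.h>

struct rsvd { unsigned long start, end; };

/* Call func on the pieces of [start,end) not covered by the sorted
 * reserved regions r[0..n-1]; the last entry is the ~0UL end marker. */
static void walk_free(unsigned long start, unsigned long end,
                      const struct rsvd *r, int n,
                      void (*func)(unsigned long, unsigned long))
{
        unsigned long prev = 0;         /* lowest possible address */
        int i;

        for (i = 0; i < n; ++i) {
                unsigned long s = start > prev ? start : prev;
                unsigned long e = end < r[i].start ? end : r[i].start;

                if (s < e)
                        func(s, e);
                if (e == end)
                        return;         /* nothing more in this segment */
                prev = r[i].end;
        }
}

static void show(unsigned long s, unsigned long e)
{
        printf("free: %#lx-%#lx\n", s, e);
}

int main(void)
{
        struct rsvd r[] = {             /* hypothetical, sorted */
                { 0x1000, 0x2000 }, { 0x5000, 0x6000 }, { ~0UL, ~0UL }
        };

        walk_free(0x0, 0x8000, r, 3, show);
        return 0;
}
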
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/time.c
--- a/xen/arch/ia64/time.c      Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,264 +0,0 @@
-/*
- * linux/arch/ia64/kernel/time.c
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- *     David Mosberger <davidm@xxxxxxxxxx>
- * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
- * Copyright (C) 1999-2000 VA Linux Systems
- * Copyright (C) 1999-2000 Walt Drummond <drummond@xxxxxxxxxxx>
- */
-#include <linux/config.h>
-
-#include <linux/cpu.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/profile.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/interrupt.h>
-#include <linux/efi.h>
-#include <linux/profile.h>
-#include <linux/timex.h>
-
-#include <asm/machvec.h>
-#include <asm/delay.h>
-#include <asm/hw_irq.h>
-#include <asm/ptrace.h>
-#include <asm/sal.h>
-#include <asm/sections.h>
-#include <asm/system.h>
-#ifdef XEN
-#include <linux/jiffies.h>     // not included by xen/sched.h
-#endif
-
-extern unsigned long wall_jiffies;
-
-u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
-#define TIME_KEEPER_ID 0       /* smp_processor_id() of time-keeper */
-
-#ifdef CONFIG_IA64_DEBUG_IRQ
-
-unsigned long last_cli_ip;
-EXPORT_SYMBOL(last_cli_ip);
-
-#endif
-
-#ifndef XEN
-static struct time_interpolator itc_interpolator = {
-       .shift = 16,
-       .mask = 0xffffffffffffffffLL,
-       .source = TIME_SOURCE_CPU
-};
-
-static irqreturn_t
-timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
-{
-       unsigned long new_itm;
-
-       if (unlikely(cpu_is_offline(smp_processor_id()))) {
-               return IRQ_HANDLED;
-       }
-
-       platform_timer_interrupt(irq, dev_id, regs);
-
-       new_itm = local_cpu_data->itm_next;
-
-       if (!time_after(ia64_get_itc(), new_itm))
-               printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
-                      ia64_get_itc(), new_itm);
-
-       profile_tick(CPU_PROFILING, regs);
-
-       while (1) {
-               update_process_times(user_mode(regs));
-
-               new_itm += local_cpu_data->itm_delta;
-
-               if (smp_processor_id() == TIME_KEEPER_ID) {
-                       /*
-                        * Here we are in the timer irq handler. We have irqs locally
-                        * disabled, but we don't know if the timer_bh is running on
-                        * another CPU. We need to avoid an SMP race by acquiring the
-                        * xtime_lock.
-                        */
-                       write_seqlock(&xtime_lock);
-                       do_timer(regs);
-                       local_cpu_data->itm_next = new_itm;
-                       write_sequnlock(&xtime_lock);
-               } else
-                       local_cpu_data->itm_next = new_itm;
-
-               if (time_after(new_itm, ia64_get_itc()))
-                       break;
-       }
-
-       do {
-               /*
-                * If we're too close to the next clock tick for
-                * comfort, we increase the safety margin by
-                * intentionally dropping the next tick(s).  We do NOT
-                * update itm.next because that would force us to call
-                * do_timer() which in turn would let our clock run
-                * too fast (with the potentially devastating effect
-                * of losing monotony of time).
-                */
-               while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
-                       new_itm += local_cpu_data->itm_delta;
-               ia64_set_itm(new_itm);
-               /* double check, in case we got hit by a (slow) PMI: */
-       } while (time_after_eq(ia64_get_itc(), new_itm));
-       return IRQ_HANDLED;
-}
-#endif
-
-/*
- * Encapsulate access to the itm structure for SMP.
- */
-void
-ia64_cpu_local_tick (void)
-{
-       int cpu = smp_processor_id();
-       unsigned long shift = 0, delta;
-
-       /* arrange for the cycle counter to generate a timer interrupt: */
-       ia64_set_itv(IA64_TIMER_VECTOR);
-
-       delta = local_cpu_data->itm_delta;
-       /*
-        * Stagger the timer tick for each CPU so they don't occur all at (almost) the
-        * same time:
-        */
-       if (cpu) {
-               unsigned long hi = 1UL << ia64_fls(cpu);
-               shift = (2*(cpu - hi) + 1) * delta/hi/2;
-       }
-       local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
-       ia64_set_itm(local_cpu_data->itm_next);
-}
-
-static int nojitter;
-
-static int __init nojitter_setup(char *str)
-{
-       nojitter = 1;
-       printk("Jitter checking for ITC timers disabled\n");
-       return 1;
-}
-
-__setup("nojitter", nojitter_setup);
-
-
-void __devinit
-ia64_init_itm (void)
-{
-       unsigned long platform_base_freq, itc_freq;
-       struct pal_freq_ratio itc_ratio, proc_ratio;
-       long status, platform_base_drift, itc_drift;
-
-       /*
-        * According to SAL v2.6, we need to use a SAL call to determine the platform base
-        * frequency and then a PAL call to determine the frequency ratio between the ITC
-        * and the base frequency.
-        */
-       status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
-                                   &platform_base_freq, &platform_base_drift);
-       if (status != 0) {
-               printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
-       } else {
-               status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
-               if (status != 0)
-                       printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
-       }
-       if (status != 0) {
-               /* invent "random" values */
-               printk(KERN_ERR
-                      "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
-               platform_base_freq = 100000000;
-               platform_base_drift = -1;       /* no drift info */
-               itc_ratio.num = 3;
-               itc_ratio.den = 1;
-       }
-       if (platform_base_freq < 40000000) {
-               printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
-                      platform_base_freq);
-               platform_base_freq = 75000000;
-               platform_base_drift = -1;
-       }
-       if (!proc_ratio.den)
-               proc_ratio.den = 1;     /* avoid division by zero */
-       if (!itc_ratio.den)
-               itc_ratio.den = 1;      /* avoid division by zero */
-
-       itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
-
-       local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
-       printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
-              "ITC freq=%lu.%03luMHz", smp_processor_id(),
-              platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
-              itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
-
-       if (platform_base_drift != -1) {
-               itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
-               printk("+/-%ldppm\n", itc_drift);
-       } else {
-               itc_drift = -1;
-               printk("\n");
-       }
-
-       local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
-       local_cpu_data->itc_freq = itc_freq;
-       local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
-       local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
-                                       + itc_freq/2)/itc_freq;
-
-       if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
-#ifndef XEN
-               itc_interpolator.frequency = local_cpu_data->itc_freq;
-               itc_interpolator.drift = itc_drift;
-#ifdef CONFIG_SMP
-               /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
-                * Jitter compensation requires a cmpxchg which may limit
-                * the scalability of the syscalls for retrieving time.
-                * The ITC synchronization is usually successful to within a few
-                * ITC ticks but this is not a sure thing. If you need to improve
-                * timer performance in SMP situations then boot the kernel with the
-                * "nojitter" option. However, doing so may result in time fluctuating (maybe
-                * even going backward) if the ITC offsets between the individual CPUs
-                * are too large.
-                */
-               if (!nojitter) itc_interpolator.jitter = 1;
-#endif
-               register_time_interpolator(&itc_interpolator);
-#endif
-       }
-
-       /* Setup the CPU local timer tick */
-       ia64_cpu_local_tick();
-}
-
-#ifndef XEN
-static struct irqaction timer_irqaction = {
-       .handler =      timer_interrupt,
-       .flags =        SA_INTERRUPT,
-       .name =         "timer"
-};
-
-void __init
-time_init (void)
-{
-       register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
-       efi_gettimeofday(&xtime);
-       ia64_init_itm();
-
-       /*
-        * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
-        * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
-        */
-       set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
-}
-#endif
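
The frequency setup in ia64_init_itm() above reduces to a few rounded integer
divisions.  This sketch reproduces them with the fallback values the code
invents when SAL/PAL fail; HZ=1024 is an assumption, not taken from the
patch:

#include <stdio.h>

#define HZ           1024UL             /* assumed, not from the patch */
#define USEC_PER_SEC 1000000UL

int main(void)
{
        unsigned long platform_base_freq = 100000000UL; /* invented fallback */
        unsigned long itc_num = 3, itc_den = 1;         /* invented ratio */
        unsigned long itc_freq, itm_delta, cyc_per_usec;

        itc_freq = (platform_base_freq * itc_num) / itc_den;
        itm_delta = (itc_freq + HZ / 2) / HZ;   /* rounded ITC cycles per tick */
        cyc_per_usec = (itc_freq + USEC_PER_SEC / 2) / USEC_PER_SEC;

        printf("ITC %lu.%03luMHz, %lu cycles/tick, %lu cycles/usec\n",
               itc_freq / 1000000, (itc_freq / 1000) % 1000,
               itm_delta, cyc_per_usec);
        return 0;
}
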
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/tlb.c
--- a/xen/arch/ia64/tlb.c       Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,199 +0,0 @@
-/*
- * TLB support routines.
- *
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *
- * 08/02/00 A. Mallick <asit.k.mallick@xxxxxxxxx>
- *             Modified RID allocation for SMP
- *          Goutham Rao <goutham.rao@xxxxxxxxx>
- *              IPI based ptc implementation and A-step IPI implementation.
- */
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/delay.h>
-#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
-#include <asm/pal.h>
-#include <asm/tlbflush.h>
-
-static struct {
-       unsigned long mask;     /* mask of supported purge page-sizes */
-       unsigned long max_bits; /* log2() of largest supported purge page-size */
-} purge;
-
-struct ia64_ctx ia64_ctx = {
-       .lock =         SPIN_LOCK_UNLOCKED,
-       .next =         1,
-       .limit =        (1 << 15) - 1,          /* start out with the safe (architected) limit */
-       .max_ctx =      ~0U
-};
-
-DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
-
-/*
- * Acquire the ia64_ctx.lock before calling this function!
- */
-void
-wrap_mmu_context (struct mm_struct *mm)
-{
-#ifdef XEN
-printf("wrap_mmu_context: called, not implemented\n");
-#else
-       unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
-       struct task_struct *tsk;
-       int i;
-
-       if (ia64_ctx.next > max_ctx)
-               ia64_ctx.next = 300;    /* skip daemons */
-       ia64_ctx.limit = max_ctx + 1;
-
-       /*
-        * Scan all the task's mm->context and set proper safe range
-        */
-
-       read_lock(&tasklist_lock);
-  repeat:
-       for_each_process(tsk) {
-               if (!tsk->mm)
-                       continue;
-               tsk_context = tsk->mm->context;
-               if (tsk_context == ia64_ctx.next) {
-                       if (++ia64_ctx.next >= ia64_ctx.limit) {
-                               /* empty range: reset the range limit and start over */
-                               if (ia64_ctx.next > max_ctx)
-                                       ia64_ctx.next = 300;
-                               ia64_ctx.limit = max_ctx + 1;
-                               goto repeat;
-                       }
-               }
-               if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
-                       ia64_ctx.limit = tsk_context;
-       }
-       read_unlock(&tasklist_lock);
-       /* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
-       {
-               int cpu = get_cpu(); /* prevent preemption/migration */
-               for (i = 0; i < NR_CPUS; ++i)
-                       if (cpu_online(i) && (i != cpu))
-                               per_cpu(ia64_need_tlb_flush, i) = 1;
-               put_cpu();
-       }
-       local_flush_tlb_all();
-#endif
-}
-
-void
-ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
-{
-       static DEFINE_SPINLOCK(ptcg_lock);
-
-       /* HW requires global serialization of ptc.ga.  */
-       spin_lock(&ptcg_lock);
-       {
-               do {
-                       /*
-                        * Flush ALAT entries also.
-                        */
-                       ia64_ptcga(start, (nbits<<2));
-                       ia64_srlz_i();
-                       start += (1UL << nbits);
-               } while (start < end);
-       }
-       spin_unlock(&ptcg_lock);
-}
-
-void
-local_flush_tlb_all (void)
-{
-       unsigned long i, j, flags, count0, count1, stride0, stride1, addr;
-
-       addr    = local_cpu_data->ptce_base;
-       count0  = local_cpu_data->ptce_count[0];
-       count1  = local_cpu_data->ptce_count[1];
-       stride0 = local_cpu_data->ptce_stride[0];
-       stride1 = local_cpu_data->ptce_stride[1];
-
-       local_irq_save(flags);
-       for (i = 0; i < count0; ++i) {
-               for (j = 0; j < count1; ++j) {
-                       ia64_ptce(addr);
-                       addr += stride1;
-               }
-               addr += stride0;
-       }
-       local_irq_restore(flags);
-       ia64_srlz_i();                  /* srlz.i implies srlz.d */
-}
-EXPORT_SYMBOL(local_flush_tlb_all);
-
-void
-flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-#ifdef XEN
-printf("flush_tlb_range: called, not implemented\n");
-#else
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long size = end - start;
-       unsigned long nbits;
-
-       if (mm != current->active_mm) {
-               /* this does happen, but perhaps it's not worth optimizing for? */
-#ifdef CONFIG_SMP
-               flush_tlb_all();
-#else
-               mm->context = 0;
-#endif
-               return;
-       }
-
-       nbits = ia64_fls(size + 0xfff);
-       while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
-               ++nbits;
-       if (nbits > purge.max_bits)
-               nbits = purge.max_bits;
-       start &= ~((1UL << nbits) - 1);
-
-# ifdef CONFIG_SMP
-       platform_global_tlb_purge(start, end, nbits);
-# else
-       do {
-               ia64_ptcl(start, (nbits<<2));
-               start += (1UL << nbits);
-       } while (start < end);
-# endif
-
-       ia64_srlz_i();                  /* srlz.i implies srlz.d */
-#endif
-}
-EXPORT_SYMBOL(flush_tlb_range);
-
-void __devinit
-ia64_tlb_init (void)
-{
-       ia64_ptce_info_t ptce_info;
-       unsigned long tr_pgbits;
-       long status;
-
-       if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
-               printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld;"
-                      "defaulting to architected purge page-sizes.\n", status);
-               purge.mask = 0x115557000UL;
-       }
-       purge.max_bits = ia64_fls(purge.mask);
-
-       ia64_get_ptce(&ptce_info);
-       local_cpu_data->ptce_base = ptce_info.base;
-       local_cpu_data->ptce_count[0] = ptce_info.count[0];
-       local_cpu_data->ptce_count[1] = ptce_info.count[1];
-       local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
-       local_cpu_data->ptce_stride[1] = ptce_info.stride[1];
-
-       local_flush_tlb_all();          /* nuke left overs from bootstrapping... */
-}
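
The nbits selection in flush_tlb_range() above picks the smallest supported
purge page size covering the range, capped at purge.max_bits.  A standalone
sketch using the architected default mask (the range size is hypothetical,
and msb() stands in for ia64_fls()):

#include <stdio.h>

/* msb() stands in for ia64_fls(): index of the highest set bit. */
static unsigned long msb(unsigned long x)
{
        unsigned long n = 0;

        while (x >>= 1)
                ++n;
        return n;
}

int main(void)
{
        unsigned long purge_mask = 0x115557000UL;  /* architected default */
        unsigned long max_bits = msb(purge_mask);
        unsigned long size = 0x23000;              /* hypothetical range */
        unsigned long nbits = msb(size + 0xfff);

        /* walk up to the next supported purge page size */
        while ((((1UL << nbits) & purge_mask) == 0) && nbits < max_bits)
                ++nbits;
        if (nbits > max_bits)
                nbits = max_bits;
        printf("purging with 2^%lu-byte pages\n", nbits);
        return 0;
}
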
diff -r e2127f19861b -r f242de2e5a3c xen/arch/ia64/unaligned.c
--- a/xen/arch/ia64/unaligned.c Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,1653 +0,0 @@
-/*
- * Architecture-specific unaligned trap handling.
- *
- * Copyright (C) 1999-2002, 2004 Hewlett-Packard Co
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *
- * 2002/12/09   Fix rotating register handling (off-by-1 error, missing fr-rotation).  Fix
- *             get_rse_reg() to not leak kernel bits to user-level (reading an out-of-frame
- *             stacked register returns an undefined value; it does NOT trigger a
- *             "rsvd register fault").
- * 2001/10/11  Fix unaligned access to rotating registers in s/w pipelined loops.
- * 2001/08/13  Correct size of extended floats (float_fsz) from 16 to 10 bytes.
- * 2001/01/17  Add support emulation of unaligned kernel accesses.
- */
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp_lock.h>
-#include <linux/tty.h>
-
-#include <asm/intrinsics.h>
-#include <asm/processor.h>
-#include <asm/rse.h>
-#include <asm/uaccess.h>
-#include <asm/unaligned.h>
-
-extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
-
-#undef DEBUG_UNALIGNED_TRAP
-
-#ifdef DEBUG_UNALIGNED_TRAP
-# define DPRINT(a...)  do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0)
-# define DDUMP(str,vp,len)     dump(str, vp, len)
-
-static void
-dump (const char *str, void *vp, size_t len)
-{
-       unsigned char *cp = vp;
-       int i;
-
-       printk("%s", str);
-       for (i = 0; i < len; ++i)
-               printk (" %02x", *cp++);
-       printk("\n");
-}
-#else
-# define DPRINT(a...)
-# define DDUMP(str,vp,len)
-#endif
-
-#define IA64_FIRST_STACKED_GR  32
-#define IA64_FIRST_ROTATING_FR 32
-#define SIGN_EXT9              0xffffffffffffff00ul
-
-/*
- * For M-unit:
- *
- *  opcode |   m  |   x6    |
- * --------|------|---------|
- * [40-37] | [36] | [35:30] |
- * --------|------|---------|
- *     4   |   1  |    6    | = 11 bits
- * --------------------------
- * However bits [31:30] are not directly useful to distinguish between
- * load/store so we can use [35:32] instead, which gives the following
- * mask ([40:32]) using 9 bits. The 'e' comes from the fact that we defer
- * checking the m-bit until later in the load/store emulation.
- */
-#define IA64_OPCODE_MASK       0x1ef
-#define IA64_OPCODE_SHIFT      32
-
-/*
- * Table C-28 Integer Load/Store
- *
- * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
- *
- * ld8.fill, st8.fill  MUST be aligned because the RNATs are based on
- * the address (bits [8:3]), so we must fail.
- */
-#define LD_OP            0x080
-#define LDS_OP           0x081
-#define LDA_OP           0x082
-#define LDSA_OP          0x083
-#define LDBIAS_OP        0x084
-#define LDACQ_OP         0x085
-/* 0x086, 0x087 are not relevant */
-#define LDCCLR_OP        0x088
-#define LDCNC_OP         0x089
-#define LDCCLRACQ_OP     0x08a
-#define ST_OP            0x08c
-#define STREL_OP         0x08d
-/* 0x08e,0x8f are not relevant */
-
-/*
- * Table C-29 Integer Load +Reg
- *
- * we use the ld->m (bit [36:36]) field to determine whether or not we have
- * a load/store of this form.
- */
-
-/*
- * Table C-30 Integer Load/Store +Imm
- *
- * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
- *
- * ld8.fill, st8.fill  must be aligned because the NaT registers are based on
- * the address, so we must fail and the program must be fixed.
- */
-#define LD_IMM_OP            0x0a0
-#define LDS_IMM_OP           0x0a1
-#define LDA_IMM_OP           0x0a2
-#define LDSA_IMM_OP          0x0a3
-#define LDBIAS_IMM_OP        0x0a4
-#define LDACQ_IMM_OP         0x0a5
-/* 0x0a6, 0xa7 are not relevant */
-#define LDCCLR_IMM_OP        0x0a8
-#define LDCNC_IMM_OP         0x0a9
-#define LDCCLRACQ_IMM_OP     0x0aa
-#define ST_IMM_OP            0x0ac
-#define STREL_IMM_OP         0x0ad
-/* 0x0ae,0xaf are not relevant */
-
-/*
- * Table C-32 Floating-point Load/Store
- */
-#define LDF_OP           0x0c0
-#define LDFS_OP          0x0c1
-#define LDFA_OP          0x0c2
-#define LDFSA_OP         0x0c3
-/* 0x0c6 is irrelevant */
-#define LDFCCLR_OP       0x0c8
-#define LDFCNC_OP        0x0c9
-/* 0x0cb is irrelevant  */
-#define STF_OP           0x0cc
-
-/*
- * Table C-33 Floating-point Load +Reg
- *
- * we use the ld->m (bit [36:36]) field to determine whether or not we have
- * a load/store of this form.
- */
-
-/*
- * Table C-34 Floating-point Load/Store +Imm
- */
-#define LDF_IMM_OP       0x0e0
-#define LDFS_IMM_OP      0x0e1
-#define LDFA_IMM_OP      0x0e2
-#define LDFSA_IMM_OP     0x0e3
-/* 0x0e6 is irrelevant */
-#define LDFCCLR_IMM_OP   0x0e8
-#define LDFCNC_IMM_OP    0x0e9
-#define STF_IMM_OP       0x0ec
-
-typedef struct {
-       unsigned long    qp:6;  /* [0:5]   */
-       unsigned long    r1:7;  /* [6:12]  */
-       unsigned long   imm:7;  /* [13:19] */
-       unsigned long    r3:7;  /* [20:26] */
-       unsigned long     x:1;  /* [27:27] */
-       unsigned long  hint:2;  /* [28:29] */
-       unsigned long x6_sz:2;  /* [30:31] */
-       unsigned long x6_op:4;  /* [32:35], x6 = x6_sz|x6_op */
-       unsigned long     m:1;  /* [36:36] */
-       unsigned long    op:4;  /* [37:40] */
-       unsigned long   pad:23; /* [41:63] */
-} load_store_t;
-
-
-typedef enum {
-       UPD_IMMEDIATE,  /* ldXZ r1=[r3],imm(9) */
-       UPD_REG         /* ldXZ r1=[r3],r2     */
-} update_t;
-
-/*
- * We use tables to keep track of the offsets of registers in the saved state.
- * This way we save having big switch/case statements.
- *
- * We use bit 0 to indicate switch_stack or pt_regs.
- * The offset is simply shifted by 1 bit.
- * A 2-byte value should be enough to hold any kind of offset
- *
- * In case the calling convention changes (and thus pt_regs/switch_stack)
- * simply use RSW instead of RPT or vice-versa.
- */
-
-#define RPO(x) ((size_t) &((struct pt_regs *)0)->x)
-#define RSO(x) ((size_t) &((struct switch_stack *)0)->x)
-
-#define RPT(x)         (RPO(x) << 1)
-#define RSW(x)         (1| RSO(x)<<1)
-
-#define GR_OFFS(x)     (gr_info[x]>>1)
-#define GR_IN_SW(x)    (gr_info[x] & 0x1)
-
-#define FR_OFFS(x)     (fr_info[x]>>1)
-#define FR_IN_SW(x)    (fr_info[x] & 0x1)
-
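The trick: one u16 carries both a struct offset (shifted left by one) and, in bit 0, which saved-state structure the register lives in. A tiny standalone model of the same encoding (the struct and macro names here are illustrative stand-ins, not the real pt_regs/switch_stack):

    #include <stdio.h>
    #include <stddef.h>

    struct regs  { long r1, r2; };          /* stands in for pt_regs */
    struct stack { long r4, r5; };          /* stands in for switch_stack */

    #define IN_REGS(off)   ((off) << 1)             /* like RPT() */
    #define IN_STACK(off)  (1 | (off) << 1)         /* like RSW() */

    int main(void)
    {
        unsigned short enc = IN_STACK(offsetof(struct stack, r5));
        printf("offset=%u on_switch_stack=%u\n", enc >> 1, enc & 1);
        return 0;
    }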
-static u16 gr_info[32]={
-       0,                      /* r0 is read-only : WE SHOULD NEVER GET THIS */
-
-       RPT(r1), RPT(r2), RPT(r3),
-
-#ifdef  CONFIG_VTI
-       RPT(r4), RPT(r5), RPT(r6), RPT(r7),
-#else   //CONFIG_VTI
-       RSW(r4), RSW(r5), RSW(r6), RSW(r7),
-#endif  //CONFIG_VTI
-
-       RPT(r8), RPT(r9), RPT(r10), RPT(r11),
-       RPT(r12), RPT(r13), RPT(r14), RPT(r15),
-
-       RPT(r16), RPT(r17), RPT(r18), RPT(r19),
-       RPT(r20), RPT(r21), RPT(r22), RPT(r23),
-       RPT(r24), RPT(r25), RPT(r26), RPT(r27),
-       RPT(r28), RPT(r29), RPT(r30), RPT(r31)
-};
-
-static u16 fr_info[32]={
-       0,                      /* constant : WE SHOULD NEVER GET THIS */
-       0,                      /* constant : WE SHOULD NEVER GET THIS */
-
-       RSW(f2), RSW(f3), RSW(f4), RSW(f5),
-
-       RPT(f6), RPT(f7), RPT(f8), RPT(f9),
-       RPT(f10), RPT(f11),
-
-       RSW(f12), RSW(f13), RSW(f14),
-       RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
-       RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
-       RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
-       RSW(f30), RSW(f31)
-};
-
-/* Invalidate ALAT entry for integer register REGNO.  */
-static void
-invala_gr (int regno)
-{
-#      define F(reg)   case reg: ia64_invala_gr(reg); break
-
-       switch (regno) {
-               F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
-               F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
-               F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
-               F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
-               F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
-               F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
-               F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
-               F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
-               F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
-               F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
-               F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
-               F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
-               F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
-               F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
-               F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
-               F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
-       }
-#      undef F
-}
-
-/* Invalidate ALAT entry for floating-point register REGNO.  */
-static void
-invala_fr (int regno)
-{
-#      define F(reg)   case reg: ia64_invala_fr(reg); break
-
-       switch (regno) {
-               F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
-               F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
-               F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
-               F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
-               F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
-               F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
-               F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
-               F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
-               F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
-               F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
-               F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
-               F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
-               F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
-               F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
-               F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
-               F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
-       }
-#      undef F
-}
-
-static inline unsigned long
-rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
-{
-       reg += rrb;
-       if (reg >= sor)
-               reg -= sor;
-       return reg;
-}
-
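A quick sanity check of what rotate_reg() computes, as a standalone program (the numbers are illustrative: a rotating region of 16 registers with a rename base of 5):

    #include <assert.h>

    static unsigned long
    rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
    {
        reg += rrb;                     /* same body as above */
        if (reg >= sor)
            reg -= sor;
        return reg;
    }

    int main(void)
    {
        assert(rotate_reg(16, 5,  2) == 7);     /* no wrap-around */
        assert(rotate_reg(16, 5, 14) == 3);     /* 19 wraps to 3 */
        return 0;
    }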
-#ifdef CONFIG_VTI
-static void
-set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
-{
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-       unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
-       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
-       unsigned long rnats, nat_mask;
-    unsigned long old_rsc,new_rsc;
-       unsigned long on_kbs,rnat;
-       long sof = (regs->cr_ifs) & 0x7f;
-       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
-       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
-       long ridx = r1 - 32;
-
-       if (ridx >= sof) {
-               /* this should never happen, as the "rsvd register fault" has higher priority */
-               DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
-               return;
-       }
-
-       if (ridx < sor)
-               ridx = rotate_reg(sor, rrb_gr, ridx);
-
-    old_rsc=ia64_get_rsc();
-    new_rsc=old_rsc&(~0x3);
-    ia64_set_rsc(new_rsc);
-
-    bspstore = ia64_get_bspstore();
-    bsp =kbs + (regs->loadrs >> 19);//16+3
-
-       addr = ia64_rse_skip_regs(bsp, -sof + ridx);
-    nat_mask = 1UL << ia64_rse_slot_num(addr);
-       rnat_addr = ia64_rse_rnat_addr(addr);
-
-    if(addr >= bspstore){
-
-        ia64_flushrs ();
-        ia64_mf ();
-               *addr = val;
-        bspstore = ia64_get_bspstore();
-       rnat = ia64_get_rnat ();
-        if(bspstore < rnat_addr){
-            rnat=rnat&(~nat_mask);
-        }else{
-            *rnat_addr = (*rnat_addr)&(~nat_mask);
-        }
-        ia64_mf();
-        ia64_loadrs();
-        ia64_set_rnat(rnat);
-    }else{
-
-       rnat = ia64_get_rnat ();
-               *addr = val;
-        if(bspstore < rnat_addr){
-            rnat=rnat&(~nat_mask);
-        }else{
-            *rnat_addr = (*rnat_addr)&(~nat_mask);
-        }
-        ia64_set_bspstore (bspstore);
-        ia64_set_rnat(rnat);
-    }
-    ia64_set_rsc(old_rsc);
-}
-
-
-static void
-get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
-{
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-       unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
-       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
-       unsigned long rnats, nat_mask;
-       unsigned long on_kbs;
-    unsigned long old_rsc, new_rsc;
-       long sof = (regs->cr_ifs) & 0x7f;
-       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
-       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
-       long ridx = r1 - 32;
-
-       if (ridx >= sof) {
-               /* read of out-of-frame register returns an undefined value; 0 in our case.  */
-               DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
-               panic("wrong stack register number");
-       }
-
-       if (ridx < sor)
-               ridx = rotate_reg(sor, rrb_gr, ridx);
-
-    old_rsc=ia64_get_rsc();
-    new_rsc=old_rsc&(~(0x3));
-    ia64_set_rsc(new_rsc);
-
-    bspstore = ia64_get_bspstore();
-    bsp =kbs + (regs->loadrs >> 19); //16+3;
-
-       addr = ia64_rse_skip_regs(bsp, -sof + ridx);
-    nat_mask = 1UL << ia64_rse_slot_num(addr);
-       rnat_addr = ia64_rse_rnat_addr(addr);
-
-    if(addr >= bspstore){
-
-        ia64_flushrs ();
-        ia64_mf ();
-        bspstore = ia64_get_bspstore();
-    }
-       *val=*addr;
-    if(bspstore < rnat_addr){
-        *nat=!!(ia64_get_rnat()&nat_mask);
-    }else{
-        *nat = !!((*rnat_addr)&nat_mask);
-    }
-    ia64_set_rsc(old_rsc);
-}
-#else // CONFIG_VTI
-static void
-set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
-{
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-       unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
-       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
-       unsigned long rnats, nat_mask;
-       unsigned long on_kbs;
-       long sof = (regs->cr_ifs) & 0x7f;
-       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
-       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
-       long ridx = r1 - 32;
-
-       if (ridx >= sof) {
-               /* this should never happen, as the "rsvd register fault" has higher priority */
-               DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
-               return;
-       }
-
-       if (ridx < sor)
-               ridx = rotate_reg(sor, rrb_gr, ridx);
-
-       DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
-              r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
-
-       on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
-       addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
-       if (addr >= kbs) {
-               /* the register is on the kernel backing store: easy... */
-               rnat_addr = ia64_rse_rnat_addr(addr);
-               if ((unsigned long) rnat_addr >= sw->ar_bspstore)
-                       rnat_addr = &sw->ar_rnat;
-               nat_mask = 1UL << ia64_rse_slot_num(addr);
-
-               *addr = val;
-               if (nat)
-                       *rnat_addr |=  nat_mask;
-               else
-                       *rnat_addr &= ~nat_mask;
-               return;
-       }
-
-       if (!user_stack(current, regs)) {
-               DPRINT("ignoring kernel write to r%lu; register isn't on the kernel RBS!", r1);
-               return;
-       }
-
-       bspstore = (unsigned long *)regs->ar_bspstore;
-       ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
-       bsp     = ia64_rse_skip_regs(ubs_end, -sof);
-       addr    = ia64_rse_skip_regs(bsp, ridx);
-
-       DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
-
-       ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
-
-       rnat_addr = ia64_rse_rnat_addr(addr);
-
-       ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
-       DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n",
-              (void *) rnat_addr, rnats, nat, (rnats >> ia64_rse_slot_num(addr)) & 1);
-
-       nat_mask = 1UL << ia64_rse_slot_num(addr);
-       if (nat)
-               rnats |=  nat_mask;
-       else
-               rnats &= ~nat_mask;
-       ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);
-
-       DPRINT("rnat changed to @%p = 0x%lx\n", (void *) rnat_addr, rnats);
-}
-
-
-static void
-get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat)
-{
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-       unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
-       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
-       unsigned long rnats, nat_mask;
-       unsigned long on_kbs;
-       long sof = (regs->cr_ifs) & 0x7f;
-       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
-       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
-       long ridx = r1 - 32;
-
-       if (ridx >= sof) {
-               /* read of out-of-frame register returns an undefined value; 0 in our case.  */
-               DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
-               goto fail;
-       }
-
-       if (ridx < sor)
-               ridx = rotate_reg(sor, rrb_gr, ridx);
-
-       DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
-              r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
-
-       on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
-       addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
-       if (addr >= kbs) {
-               /* the register is on the kernel backing store: easy... */
-               *val = *addr;
-               if (nat) {
-                       rnat_addr = ia64_rse_rnat_addr(addr);
-                       if ((unsigned long) rnat_addr >= sw->ar_bspstore)
-                               rnat_addr = &sw->ar_rnat;
-                       nat_mask = 1UL << ia64_rse_slot_num(addr);
-                       *nat = (*rnat_addr & nat_mask) != 0;
-               }
-               return;
-       }
-
-       if (!user_stack(current, regs)) {
-               DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1);
-               goto fail;
-       }
-
-       bspstore = (unsigned long *)regs->ar_bspstore;
-       ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
-       bsp     = ia64_rse_skip_regs(ubs_end, -sof);
-       addr    = ia64_rse_skip_regs(bsp, ridx);
-
-       DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);
-
-       ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
-
-       if (nat) {
-               rnat_addr = ia64_rse_rnat_addr(addr);
-               nat_mask = 1UL << ia64_rse_slot_num(addr);
-
-               DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);
-
-               ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
-               *nat = (rnats & nat_mask) != 0;
-       }
-       return;
-
-  fail:
-       *val = 0;
-       if (nat)
-               *nat = 0;
-       return;
-}
-#endif // CONFIG_VTI
-
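Both variants above decode the same CR.IFS fields; a standalone sketch of just that decoding (field positions taken from the expressions in the code, struct name made up):

    #include <stdint.h>

    struct frame { long sof, sol, sor, rrb_gr; };

    static struct frame decode_ifs(uint64_t cr_ifs)
    {
        struct frame f;

        f.sof    = cr_ifs & 0x7f;               /* size of frame */
        f.sol    = (cr_ifs >> 7) & 0x7f;        /* size of locals */
        f.sor    = 8 * ((cr_ifs >> 14) & 0xf);  /* size of rotating region */
        f.rrb_gr = (cr_ifs >> 18) & 0x7f;       /* GR rename base */
        return f;
    }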
-
-#ifdef XEN
-void
-#else
-static void
-#endif
-setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
-{
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-       unsigned long addr;
-       unsigned long bitmask;
-       unsigned long *unat;
-
-       /*
-        * First takes care of stacked registers
-        */
-       if (regnum >= IA64_FIRST_STACKED_GR) {
-               set_rse_reg(regs, regnum, val, nat);
-               return;
-       }
-
-       /*
-        * Using r0 as a target raises a General Exception fault which has
-        * higher priority than the Unaligned Reference fault.
-        */
-
-       /*
-        * Now look at registers in [0-31] range and init correct UNAT
-        */
-       if (GR_IN_SW(regnum)) {
-               addr = (unsigned long)sw;
-               unat = &sw->ar_unat;
-       } else {
-               addr = (unsigned long)regs;
-#ifdef CONFIG_VTI
-               unat = &regs->eml_unat;
-#else //CONFIG_VTI
-               unat = &sw->caller_unat;
-#endif  //CONFIG_VTI
-       }
-       DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
-              addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
-       /*
-        * add offset from base of struct
-        * and do it !
-        */
-       addr += GR_OFFS(regnum);
-
-       *(unsigned long *)addr = val;
-
-       /*
-        * We need to clear the corresponding UNAT bit to fully emulate the load
-        * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
-        */
-       bitmask   = 1UL << (addr >> 3 & 0x3f);
-       DPRINT("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, (void *) unat, *unat);
-       if (nat) {
-               *unat |= bitmask;
-       } else {
-               *unat &= ~bitmask;
-       }
-       DPRINT("*0x%lx=0x%lx NaT=%d new unat: %p=%lx\n", addr, val, nat, (void *) unat, *unat);
-}
-
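The UNAT bit position used above follows directly from the spill address: bits {8:3} of the address select which of the 64 UNAT bits shadows the spilled register. Pulled out as a one-liner (helper name illustrative):

    #include <stdint.h>

    /* NaT bit for a register spilled at byte address addr:
     * UNAT bit number = addr{8:3}, i.e. (addr >> 3) & 0x3f. */
    static inline uint64_t unat_mask(uint64_t addr)
    {
        return 1UL << ((addr >> 3) & 0x3f);
    }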
-/*
- * Return the (rotated) index for floating point register REGNUM (REGNUM must be
- * in the range from 32-127; the result is in the range from 0-95).
- */
-static inline unsigned long
-fph_index (struct pt_regs *regs, long regnum)
-{
-       unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
-       return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
-}
-
-static void
-setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
-{
-       struct switch_stack *sw = (struct switch_stack *)regs - 1;
-       unsigned long addr;
-
-       /*
-        * From EAS-2.5: FPDisableFault has higher priority than Unaligned
-        * Fault. Thus, when we get here, we know the partition is enabled.
-        * To update f32-f127, there are three choices:
-        *
-        *      (1) save f32-f127 to thread.fph and update the values there
-        *      (2) use a gigantic switch statement to directly access the registers
-        *      (3) generate code on the fly to update the desired register
-        *
-        * For now, we are using approach (1).
-        */
-       if (regnum >= IA64_FIRST_ROTATING_FR) {
-               ia64_sync_fph(current);
-#ifdef XEN
-               current->arch._thread.fph[fph_index(regs, regnum)] = *fpval;
-#else
-               current->thread.fph[fph_index(regs, regnum)] = *fpval;
-#endif
-       } else {
-               /*
-                * pt_regs or switch_stack ?
-                */
-               if (FR_IN_SW(regnum)) {
-                       addr = (unsigned long)sw;
-               } else {
-                       addr = (unsigned long)regs;
-               }
-
-               DPRINT("tmp_base=%lx offset=%d\n", addr, FR_OFFS(regnum));
-
-               addr += FR_OFFS(regnum);
-               *(struct ia64_fpreg *)addr = *fpval;
-
-               /*
-                * mark the low partition as being used now
-                *
-                * It is highly unlikely that this bit is not already set, but
-                * let's do it for safety.
-                */
-               regs->cr_ipsr |= IA64_PSR_MFL;
-       }
-}
-
-/*
- * Those 2 inline functions generate the spilled versions of the constant floating
- * point registers which can be used with stfX
- */
-static inline void
-float_spill_f0 (struct ia64_fpreg *final)
-{
-       ia64_stf_spill(final, 0);
-}
-
-static inline void
-float_spill_f1 (struct ia64_fpreg *final)
-{
-       ia64_stf_spill(final, 1);
-}
-
-static void
-getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
-{
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-       unsigned long addr;
-
-       /*
-        * From EAS-2.5: FPDisableFault has higher priority than
-        * Unaligned Fault. Thus, when we get here, we know the partition is
-        * enabled.
-        *
-        * When regnum > 31, the register is still live and we need to force a save
-        * to current->thread.fph to get access to it.  See discussion in setfpreg()
-        * for reasons and other ways of doing this.
-        */
-       if (regnum >= IA64_FIRST_ROTATING_FR) {
-               ia64_flush_fph(current);
-#ifdef XEN
-               *fpval = current->arch._thread.fph[fph_index(regs, regnum)];
-#else
-               *fpval = current->thread.fph[fph_index(regs, regnum)];
-#endif
-       } else {
-               /*
-                * f0 = 0.0, f1= 1.0. Those registers are constant and are thus
-                * not saved, we must generate their spilled form on the fly
-                */
-               switch(regnum) {
-               case 0:
-                       float_spill_f0(fpval);
-                       break;
-               case 1:
-                       float_spill_f1(fpval);
-                       break;
-               default:
-                       /*
-                        * pt_regs or switch_stack ?
-                        */
-                       addr =  FR_IN_SW(regnum) ? (unsigned long)sw
-                                                : (unsigned long)regs;
-
-                       DPRINT("is_sw=%d tmp_base=%lx offset=0x%x\n",
-                              FR_IN_SW(regnum), addr, FR_OFFS(regnum));
-
-                       addr  += FR_OFFS(regnum);
-                       *fpval = *(struct ia64_fpreg *)addr;
-               }
-       }
-}
-
-
-#ifdef XEN
-void
-#else
-static void
-#endif
-getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
-{
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-       unsigned long addr, *unat;
-
-       if (regnum >= IA64_FIRST_STACKED_GR) {
-               get_rse_reg(regs, regnum, val, nat);
-               return;
-       }
-
-       /*
-        * take care of r0 (read-only; always evaluates to 0)
-        */
-       if (regnum == 0) {
-               *val = 0;
-               if (nat)
-                       *nat = 0;
-               return;
-       }
-
-       /*
-        * Now look at registers in [0-31] range and init correct UNAT
-        */
-       if (GR_IN_SW(regnum)) {
-               addr = (unsigned long)sw;
-               unat = &sw->ar_unat;
-       } else {
-               addr = (unsigned long)regs;
-#ifdef  CONFIG_VTI
-               unat = &regs->eml_unat;
-#else   //CONFIG_VTI
-               unat = &sw->caller_unat;
-#endif  //CONFIG_VTI
-       }
-
-       DPRINT("addr_base=%lx offset=0x%x\n", addr,  GR_OFFS(regnum));
-
-       addr += GR_OFFS(regnum);
-
-       *val  = *(unsigned long *)addr;
-
-       /*
-        * do it only when requested
-        */
-       if (nat)
-               *nat  = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
-}
-
-static void
-emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
-{
-       /*
-        * IMPORTANT:
-        * Given the way we handle unaligned speculative loads, we should
-        * not get to this point in the code but we keep this sanity check,
-        * just in case.
-        */
-       if (ld.x6_op == 1 || ld.x6_op == 3) {
-               printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
-               die_if_kernel("unaligned reference on speculative load with register update\n",
-                             regs, 30);
-       }
-
-
-       /*
-        * at this point, we know that the base register to update is valid i.e.,
-        * it's not r0
-        */
-       if (type == UPD_IMMEDIATE) {
-               unsigned long imm;
-
-               /*
-                * Load +Imm: ldXZ r1=[r3],imm(9)
-                *
-                *
-                * form imm9: [13:19] contain the first 7 bits
-                */
-               imm = ld.x << 7 | ld.imm;
-
-               /*
-                * sign extend (1+8bits) if m set
-                */
-               if (ld.m) imm |= SIGN_EXT9;
-
-               /*
-                * ifa == r3 and we know that the NaT bit on r3 was clear so
-                * we can directly use ifa.
-                */
-               ifa += imm;
-
-               setreg(ld.r3, ifa, 0, regs);
-
-               DPRINT("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld.x, ld.m, imm, ifa);
-
-       } else if (ld.m) {
-               unsigned long r2;
-               int nat_r2;
-
-               /*
-                * Load +Reg Opcode: ldXZ r1=[r3],r2
-                *
-                * Note that we update r3 even in the case of ldfX.a
-                * (where the load does not happen)
-                *
-                * The way the load algorithm works, we know that r3 does not
-                * have its NaT bit set (would have gotten NaT consumption
-                * before getting the unaligned fault). So we can use ifa
-                * which equals r3 at this point.
-                *
-                * IMPORTANT:
-                * The above statement holds ONLY because we know that we
-                * never reach this code when trying to do a ldX.s.
-                * If we ever make it to here on an ldfX.s then
-                * If we ever make it to here on an ldfX.s, that assumption no longer holds.
-               getreg(ld.imm, &r2, &nat_r2, regs);
-
-               ifa += r2;
-
-               /*
-                * propagate Nat r2 -> r3
-                */
-               setreg(ld.r3, ifa, nat_r2, regs);
-
-               DPRINT("imm=%d r2=%ld r3=0x%lx nat_r2=%d\n", ld.imm, r2, ifa, nat_r2);
-       }
-}
-
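The imm9 assembly used in both branches is worth seeing in isolation. A hedged sketch (SIGN_EXT9 is defined elsewhere in this file; 0xffffffffffffff00 is assumed here, and the helper name is illustrative):

    #include <stdint.h>

    #define SIGN_EXT9  0xffffffffffffff00UL    /* assumed value */

    /* +Imm forms: 7 low bits from the imm field, bit 7 from x,
     * and m supplies the sign, exactly as in the code above. */
    static inline int64_t form_imm9(unsigned x, unsigned imm7, unsigned m)
    {
        uint64_t imm = (uint64_t) x << 7 | imm7;

        if (m)
            imm |= SIGN_EXT9;   /* sign-extend the 9-bit immediate */
        return (int64_t) imm;
    }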
-
-static int
-emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
-{
-       unsigned int len = 1 << ld.x6_sz;
-       unsigned long val = 0;
-
-       /*
-        * r0, as target, doesn't need to be checked because Illegal Instruction
-        * faults have higher priority than unaligned faults.
-        *
-        * r0 cannot be found as the base as it would never generate an
-        * unaligned reference.
-        */
-
-       /*
-        * For ldX.a we emulate the load and also invalidate the ALAT entry.
-        * See comment below for explanation on how we handle ldX.a
-        */
-
-       if (len != 2 && len != 4 && len != 8) {
-               DPRINT("unknown size: x6=%d\n", ld.x6_sz);
-               return -1;
-       }
-       /* this assumes little-endian byte-order: */
-       if (copy_from_user(&val, (void __user *) ifa, len))
-               return -1;
-       setreg(ld.r1, val, 0, regs);
-
-       /*
-        * check for updates on any kind of loads
-        */
-       if (ld.op == 0x5 || ld.m)
-               emulate_load_updates(ld.op == 0x5 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
-
-       /*
-        * handling of various loads (based on EAS2.4):
-        *
-        * ldX.acq (ordered load):
-        *      - acquire semantics would have been used, so force fence instead.
-        *
-        * ldX.c.clr (check load and clear):
-        *      - if we get to this handler, it's because the entry was not in the ALAT.
-        *        Therefore the operation reverts to a normal load
-        *
-        * ldX.c.nc (check load no clear):
-        *      - same as previous one
-        *
-        * ldX.c.clr.acq (ordered check load and clear):
-        *      - same as above for the c.clr part. The load needs to have acquire semantics,
-        *        so we use the fence semantics which is stronger and thus ensures correctness.
-        *
-        * ldX.a (advanced load):
-        *      - suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the
-        *        address doesn't match the requested size alignment. This means that we would
-        *        possibly need more than one load to get the result.
-        *
-        *        The load part can be handled just like a normal load, however the difficult
-        *        part is to get the right thing into the ALAT. The critical piece of information
-        *        is the base address of the load & its size. To do that, a ld.a must be executed;
-        *        clearly any address can be pushed into the table by using ld1.a r1=[r3]. Now
-        *        if we use the same target register, we will be okay for the chk.a instruction.
-        *        If we look at the store, basically a stX [r3]=r1 checks the ALAT for any entry
-        *        which would overlap within [r3,r3+X] (the size of the load was stored in the
-        *        ALAT). If such an entry is found the entry is invalidated. But this is not good
-        *        enough, take the following example:
-        *              r3=3
-        *              ld4.a r1=[r3]
-        *
-        *        Could be emulated by doing:
-        *              ld1.a r1=[r3],1
-        *              store to temporary;
-        *              ld1.a r1=[r3],1
-        *              store & shift to temporary;
-        *              ld1.a r1=[r3],1
-        *              store & shift to temporary;
-        *              ld1.a r1=[r3]
-        *              store & shift to temporary;
-        *              r1=temporary
-        *
-        *        So in this case, you would get the right value in r1 but the wrong info in
-        *        the ALAT.  Notice that you could do it in reverse to finish with address 3
-        *        but you would still get the size wrong.  To get the size right, one needs to
-        *        execute exactly the same kind of load. You could do it from an aligned
-        *        temporary location, but you would get the address wrong.
-        *
-        *        So no matter what, it is not possible to emulate an advanced load
-        *        correctly. But is that really critical ?
-        *
-        *        We will always convert ld.a into a normal load with the ALAT invalidated.  This
-        *        will enable the compiler to do optimizations where certain code paths after ld.a
-        *        are not required to have ld.c/chk.a, e.g., code paths with no intervening stores.
-        *
-        *        If there is a store after the advanced load, one must either do a ld.c.* or
-        *        chk.a.* to reuse the value stored in the ALAT. Both can "fail" (meaning no
-        *        entry found in the ALAT), and that's perfectly ok because:
-        *
-        *              - ld.c.*: if the entry is not present, a normal load is executed
-        *              - chk.a.*: if the entry is not present, execution jumps to recovery code
-        *
-        *        In either case, the load can be potentially retried in another form.
-        *
-        *        The ALAT must be invalidated for the register (so that a chk.a or ld.c doesn't
-        *        pick up a stale entry later). The register base update MUST also be performed.
-        */
-
-       /*
-        * when the load has the .acq completer then
-        * use ordering fence.
-        */
-       if (ld.x6_op == 0x5 || ld.x6_op == 0xa)
-               mb();
-
-       /*
-        * invalidate ALAT entry in case of advanced load
-        */
-       if (ld.x6_op == 0x2)
-               invala_gr(ld.r1);
-
-       return 0;
-}
-
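The actual data movement above reduces to a byte-wise copy into a zeroed 64-bit container, which is what makes the little-endian assumption matter. Stripped of the user-copy machinery, that step is roughly (helper name illustrative):

    #include <stdint.h>
    #include <string.h>

    /* len is 2, 4 or 8; on a little-endian machine the low-order
     * bytes of val are filled and the rest stay zero. */
    static uint64_t load_unaligned(const void *src, unsigned len)
    {
        uint64_t val = 0;

        memcpy(&val, src, len);     /* byte-granular, alignment-safe */
        return val;
    }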
-static int
-emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
-{
-       unsigned long r2;
-       unsigned int len = 1 << ld.x6_sz;
-
-       /*
-        * if we get to this handler, Nat bits on both r3 and r2 have already
-        * been checked, so we don't need to do it again.
-        *
-        * extract the value to be stored
-        */
-       getreg(ld.imm, &r2, NULL, regs);
-
-       /*
-        * we rely on the macros in unaligned.h for now i.e.,
-        * we let the compiler figure out how to read memory gracefully.
-        *
-        * We need this switch/case because of the way the inline function
-        * works. The code is optimized by the compiler and looks like
-        * a single switch/case.
-        */
-       DPRINT("st%d [%lx]=%lx\n", len, ifa, r2);
-
-       if (len != 2 && len != 4 && len != 8) {
-               DPRINT("unknown size: x6=%d\n", ld.x6_sz);
-               return -1;
-       }
-
-       /* this assumes little-endian byte-order: */
-       if (copy_to_user((void __user *) ifa, &r2, len))
-               return -1;
-
-       /*
-        * stX [r3]=r2,imm(9)
-        *
-        * NOTE:
-        * ld.r3 can never be r0, because r0 would not generate an
-        * unaligned access.
-        */
-       if (ld.op == 0x5) {
-               unsigned long imm;
-
-               /*
-                * form imm9: bits [12:6] contain the first 7 bits
-                */
-               imm = ld.x << 7 | ld.r1;
-               /*
-                * sign extend (8 bits) if m set
-                */
-               if (ld.m) imm |= SIGN_EXT9;
-               /*
-                * ifa == r3 (NaT is necessarily cleared)
-                */
-               ifa += imm;
-
-               DPRINT("imm=%lx r3=%lx\n", imm, ifa);
-
-               setreg(ld.r3, ifa, 0, regs);
-       }
-       /*
-        * we don't have alat_invalidate_multiple() so we need
-        * to do the complete flush :-<<
-        */
-       ia64_invala();
-
-       /*
-        * stX.rel: use fence instead of release
-        */
-       if (ld.x6_op == 0xd)
-               mb();
-
-       return 0;
-}
-
-/*
- * floating point operations sizes in bytes
- */
-static const unsigned char float_fsz[4]={
-       10, /* extended precision (e) */
-       8,  /* integer (8)            */
-       4,  /* single precision (s)   */
-       8   /* double precision (d)   */
-};
-
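The table is indexed by x6_sz, so e.g. a double-precision access (x6_sz == 3) moves 8 bytes while the extended form moves its 10-byte memory image. A trivial standalone check:

    #include <stdio.h>

    static const unsigned char float_fsz[4] = { 10, 8, 4, 8 };  /* as above */

    int main(void)
    {
        printf("extended: %u bytes\n", float_fsz[0]);   /* 10 */
        printf("double:   %u bytes\n", float_fsz[3]);   /* 8  */
        return 0;
    }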
-static inline void
-mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
-       ia64_ldfe(6, init);
-       ia64_stop();
-       ia64_stf_spill(final, 6);
-}
-
-static inline void
-mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
-       ia64_ldf8(6, init);
-       ia64_stop();
-       ia64_stf_spill(final, 6);
-}
-
-static inline void
-mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
-       ia64_ldfs(6, init);
-       ia64_stop();
-       ia64_stf_spill(final, 6);
-}
-
-static inline void
-mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
-       ia64_ldfd(6, init);
-       ia64_stop();
-       ia64_stf_spill(final, 6);
-}
-
-static inline void
-float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
-       ia64_ldf_fill(6, init);
-       ia64_stop();
-       ia64_stfe(final, 6);
-}
-
-static inline void
-float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
-       ia64_ldf_fill(6, init);
-       ia64_stop();
-       ia64_stf8(final, 6);
-}
-
-static inline void
-float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
-       ia64_ldf_fill(6, init);
-       ia64_stop();
-       ia64_stfs(final, 6);
-}
-
-static inline void
-float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
-{
-       ia64_ldf_fill(6, init);
-       ia64_stop();
-       ia64_stfd(final, 6);
-}
-
-static int
-emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
-{
-       struct ia64_fpreg fpr_init[2];
-       struct ia64_fpreg fpr_final[2];
-       unsigned long len = float_fsz[ld.x6_sz];
-
-       /*
-        * fr0 & fr1 don't need to be checked because Illegal Instruction faults have
-        * higher priority than unaligned faults.
-        *
-        * r0 cannot be found as the base as it would never generate an unaligned
-        * reference.
-        */
-
-       /*
-        * make sure we get clean buffers
-        */
-       memset(&fpr_init, 0, sizeof(fpr_init));
-       memset(&fpr_final, 0, sizeof(fpr_final));
-
-       /*
-        * ldfpX.a: we don't try to emulate anything but we must
-        * invalidate the ALAT entry and execute updates, if any.
-        */
-       if (ld.x6_op != 0x2) {
-               /*
-                * This assumes little-endian byte-order.  Note that there is no "ldfpe"
-                * instruction:
-                */
-               if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
-                   || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
-                       return -1;
-
-               DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
-               DDUMP("frp_init =", &fpr_init, 2*len);
-               /*
-                * XXX fixme
-                * Could optimize inlines by using ldfpX & 2 spills
-                */
-               switch( ld.x6_sz ) {
-                       case 0:
-                               mem2float_extended(&fpr_init[0], &fpr_final[0]);
-                               mem2float_extended(&fpr_init[1], &fpr_final[1]);
-                               break;
-                       case 1:
-                               mem2float_integer(&fpr_init[0], &fpr_final[0]);
-                               mem2float_integer(&fpr_init[1], &fpr_final[1]);
-                               break;
-                       case 2:
-                               mem2float_single(&fpr_init[0], &fpr_final[0]);
-                               mem2float_single(&fpr_init[1], &fpr_final[1]);
-                               break;
-                       case 3:
-                               mem2float_double(&fpr_init[0], &fpr_final[0]);
-                               mem2float_double(&fpr_init[1], &fpr_final[1]);
-                               break;
-               }
-               DDUMP("fpr_final =", &fpr_final, 2*len);
-               /*
-                * XXX fixme
-                *
-                * A possible optimization would be to drop fpr_final and directly
-                * use the storage from the saved context i.e., the actual final
-                * destination (pt_regs, switch_stack or thread structure).
-                */
-               setfpreg(ld.r1, &fpr_final[0], regs);
-               setfpreg(ld.imm, &fpr_final[1], regs);
-       }
-
-       /*
-        * Check for updates: only immediate updates are available for this
-        * instruction.
-        */
-       if (ld.m) {
-               /*
-                * the immediate is implicit given the ldsz of the operation:
-                * single: 8 (2x4) and for  all others it's 16 (2x8)
-                */
-               ifa += len<<1;
-
-               /*
-                * IMPORTANT:
-                * the fact that we force the NaT of r3 to zero is ONLY valid
-                * as long as we don't come here with a ldfpX.s.
-                * For this reason we keep this sanity check
-                */
-               if (ld.x6_op == 1 || ld.x6_op == 3)
-                       printk(KERN_ERR "%s: register update on speculative load pair, error\n",
-                              __FUNCTION__);
-
-               setreg(ld.r3, ifa, 0, regs);
-       }
-
-       /*
-        * Invalidate ALAT entries, if any, for both registers.
-        */
-       if (ld.x6_op == 0x2) {
-               invala_fr(ld.r1);
-               invala_fr(ld.imm);
-       }
-       return 0;
-}
-
-
-static int
-emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
-{
-       struct ia64_fpreg fpr_init;
-       struct ia64_fpreg fpr_final;
-       unsigned long len = float_fsz[ld.x6_sz];
-
-       /*
-        * fr0 & fr1 don't need to be checked because Illegal Instruction
-        * faults have higher priority than unaligned faults.
-        *
-        * r0 cannot be found as the base as it would never generate an
-        * unaligned reference.
-        */
-
-       /*
-        * make sure we get clean buffers
-        */
-       memset(&fpr_init,0, sizeof(fpr_init));
-       memset(&fpr_final,0, sizeof(fpr_final));
-
-       /*
-        * ldfX.a we don't try to emulate anything but we must
-        * invalidate the ALAT entry.
-        * See comments in ldX for descriptions on how the various loads are handled.
-        */
-       if (ld.x6_op != 0x2) {
-               if (copy_from_user(&fpr_init, (void __user *) ifa, len))
-                       return -1;
-
-               DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
-               DDUMP("fpr_init =", &fpr_init, len);
-               /*
-                * we only do something for x6_op={0,8,9}
-                */
-               switch( ld.x6_sz ) {
-                       case 0:
-                               mem2float_extended(&fpr_init, &fpr_final);
-                               break;
-                       case 1:
-                               mem2float_integer(&fpr_init, &fpr_final);
-                               break;
-                       case 2:
-                               mem2float_single(&fpr_init, &fpr_final);
-                               break;
-                       case 3:
-                               mem2float_double(&fpr_init, &fpr_final);
-                               break;
-               }
-               DDUMP("fpr_final =", &fpr_final, len);
-               /*
-                * XXX fixme
-                *
-                * A possible optimization would be to drop fpr_final and directly
-                * use the storage from the saved context i.e., the actual final
-                * destination (pt_regs, switch_stack or thread structure).
-                */
-               setfpreg(ld.r1, &fpr_final, regs);
-       }
-
-       /*
-        * check for updates on any loads
-        */
-       if (ld.op == 0x7 || ld.m)
-               emulate_load_updates(ld.op == 0x7 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
-
-       /*
-        * invalidate ALAT entry in case of advanced floating point loads
-        */
-       if (ld.x6_op == 0x2)
-               invala_fr(ld.r1);
-
-       return 0;
-}
-
-
-static int
-emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
-{
-       struct ia64_fpreg fpr_init;
-       struct ia64_fpreg fpr_final;
-       unsigned long len = float_fsz[ld.x6_sz];
-
-       /*
-        * make sure we get clean buffers
-        */
-       memset(&fpr_init,0, sizeof(fpr_init));
-       memset(&fpr_final,0, sizeof(fpr_final));
-
-       /*
-        * if we get to this handler, Nat bits on both r3 and r2 have already
-        * been checked, so we don't need to do it again.
-        *
-        * extract the value to be stored
-        */
-       getfpreg(ld.imm, &fpr_init, regs);
-       /*
-        * during this step, we extract the spilled registers from the saved
-        * context i.e., we refill. Then we store (no spill) to temporary
-        * aligned location
-        */
-       switch( ld.x6_sz ) {
-               case 0:
-                       float2mem_extended(&fpr_init, &fpr_final);
-                       break;
-               case 1:
-                       float2mem_integer(&fpr_init, &fpr_final);
-                       break;
-               case 2:
-                       float2mem_single(&fpr_init, &fpr_final);
-                       break;
-               case 3:
-                       float2mem_double(&fpr_init, &fpr_final);
-                       break;
-       }
-       DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
-       DDUMP("fpr_init =", &fpr_init, len);
-       DDUMP("fpr_final =", &fpr_final, len);
-
-       if (copy_to_user((void __user *) ifa, &fpr_final, len))
-               return -1;
-
-       /*
-        * stfX [r3]=r2,imm(9)
-        *
-        * NOTE:
-        * ld.r3 can never be r0, because r0 would not generate an
-        * unaligned access.
-        */
-       if (ld.op == 0x7) {
-               unsigned long imm;
-
-               /*
-                * form imm9: bits [12:6] contain the first 7 bits
-                */
-               imm = ld.x << 7 | ld.r1;
-               /*
-                * sign extend (8 bits) if m set
-                */
-               if (ld.m)
-                       imm |= SIGN_EXT9;
-               /*
-                * ifa == r3 (NaT is necessarily cleared)
-                */
-               ifa += imm;
-
-               DPRINT("imm=%lx r3=%lx\n", imm, ifa);
-
-               setreg(ld.r3, ifa, 0, regs);
-       }
-       /*
-        * we don't have alat_invalidate_multiple() so we need
-        * to do the complete flush :-<<
-        */
-       ia64_invala();
-
-       return 0;
-}
-
-/*
- * Make sure we log the unaligned access, so that user/sysadmin can notice it and
- * eventually fix the program.  However, we don't want to do that for every access so we
- * pace it with jiffies.  This isn't really MP-safe, but it doesn't really have to be
- * either...
- */
-static int
-within_logging_rate_limit (void)
-{
-       static unsigned long count, last_time;
-
-       if (jiffies - last_time > 5*HZ)
-               count = 0;
-       if (++count < 5) {
-               last_time = jiffies;
-               return 1;
-       }
-       return 0;
-
-}
-
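Read carefully, this allows at most four messages per burst and re-arms once 5*HZ jiffies pass without a logged message. The same shape as a standalone model (jiffies and HZ replaced by plain parameters, for illustration only):

    /* mirrors within_logging_rate_limit() above */
    static int limiter_ok(unsigned long now, unsigned long hz)
    {
        static unsigned long count, last_time;

        if (now - last_time > 5 * hz)
            count = 0;
        if (++count < 5) {
            last_time = now;
            return 1;           /* ok to log */
        }
        return 0;               /* suppressed */
    }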
-void
-ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
-{
-#ifdef XEN
-printk("ia64_handle_unaligned: called, not working yet\n");
-#else
-       struct ia64_psr *ipsr = ia64_psr(regs);
-       mm_segment_t old_fs = get_fs();
-       unsigned long bundle[2];
-       unsigned long opcode;
-       struct siginfo si;
-       const struct exception_table_entry *eh = NULL;
-       union {
-               unsigned long l;
-               load_store_t insn;
-       } u;
-       int ret = -1;
-
-       if (ia64_psr(regs)->be) {
-               /* we don't support big-endian accesses */
-               die_if_kernel("big-endian unaligned accesses are not supported", regs, 0);
-               goto force_sigbus;
-       }
-
-       /*
-        * Treat kernel accesses for which there is an exception handler entry the same as
-        * user-level unaligned accesses.  Otherwise, a clever program could trick this
-        * handler into reading arbitrary kernel addresses...
-        */
-       if (!user_mode(regs))
-               eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
-       if (user_mode(regs) || eh) {
-               if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
-                       goto force_sigbus;
-
-               if (!(current->thread.flags & IA64_THREAD_UAC_NOPRINT)
-                   && within_logging_rate_limit())
-               {
-                       char buf[200];  /* comm[] is at most 16 bytes... */
-                       size_t len;
-
-                       len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
-                                     "ip=0x%016lx\n\r", current->comm, current->pid,
-                                     ifa, regs->cr_iip + ipsr->ri);
-                       /*
-                        * Don't call tty_write_message() if we're in the kernel; we might
-                        * be holding locks...
-                        */
-                       if (user_mode(regs))
-                               tty_write_message(current->signal->tty, buf);
-                       buf[len-1] = '\0';      /* drop '\r' */
-                       printk(KERN_WARNING "%s", buf); /* watch for command names containing %s */
-               }
-       } else {
-               if (within_logging_rate_limit())
-                       printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
-                              ifa, regs->cr_iip + ipsr->ri);
-               set_fs(KERNEL_DS);
-       }
-
-       DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
-              regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
-
-       if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
-               goto failure;
-
-       /*
-        * extract the instruction from the bundle given the slot number
-        */
-       switch (ipsr->ri) {
-             case 0: u.l = (bundle[0] >>  5); break;
-             case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break;
-             case 2: u.l = (bundle[1] >> 23); break;
-       }
-       opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK;
-
-       DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d "
-              "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm,
-              u.insn.r3, u.insn.x, u.insn.hint, u.insn.x6_sz, u.insn.m, u.insn.op);
-
-       /*
-        * IMPORTANT:
-        * Notice that the switch statement DOES NOT cover all possible instructions
-        * that DO generate unaligned references. This is made on purpose because for some
-        * instructions it DOES NOT make sense to try and emulate the access. Sometimes it
-        * is WRONG to try and emulate. Here is a list of instructions we don't emulate i.e.,
-        * the program will get a signal and die:
-        *
-        *      load/store:
-        *              - ldX.spill
-        *              - stX.spill
-        *      Reason: RNATs are based on addresses
-        *
-        *      synchronization:
-        *              - cmpxchg
-        *              - fetchadd
-        *              - xchg
-        *      Reason: ATOMIC operations cannot be emulated properly using multiple
-        *              instructions.
-        *
-        *      speculative loads:
-        *              - ldX.sZ
-        *      Reason: side effects, code must be ready to deal with failure so simpler
-        *              to let the load fail.
-        * ---------------------------------------------------------------------------------
-        * XXX fixme
-        *
-        * I would like to get rid of this switch case and do something
-        * more elegant.
-        */
-       switch (opcode) {
-             case LDS_OP:
-             case LDSA_OP:
-             case LDS_IMM_OP:
-             case LDSA_IMM_OP:
-             case LDFS_OP:
-             case LDFSA_OP:
-             case LDFS_IMM_OP:
-               /*
-                * The instruction will be retried with deferred exceptions turned on, and
-                * we should get the NaT bit installed
-                *
-                * IMPORTANT: When PSR_ED is set, the register & immediate update forms
-                * are actually executed even though the operation failed. So we don't
-                * need to take care of this.
-                */
-               DPRINT("forcing PSR_ED\n");
-               regs->cr_ipsr |= IA64_PSR_ED;
-               goto done;
-
-             case LD_OP:
-             case LDA_OP:
-             case LDBIAS_OP:
-             case LDACQ_OP:
-             case LDCCLR_OP:
-             case LDCNC_OP:
-             case LDCCLRACQ_OP:
-             case LD_IMM_OP:
-             case LDA_IMM_OP:
-             case LDBIAS_IMM_OP:
-             case LDACQ_IMM_OP:
-             case LDCCLR_IMM_OP:
-             case LDCNC_IMM_OP:
-             case LDCCLRACQ_IMM_OP:
-               ret = emulate_load_int(ifa, u.insn, regs);
-               break;
-
-             case ST_OP:
-             case STREL_OP:
-             case ST_IMM_OP:
-             case STREL_IMM_OP:
-               ret = emulate_store_int(ifa, u.insn, regs);
-               break;
-
-             case LDF_OP:
-             case LDFA_OP:
-             case LDFCCLR_OP:
-             case LDFCNC_OP:
-             case LDF_IMM_OP:
-             case LDFA_IMM_OP:
-             case LDFCCLR_IMM_OP:
-             case LDFCNC_IMM_OP:
-               if (u.insn.x)
-                       ret = emulate_load_floatpair(ifa, u.insn, regs);
-               else
-                       ret = emulate_load_float(ifa, u.insn, regs);
-               break;
-
-             case STF_OP:
-             case STF_IMM_OP:
-               ret = emulate_store_float(ifa, u.insn, regs);
-               break;
-
-             default:
-               goto failure;
-       }
-       DPRINT("ret=%d\n", ret);
-       if (ret)
-               goto failure;
-
-       if (ipsr->ri == 2)
-               /*
-                * given today's architecture this case is not likely to happen because a
-                * memory access instruction (M) can never be in the last slot of a
-                * bundle. But let's keep it for now.
-                */
-               regs->cr_iip += 16;
-       ipsr->ri = (ipsr->ri + 1) & 0x3;
-
-       DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
-  done:
-       set_fs(old_fs);         /* restore original address limit */
-       return;
-
-  failure:
-       /* something went wrong... */
-       if (!user_mode(regs)) {
-               if (eh) {
-                       ia64_handle_exception(regs, eh);
-                       goto done;
-               }
-               die_if_kernel("error during unaligned kernel access\n", regs, ret);
-               /* NOT_REACHED */
-       }
-  force_sigbus:
-       si.si_signo = SIGBUS;
-       si.si_errno = 0;
-       si.si_code = BUS_ADRALN;
-       si.si_addr = (void __user *) ifa;
-       si.si_flags = 0;
-       si.si_isr = 0;
-       si.si_imm = 0;
-       force_sig_info(SIGBUS, &si, current);
-       goto done;
-#endif
-}
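For reference, the slot extraction the handler performs can be read in isolation: an IA-64 bundle is 128 bits, a 5-bit template followed by three 41-bit slots, and the extra high bits left in the result are masked off later by the opcode mask. A standalone sketch (helper name illustrative; bundle words are little-endian as loaded above):

    #include <stdint.h>

    static uint64_t bundle_slot(const uint64_t b[2], unsigned ri /* 0..2 */)
    {
        switch (ri) {
        case 0:  return b[0] >> 5;                      /* bits 5..45    */
        case 1:  return (b[0] >> 46) | (b[1] << 18);    /* straddles words */
        default: return b[1] >> 23;                     /* bits 87..127  */
        }
    }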
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/gcc_intrin.h
--- a/xen/include/asm-ia64/gcc_intrin.h Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,657 +0,0 @@
-#ifndef _ASM_IA64_GCC_INTRIN_H
-#define _ASM_IA64_GCC_INTRIN_H
-/*
- *
- * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@xxxxxxxxx>
- * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
- */
-
-#include <linux/compiler.h>
-
-/* define this macro to get some asm stmts included in 'c' files */
-#define ASM_SUPPORTED
-
-/* Optimization barrier */
-/* The "volatile" is due to gcc bugs */
-#define ia64_barrier() asm volatile ("":::"memory")
-
-#define ia64_stop()    asm volatile (";;"::)
-
-#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
-
-#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
-
-extern void ia64_bad_param_for_setreg (void);
-extern void ia64_bad_param_for_getreg (void);
-
-register unsigned long ia64_r13 asm ("r13") __attribute_used__;
-
-#define ia64_setreg(regnum, val)                                               \
-({                                                                             \
-       switch (regnum) {                                                       \
-           case _IA64_REG_PSR_L:                                               \
-                   asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");       \
-                   break;                                                      \
-           case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:                          \
-                   asm volatile ("mov ar%0=%1" ::                              \
-                                         "i" (regnum - _IA64_REG_AR_KR0),      \
-                                         "r"(val): "memory");                  \
-                   break;                                                      \
-           case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:                        \
-                   asm volatile ("mov cr%0=%1" ::                              \
-                                         "i" (regnum - _IA64_REG_CR_DCR),      \
-                                         "r"(val): "memory" );                 \
-                   break;                                                      \
-           case _IA64_REG_SP:                                                  \
-                   asm volatile ("mov r12=%0" ::                               \
-                                         "r"(val): "memory");                  \
-                   break;                                                      \
-           case _IA64_REG_GP:                                                  \
-                   asm volatile ("mov gp=%0" :: "r"(val) : "memory");          \
-               break;                                                          \
-           default:                                                            \
-                   ia64_bad_param_for_setreg();                                \
-                   break;                                                      \
-       }                                                                       \
-})
-
-#define ia64_getreg(regnum)                                                    \
-({                                                                             \
-       __u64 ia64_intri_res;                                                   \
-                                                                               \
-       switch (regnum) {                                                       \
-       case _IA64_REG_GP:                                                      \
-               asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));              \
-               break;                                                          \
-       case _IA64_REG_IP:                                                      \
-               asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));              \
-               break;                                                          \
-       case _IA64_REG_PSR:                                                     \
-               asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));             \
-               break;                                                          \
-       case _IA64_REG_TP:      /* for current() */                             \
-               ia64_intri_res = ia64_r13;                                      \
-               break;                                                          \
-       case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:                              \
-               asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)             \
-                                     : "i"(regnum - _IA64_REG_AR_KR0));        \
-               break;                                                          \
-       case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:                            \
-               asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)             \
-                                     : "i" (regnum - _IA64_REG_CR_DCR));       \
-               break;                                                          \
-       case _IA64_REG_SP:                                                      \
-               asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));             \
-               break;                                                          \
-       default:                                                                \
-               ia64_bad_param_for_getreg();                                    \
-               break;                                                          \
-       }                                                                       \
-       ia64_intri_res;                                                         \
-})
-
-#define ia64_hint_pause 0
-
-#define ia64_hint(mode)                                                \
-({                                                             \
-       switch (mode) {                                         \
-       case ia64_hint_pause:                                   \
-               asm volatile ("hint @pause" ::: "memory");      \
-               break;                                          \
-       }                                                       \
-})
-
-
-/* Integer values for mux1 instruction */
-#define ia64_mux1_brcst 0
-#define ia64_mux1_mix   8
-#define ia64_mux1_shuf  9
-#define ia64_mux1_alt  10
-#define ia64_mux1_rev  11
-
-#define ia64_mux1(x, mode)							\
-({										\
-	__u64 ia64_intri_res;							\
-										\
-	switch (mode) {								\
-	case ia64_mux1_brcst:							\
-		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
-		break;								\
-	case ia64_mux1_mix:							\
-		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
-		break;								\
-	case ia64_mux1_shuf:							\
-		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
-		break;								\
-	case ia64_mux1_alt:							\
-		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
-		break;								\
-	case ia64_mux1_rev:							\
-		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
-		break;								\
-	}									\
-	ia64_intri_res;								\
-})
-
-#define ia64_popcnt(x)                                         \
-({                                                             \
-       __u64 ia64_intri_res;                                   \
-       asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
-                                                               \
-       ia64_intri_res;                                         \
-})
-
-#define ia64_getf_exp(x)                                       \
-({                                                             \
-       long ia64_intri_res;                                    \
-                                                               \
-       asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
-                                                               \
-       ia64_intri_res;                                         \
-})
-
-#define ia64_shrp(a, b, count)								\
-({											\
-	__u64 ia64_intri_res;								\
-	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
-	ia64_intri_res;									\
-})
-
-#define ia64_ldfs(regnum, x)                                   \
-({                                                             \
-       register double __f__ asm ("f"#regnum);                 \
-       asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));     \
-})
-
-#define ia64_ldfd(regnum, x)                                   \
-({                                                             \
-       register double __f__ asm ("f"#regnum);                 \
-       asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));     \
-})
-
-#define ia64_ldfe(regnum, x)                                   \
-({                                                             \
-       register double __f__ asm ("f"#regnum);                 \
-       asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));     \
-})
-
-#define ia64_ldf8(regnum, x)                                   \
-({                                                             \
-       register double __f__ asm ("f"#regnum);                 \
-       asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));     \
-})
-
-#define ia64_ldf_fill(regnum, x)                               \
-({                                                             \
-       register double __f__ asm ("f"#regnum);                 \
-       asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
-})
-
-#define ia64_stfs(x, regnum)                                           \
-({                                                                     \
-       register double __f__ asm ("f"#regnum);                         \
-       asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
-#define ia64_stfd(x, regnum)                                           \
-({                                                                     \
-       register double __f__ asm ("f"#regnum);                         \
-       asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
-#define ia64_stfe(x, regnum)                                           \
-({                                                                     \
-       register double __f__ asm ("f"#regnum);                         \
-       asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
-#define ia64_stf8(x, regnum)                                           \
-({                                                                     \
-       register double __f__ asm ("f"#regnum);                         \
-       asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
-#define ia64_stf_spill(x, regnum)						\
-({										\
-	register double __f__ asm ("f"#regnum);					\
-	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
-})
-
-#define ia64_fetchadd4_acq(p, inc)						\
-({										\
-										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("fetchadd4.acq %0=[%1],%2"				\
-				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
-				: "memory");					\
-										\
-	ia64_intri_res;								\
-})
-
-#define ia64_fetchadd4_rel(p, inc)						\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("fetchadd4.rel %0=[%1],%2"				\
-				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
-				: "memory");					\
-										\
-	ia64_intri_res;								\
-})
-
-#define ia64_fetchadd8_acq(p, inc)						\
-({										\
-										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("fetchadd8.acq %0=[%1],%2"				\
-				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
-				: "memory");					\
-										\
-	ia64_intri_res;								\
-})
-
-#define ia64_fetchadd8_rel(p, inc)						\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("fetchadd8.rel %0=[%1],%2"				\
-				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
-				: "memory");					\
-										\
-	ia64_intri_res;								\
-})
-
-#define ia64_xchg1(ptr,x)							\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("xchg1 %0=[%1],%2"					\
-		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory");	\
-	ia64_intri_res;								\
-})
-
-#define ia64_xchg2(ptr,x)                                              \
-({                                                                     \
-       __u64 ia64_intri_res;                                           \
-       asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)        \
-                     : "r" (ptr), "r" (x) : "memory");                 \
-       ia64_intri_res;                                                 \
-})
-
-#define ia64_xchg4(ptr,x)                                              \
-({                                                                     \
-       __u64 ia64_intri_res;                                           \
-       asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)        \
-                     : "r" (ptr), "r" (x) : "memory");                 \
-       ia64_intri_res;                                                 \
-})
-
-#define ia64_xchg8(ptr,x)                                              \
-({                                                                     \
-       __u64 ia64_intri_res;                                           \
-       asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)        \
-                     : "r" (ptr), "r" (x) : "memory");                 \
-       ia64_intri_res;                                                 \
-})
-
-#define ia64_cmpxchg1_acq(ptr, new, old)					\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
-	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":				\
-		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
-	ia64_intri_res;								\
-})
-
-#define ia64_cmpxchg1_rel(ptr, new, old)					\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
-	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":				\
-		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
-	ia64_intri_res;								\
-})
-
-#define ia64_cmpxchg2_acq(ptr, new, old)					\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
-	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":				\
-		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
-	ia64_intri_res;								\
-})
-
-#define ia64_cmpxchg2_rel(ptr, new, old)					\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
-										\
-	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":				\
-		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
-	ia64_intri_res;								\
-})
-
-#define ia64_cmpxchg4_acq(ptr, new, old)					\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
-	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":				\
-		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
-	ia64_intri_res;								\
-})
-
-#define ia64_cmpxchg4_rel(ptr, new, old)					\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
-	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":				\
-		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
-	ia64_intri_res;								\
-})
-
-#define ia64_cmpxchg8_acq(ptr, new, old)					\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
-	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":				\
-		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
-	ia64_intri_res;								\
-})
-
-#define ia64_cmpxchg8_rel(ptr, new, old)					\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
-										\
-	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":				\
-		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
-	ia64_intri_res;								\
-})
-
-#define ia64_mf()      asm volatile ("mf" ::: "memory")
-#define ia64_mfa()     asm volatile ("mf.a" ::: "memory")
-
-#ifdef CONFIG_VTI
-/*
- * Flushrs instruction stream.
- */
-#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
-
-#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
-
-#define ia64_get_rsc()                          \
-({                                  \
-    unsigned long val;                     \
-    asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory");  \
-    val;                               \
-})
-
-#define ia64_set_rsc(val)                       \
-    asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
-
-#define ia64_get_bspstore()     \
-({                                  \
-    unsigned long val;                     \
-    asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory");  \
-    val;                               \
-})
-
-#define ia64_set_bspstore(val)                       \
-    asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
-
-#define ia64_get_rnat()     \
-({                                  \
-    unsigned long val;                     \
-    asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory");  \
-    val;                               \
-})
-
-#define ia64_set_rnat(val)                       \
-    asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
-
-#define ia64_ttag(addr)								\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
-	ia64_intri_res;								\
-})
-
-#define ia64_get_dcr()                          \
-({                                      \
-    __u64 result;                               \
-    asm volatile ("mov %0=cr.dcr" : "=r"(result) : );           \
-    result;                                 \
-})
-
-#define ia64_set_dcr(val)                           \
-({                                      \
-    asm volatile ("mov cr.dcr=%0" :: "r"(val) );            \
-})
-
-#endif // CONFIG_VTI
-
-
-#define ia64_invala() asm volatile ("invala" ::: "memory")
-
-#define ia64_thash(addr)							\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
-	ia64_intri_res;								\
-})
-
-#define ia64_srlz_i()  asm volatile (";; srlz.i ;;" ::: "memory")
-#define ia64_srlz_d()  asm volatile (";; srlz.d" ::: "memory");
-
-#ifdef HAVE_SERIALIZE_DIRECTIVE
-# define ia64_dv_serialize_data()		asm volatile (".serialize.data");
-# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction");
-#else
-# define ia64_dv_serialize_data()
-# define ia64_dv_serialize_instruction()
-#endif
-
-#define ia64_nop(x)    asm volatile ("nop %0"::"i"(x));
-
-#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
-
-#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
-
-
-#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"		\
-					     :: "r"(trnum), "r"(addr) : "memory")
-
-#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"		\
-					     :: "r"(trnum), "r"(addr) : "memory")
-
-#define ia64_tpa(addr)								\
-({										\
-	__u64 ia64_pa;								\
-	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
-	ia64_pa;								\
-})
-
-#define __ia64_set_dbr(index, val)						\
-	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
-#define ia64_set_ibr(index, val)						\
-	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
-#define ia64_set_pkr(index, val)						\
-	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
-#define ia64_set_pmc(index, val)						\
-	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
-#define ia64_set_pmd(index, val)						\
-	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
-#define ia64_set_rr(index, val)							\
-	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
-
-#define ia64_get_cpuid(index)							\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
-	ia64_intri_res;								\
-})
-
-#define __ia64_get_dbr(index)							\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
-	ia64_intri_res;								\
-})
-
-#define ia64_get_ibr(index)							\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
-	ia64_intri_res;								\
-})
-
-#define ia64_get_pkr(index)							\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
-	ia64_intri_res;								\
-})
-
-#define ia64_get_pmc(index)							\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
-	ia64_intri_res;								\
-})
-
-
-#define ia64_get_pmd(index)							\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
-	ia64_intri_res;								\
-})
-
-#define ia64_get_rr(index)							\
-({										\
-	__u64 ia64_intri_res;							\
-	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
-	ia64_intri_res;								\
-})
-
-#define ia64_fc(addr)  asm volatile ("fc %0" :: "r"(addr) : "memory")
-
-
-#define ia64_sync_i()  asm volatile (";; sync.i" ::: "memory")
-
-#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
-#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
-#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
-#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
-
-#define ia64_ptce(addr)        asm volatile ("ptc.e %0" :: "r"(addr))
-
-#define ia64_ptcga(addr, size)							\
-do {										\
-	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
-	ia64_dv_serialize_data();						\
-} while (0)
-
-#define ia64_ptcl(addr, size)							\
-do {										\
-	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
-	ia64_dv_serialize_data();						\
-} while (0)
-
-#define ia64_ptri(addr, size)                                          \
-       asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
-
-#define ia64_ptrd(addr, size)                                          \
-       asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
-
-/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
-
-#define ia64_lfhint_none   0
-#define ia64_lfhint_nt1    1
-#define ia64_lfhint_nt2    2
-#define ia64_lfhint_nta    3
-
-#define ia64_lfetch(lfhint, y)                                 \
-({                                                             \
-        switch (lfhint) {                                      \
-        case ia64_lfhint_none:                                 \
-                asm volatile ("lfetch [%0]" : : "r"(y));       \
-                break;                                         \
-        case ia64_lfhint_nt1:                                  \
-                asm volatile ("lfetch.nt1 [%0]" : : "r"(y));   \
-                break;                                         \
-        case ia64_lfhint_nt2:                                  \
-                asm volatile ("lfetch.nt2 [%0]" : : "r"(y));   \
-                break;                                         \
-        case ia64_lfhint_nta:                                  \
-                asm volatile ("lfetch.nta [%0]" : : "r"(y));   \
-                break;                                         \
-        }                                                      \
-})
-
-#define ia64_lfetch_excl(lfhint, y)                                    \
-({                                                                     \
-        switch (lfhint) {                                              \
-        case ia64_lfhint_none:                                         \
-                asm volatile ("lfetch.excl [%0]" :: "r"(y));           \
-                break;                                                 \
-        case ia64_lfhint_nt1:                                          \
-                asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));       \
-                break;                                                 \
-        case ia64_lfhint_nt2:                                          \
-                asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));       \
-                break;                                                 \
-        case ia64_lfhint_nta:                                          \
-                asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));       \
-                break;                                                 \
-        }                                                              \
-})
-
-#define ia64_lfetch_fault(lfhint, y)                                   \
-({                                                                     \
-        switch (lfhint) {                                              \
-        case ia64_lfhint_none:                                         \
-                asm volatile ("lfetch.fault [%0]" : : "r"(y));         \
-                break;                                                 \
-        case ia64_lfhint_nt1:                                          \
-                asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));     \
-                break;                                                 \
-        case ia64_lfhint_nt2:                                          \
-                asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));     \
-                break;                                                 \
-        case ia64_lfhint_nta:                                          \
-                asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));     \
-                break;                                                 \
-        }                                                              \
-})
-
-#define ia64_lfetch_fault_excl(lfhint, y)                              \
-({                                                                     \
-        switch (lfhint) {                                              \
-        case ia64_lfhint_none:                                         \
-                asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));     \
-                break;                                                 \
-        case ia64_lfhint_nt1:                                          \
-                asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
-                break;                                                 \
-        case ia64_lfhint_nt2:                                          \
-                asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
-                break;                                                 \
-        case ia64_lfhint_nta:                                          \
-                asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
-                break;                                                 \
-        }                                                              \
-})
-
-#define ia64_intrin_local_irq_restore(x)                       \
-do {                                                           \
-       asm volatile (";;   cmp.ne p6,p7=%0,r0;;"               \
-                     "(p6) ssm psr.i;"                         \
-                     "(p7) rsm psr.i;;"                        \
-                     "(p6) srlz.d"                             \
-                     :: "r"((x)) : "p6", "p7", "memory");      \
-} while (0)
-
-#endif /* _ASM_IA64_GCC_INTRIN_H */
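Every ia64_* intrinsic removed above is built on GCC's ({ ... }) statement-expression extension, which lets a macro declare locals and branch on the register class while still yielding a value. A minimal, host-compilable sketch of the same pattern (the macro name and values here are illustrative only, not from this patch):

#include <stdio.h>

/* Same ({ ... }) shape as ia64_getreg() above: declare a local,
 * do the work, and let the last expression be the macro's value. */
#define square_once(x)						\
({								\
	__typeof__(x) _v = (x);	/* evaluate argument once */	\
	_v * _v;		/* value of the whole block */	\
})

int main(void)
{
	printf("%d\n", square_once(7));	/* prints 49 */
	return 0;
}
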
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/hpsim_ssc.h
--- a/xen/include/asm-ia64/hpsim_ssc.h  Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,55 +0,0 @@
-/*
- * Platform dependent support for HP simulator.
- *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 1999 Vijay Chander <vijay@xxxxxxxxxxxx>
- */
-#ifndef _IA64_PLATFORM_HPSIM_SSC_H
-#define _IA64_PLATFORM_HPSIM_SSC_H
-
-/* Simulator system calls: */
-
-#define SSC_CONSOLE_INIT               20
-#define SSC_GETCHAR                    21
-#define SSC_PUTCHAR                    31
-#define SSC_CONNECT_INTERRUPT          58
-#define SSC_GENERATE_INTERRUPT         59
-#define SSC_SET_PERIODIC_INTERRUPT     60
-#define SSC_GET_RTC                    65
-#define SSC_EXIT                       66
-#define SSC_LOAD_SYMBOLS               69
-#define SSC_GET_TOD                    74
-#define SSC_CTL_TRACE                  76
-
-#define SSC_NETDEV_PROBE               100
-#define SSC_NETDEV_SEND                        101
-#define SSC_NETDEV_RECV                        102
-#define SSC_NETDEV_ATTACH              103
-#define SSC_NETDEV_DETACH              104
-
-/*
- * Simulator system call.
- */
-extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
-
-#ifdef XEN
-/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
- * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
-#define SSC_OPEN                       50
-#define SSC_CLOSE                      51
-#define SSC_READ                       52
-#define SSC_WRITE                      53
-#define SSC_GET_COMPLETION             54
-#define SSC_WAIT_COMPLETION            55
-
-#define SSC_WRITE_ACCESS               2
-#define SSC_READ_ACCESS                        1
-
-struct ssc_disk_req {
-       unsigned long addr;
-       unsigned long len;
-};
-#endif
-
-#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
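For context, the SSC numbers above are passed as the nr argument of ia64_ssc(), with arg0..arg3 carrying the call's operands. A hedged sketch of typical console usage, assuming the declarations above are in scope (the wrapper names are ours, for illustration only):

/* Hypothetical wrappers; ia64_ssc(), SSC_PUTCHAR and SSC_GETCHAR
 * come from the header being moved. */
static void sim_putchar(int ch)
{
	ia64_ssc(ch, 0, 0, 0, SSC_PUTCHAR);		/* arg0 = character */
}

static int sim_getchar(void)
{
	return ia64_ssc(0, 0, 0, 0, SSC_GETCHAR);	/* result = character */
}
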
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/ia64regs.h
--- a/xen/include/asm-ia64/ia64regs.h   Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2002,2003 Intel Corp.
- *      Jun Nakajima <jun.nakajima@xxxxxxxxx>
- *      Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
- */
-
-#ifndef _ASM_IA64_IA64REGS_H
-#define _ASM_IA64_IA64REGS_H
-
-/*
- * Register Names for getreg() and setreg().
- *
- * The "magic" numbers happen to match the values used by the Intel compiler's
- * getreg()/setreg() intrinsics.
- */
-
-/* Special Registers */
-
-#define _IA64_REG_IP           1016    /* getreg only */
-#define _IA64_REG_PSR          1019
-#define _IA64_REG_PSR_L                1019
-
-/* General Integer Registers */
-
-#define _IA64_REG_GP           1025    /* R1 */
-#define _IA64_REG_R8           1032    /* R8 */
-#define _IA64_REG_R9           1033    /* R9 */
-#define _IA64_REG_SP           1036    /* R12 */
-#define _IA64_REG_TP           1037    /* R13 */
-
-/* Application Registers */
-
-#define _IA64_REG_AR_KR0       3072
-#define _IA64_REG_AR_KR1       3073
-#define _IA64_REG_AR_KR2       3074
-#define _IA64_REG_AR_KR3       3075
-#define _IA64_REG_AR_KR4       3076
-#define _IA64_REG_AR_KR5       3077
-#define _IA64_REG_AR_KR6       3078
-#define _IA64_REG_AR_KR7       3079
-#define _IA64_REG_AR_RSC       3088
-#define _IA64_REG_AR_BSP       3089
-#define _IA64_REG_AR_BSPSTORE  3090
-#define _IA64_REG_AR_RNAT      3091
-#define _IA64_REG_AR_FCR       3093
-#define _IA64_REG_AR_EFLAG     3096
-#define _IA64_REG_AR_CSD       3097
-#define _IA64_REG_AR_SSD       3098
-#define _IA64_REG_AR_CFLAG     3099
-#define _IA64_REG_AR_FSR       3100
-#define _IA64_REG_AR_FIR       3101
-#define _IA64_REG_AR_FDR       3102
-#define _IA64_REG_AR_CCV       3104
-#define _IA64_REG_AR_UNAT      3108
-#define _IA64_REG_AR_FPSR      3112
-#define _IA64_REG_AR_ITC       3116
-#define _IA64_REG_AR_PFS       3136
-#define _IA64_REG_AR_LC                3137
-#define _IA64_REG_AR_EC                3138
-
-/* Control Registers */
-
-#define _IA64_REG_CR_DCR       4096
-#define _IA64_REG_CR_ITM       4097
-#define _IA64_REG_CR_IVA       4098
-#define _IA64_REG_CR_PTA       4104
-#define _IA64_REG_CR_IPSR      4112
-#define _IA64_REG_CR_ISR       4113
-#define _IA64_REG_CR_IIP       4115
-#define _IA64_REG_CR_IFA       4116
-#define _IA64_REG_CR_ITIR      4117
-#define _IA64_REG_CR_IIPA      4118
-#define _IA64_REG_CR_IFS       4119
-#define _IA64_REG_CR_IIM       4120
-#define _IA64_REG_CR_IHA       4121
-#define _IA64_REG_CR_LID       4160
-#define _IA64_REG_CR_IVR       4161    /* getreg only */
-#define _IA64_REG_CR_TPR       4162
-#define _IA64_REG_CR_EOI       4163
-#define _IA64_REG_CR_IRR0      4164    /* getreg only */
-#define _IA64_REG_CR_IRR1      4165    /* getreg only */
-#define _IA64_REG_CR_IRR2      4166    /* getreg only */
-#define _IA64_REG_CR_IRR3      4167    /* getreg only */
-#define _IA64_REG_CR_ITV       4168
-#define _IA64_REG_CR_PMV       4169
-#define _IA64_REG_CR_CMCV      4170
-#define _IA64_REG_CR_LRR0      4176
-#define _IA64_REG_CR_LRR1      4177
-
-#ifdef  CONFIG_VTI
-#define IA64_REG_CR_DCR   0
-#define IA64_REG_CR_ITM   1
-#define IA64_REG_CR_IVA   2
-#define IA64_REG_CR_PTA   8
-#define IA64_REG_CR_IPSR  16
-#define IA64_REG_CR_ISR   17
-#define IA64_REG_CR_IIP   19
-#define IA64_REG_CR_IFA   20
-#define IA64_REG_CR_ITIR  21
-#define IA64_REG_CR_IIPA  22
-#define IA64_REG_CR_IFS   23
-#define IA64_REG_CR_IIM   24
-#define IA64_REG_CR_IHA   25
-#define IA64_REG_CR_LID   64
-#define IA64_REG_CR_IVR   65
-#define IA64_REG_CR_TPR   66
-#define IA64_REG_CR_EOI   67
-#define IA64_REG_CR_IRR0  68
-#define IA64_REG_CR_IRR1  69
-#define IA64_REG_CR_IRR2  70
-#define IA64_REG_CR_IRR3  71
-#define IA64_REG_CR_ITV   72
-#define IA64_REG_CR_PMV   73
-#define IA64_REG_CR_CMCV  74
-#define IA64_REG_CR_LRR0  80
-#define IA64_REG_CR_LRR1  81
-#endif  //  CONFIG_VTI
-
-/* Indirect Registers for getindreg() and setindreg() */
-
-#define _IA64_REG_INDR_CPUID   9000    /* getindreg only */
-#define _IA64_REG_INDR_DBR     9001
-#define _IA64_REG_INDR_IBR     9002
-#define _IA64_REG_INDR_PKR     9003
-#define _IA64_REG_INDR_PMC     9004
-#define _IA64_REG_INDR_PMD     9005
-#define _IA64_REG_INDR_RR      9006
-
-#endif /* _ASM_IA64_IA64REGS_H */
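The "magic" values above encode the register class in their base: ia64_getreg()/ia64_setreg() in gcc_intrin.h recover the hardware register index by subtracting the class base (e.g. regnum - _IA64_REG_AR_KR0). A self-contained check of that arithmetic, with the constants copied from the header above:

#include <assert.h>

#define _IA64_REG_AR_KR0	3072
#define _IA64_REG_AR_CCV	3104
#define _IA64_REG_CR_DCR	4096
#define _IA64_REG_CR_TPR	4162

int main(void)
{
	/* "mov ar%0=%1" uses the offset from the AR base: ar.ccv is ar32 */
	assert(_IA64_REG_AR_CCV - _IA64_REG_AR_KR0 == 32);
	/* "mov %0=cr%1" likewise from the CR base: cr.tpr is cr66 */
	assert(_IA64_REG_CR_TPR - _IA64_REG_CR_DCR == 66);
	return 0;
}
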
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/io.h
--- a/xen/include/asm-ia64/io.h Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,488 +0,0 @@
-#ifndef _ASM_IA64_IO_H
-#define _ASM_IA64_IO_H
-
-/*
- * This file contains the definitions for the emulated IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated to
- * (a) handle it all in a way that makes gcc able to optimize it as
- * well as possible and (b) trying to avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
- * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
- */
-
-/* We don't use IO slowdowns on the ia64, but.. */
-#define __SLOW_DOWN_IO do { } while (0)
-#define SLOW_DOWN_IO   do { } while (0)
-
-#ifdef XEN
-#define __IA64_UNCACHED_OFFSET 0xe800000000000000UL
-#else
-#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL    /* region 6 */
-#endif
-
-/*
- * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
- * large machines may have multiple other I/O spaces so we can't place any a priori limit
- * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
- */
-#define IO_SPACE_LIMIT         0xffffffffffffffffUL
-
-#define MAX_IO_SPACES_BITS             4
-#define MAX_IO_SPACES                  (1UL << MAX_IO_SPACES_BITS)
-#define IO_SPACE_BITS                  24
-#define IO_SPACE_SIZE                  (1UL << IO_SPACE_BITS)
-
-#define IO_SPACE_NR(port)              ((port) >> IO_SPACE_BITS)
-#define IO_SPACE_BASE(space)           ((space) << IO_SPACE_BITS)
-#define IO_SPACE_PORT(port)            ((port) & (IO_SPACE_SIZE - 1))
-
-#define IO_SPACE_SPARSE_ENCODING(p)    ((((p) >> 2) << 12) | (p & 0xfff))
-
-struct io_space {
-       unsigned long mmio_base;        /* base in MMIO space */
-       int sparse;
-};
-
-extern struct io_space io_space[];
-extern unsigned int num_io_spaces;
-
-# ifdef __KERNEL__
-
-/*
- * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
- *     0xCxxxxxxxxxxxxxxx      MMIO cookie (return from ioremap)
- *     0x000000001SPPPPPP      PIO cookie (S=space number, P..P=port)
- *
- * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
- * code that uses bare port numbers without the prerequisite pci_iomap().
- */
-#define PIO_OFFSET             (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
-#define PIO_MASK               (PIO_OFFSET - 1)
-#define PIO_RESERVED           __IA64_UNCACHED_OFFSET
-#define HAVE_ARCH_PIO_SIZE
-
-#include <asm/intrinsics.h>
-#include <asm/machvec.h>
-#include <asm/page.h>
-#include <asm/system.h>
-#include <asm-generic/iomap.h>
-
-/*
- * Change virtual addresses to physical addresses and vv.
- */
-static inline unsigned long
-virt_to_phys (volatile void *address)
-{
-       return (unsigned long) address - PAGE_OFFSET;
-}
-
-static inline void*
-phys_to_virt (unsigned long address)
-{
-       return (void *) (address + PAGE_OFFSET);
-}
-
-#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
-
-/*
- * The following two macros are deprecated and scheduled for removal.
- * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
- */
-#define bus_to_virt    phys_to_virt
-#define virt_to_bus    virt_to_phys
-#define page_to_bus    page_to_phys
-
-# endif /* KERNEL */
-
-/*
- * Memory fence w/accept.  This should never be used in code that is
- * not IA-64 specific.
- */
-#define __ia64_mf_a()  ia64_mfa()
-
-/**
- * ___ia64_mmiowb - I/O write barrier
- *
- * Ensure ordering of I/O space writes.  This will make sure that writes
- * following the barrier will arrive after all previous writes.  For most
- * ia64 platforms, this is a simple 'mf.a' instruction.
- *
- * See Documentation/DocBook/deviceiobook.tmpl for more information.
- */
-static inline void ___ia64_mmiowb(void)
-{
-       ia64_mfa();
-}
-
-static inline const unsigned long
-__ia64_get_io_port_base (void)
-{
-       extern unsigned long ia64_iobase;
-
-       return ia64_iobase;
-}
-
-static inline void*
-__ia64_mk_io_addr (unsigned long port)
-{
-       struct io_space *space;
-       unsigned long offset;
-
-       space = &io_space[IO_SPACE_NR(port)];
-       port = IO_SPACE_PORT(port);
-       if (space->sparse)
-               offset = IO_SPACE_SPARSE_ENCODING(port);
-       else
-               offset = port;
-
-       return (void *) (space->mmio_base | offset);
-}
-
-#define __ia64_inb     ___ia64_inb
-#define __ia64_inw     ___ia64_inw
-#define __ia64_inl     ___ia64_inl
-#define __ia64_outb    ___ia64_outb
-#define __ia64_outw    ___ia64_outw
-#define __ia64_outl    ___ia64_outl
-#define __ia64_readb   ___ia64_readb
-#define __ia64_readw   ___ia64_readw
-#define __ia64_readl   ___ia64_readl
-#define __ia64_readq   ___ia64_readq
-#define __ia64_readb_relaxed   ___ia64_readb
-#define __ia64_readw_relaxed   ___ia64_readw
-#define __ia64_readl_relaxed   ___ia64_readl
-#define __ia64_readq_relaxed   ___ia64_readq
-#define __ia64_writeb  ___ia64_writeb
-#define __ia64_writew  ___ia64_writew
-#define __ia64_writel  ___ia64_writel
-#define __ia64_writeq  ___ia64_writeq
-#define __ia64_mmiowb  ___ia64_mmiowb
-
-/*
- * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
- * that the access has completed before executing other I/O accesses.  Since we're doing
- * the accesses through an uncachable (UC) translation, the CPU will execute them in
- * program order.  However, we still need to tell the compiler not to shuffle them around
- * during optimization, which is why we use "volatile" pointers.
- */
-
-static inline unsigned int
-___ia64_inb (unsigned long port)
-{
-       volatile unsigned char *addr = __ia64_mk_io_addr(port);
-       unsigned char ret;
-
-       ret = *addr;
-       __ia64_mf_a();
-       return ret;
-}
-
-static inline unsigned int
-___ia64_inw (unsigned long port)
-{
-       volatile unsigned short *addr = __ia64_mk_io_addr(port);
-       unsigned short ret;
-
-       ret = *addr;
-       __ia64_mf_a();
-       return ret;
-}
-
-static inline unsigned int
-___ia64_inl (unsigned long port)
-{
-       volatile unsigned int *addr = __ia64_mk_io_addr(port);
-       unsigned int ret;
-
-       ret = *addr;
-       __ia64_mf_a();
-       return ret;
-}
-
-static inline void
-___ia64_outb (unsigned char val, unsigned long port)
-{
-       volatile unsigned char *addr = __ia64_mk_io_addr(port);
-
-       *addr = val;
-       __ia64_mf_a();
-}
-
-static inline void
-___ia64_outw (unsigned short val, unsigned long port)
-{
-       volatile unsigned short *addr = __ia64_mk_io_addr(port);
-
-       *addr = val;
-       __ia64_mf_a();
-}
-
-static inline void
-___ia64_outl (unsigned int val, unsigned long port)
-{
-       volatile unsigned int *addr = __ia64_mk_io_addr(port);
-
-       *addr = val;
-       __ia64_mf_a();
-}
-
-static inline void
-__insb (unsigned long port, void *dst, unsigned long count)
-{
-       unsigned char *dp = dst;
-
-       while (count--)
-               *dp++ = platform_inb(port);
-}
-
-static inline void
-__insw (unsigned long port, void *dst, unsigned long count)
-{
-       unsigned short *dp = dst;
-
-       while (count--)
-               *dp++ = platform_inw(port);
-}
-
-static inline void
-__insl (unsigned long port, void *dst, unsigned long count)
-{
-       unsigned int *dp = dst;
-
-       while (count--)
-               *dp++ = platform_inl(port);
-}
-
-static inline void
-__outsb (unsigned long port, const void *src, unsigned long count)
-{
-       const unsigned char *sp = src;
-
-       while (count--)
-               platform_outb(*sp++, port);
-}
-
-static inline void
-__outsw (unsigned long port, const void *src, unsigned long count)
-{
-       const unsigned short *sp = src;
-
-       while (count--)
-               platform_outw(*sp++, port);
-}
-
-static inline void
-__outsl (unsigned long port, const void *src, unsigned long count)
-{
-       const unsigned int *sp = src;
-
-       while (count--)
-               platform_outl(*sp++, port);
-}
-
-/*
- * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
- * specification regarding legacy I/O support.  Thus, we have to make these operations
- * platform dependent...
- */
-#define __inb          platform_inb
-#define __inw          platform_inw
-#define __inl          platform_inl
-#define __outb         platform_outb
-#define __outw         platform_outw
-#define __outl         platform_outl
-#define __mmiowb       platform_mmiowb
-
-#define inb(p)         __inb(p)
-#define inw(p)         __inw(p)
-#define inl(p)         __inl(p)
-#define insb(p,d,c)    __insb(p,d,c)
-#define insw(p,d,c)    __insw(p,d,c)
-#define insl(p,d,c)    __insl(p,d,c)
-#define outb(v,p)      __outb(v,p)
-#define outw(v,p)      __outw(v,p)
-#define outl(v,p)      __outl(v,p)
-#define outsb(p,s,c)   __outsb(p,s,c)
-#define outsw(p,s,c)   __outsw(p,s,c)
-#define outsl(p,s,c)   __outsl(p,s,c)
-#define mmiowb()       __mmiowb()
-
-/*
- * The address passed to these functions are ioremap()ped already.
- *
- * We need these to be machine vectors since some platforms don't provide
- * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
- * a good idea).  Writes are ok though for all existing ia64 platforms (and
- * hopefully it'll stay that way).
- */
-static inline unsigned char
-___ia64_readb (const volatile void __iomem *addr)
-{
-       return *(volatile unsigned char __force *)addr;
-}
-
-static inline unsigned short
-___ia64_readw (const volatile void __iomem *addr)
-{
-       return *(volatile unsigned short __force *)addr;
-}
-
-static inline unsigned int
-___ia64_readl (const volatile void __iomem *addr)
-{
-       return *(volatile unsigned int __force *) addr;
-}
-
-static inline unsigned long
-___ia64_readq (const volatile void __iomem *addr)
-{
-       return *(volatile unsigned long __force *) addr;
-}
-
-static inline void
-__writeb (unsigned char val, volatile void __iomem *addr)
-{
-       *(volatile unsigned char __force *) addr = val;
-}
-
-static inline void
-__writew (unsigned short val, volatile void __iomem *addr)
-{
-       *(volatile unsigned short __force *) addr = val;
-}
-
-static inline void
-__writel (unsigned int val, volatile void __iomem *addr)
-{
-       *(volatile unsigned int __force *) addr = val;
-}
-
-static inline void
-__writeq (unsigned long val, volatile void __iomem *addr)
-{
-       *(volatile unsigned long __force *) addr = val;
-}
-
-#define __readb                platform_readb
-#define __readw                platform_readw
-#define __readl                platform_readl
-#define __readq                platform_readq
-#define __readb_relaxed        platform_readb_relaxed
-#define __readw_relaxed        platform_readw_relaxed
-#define __readl_relaxed        platform_readl_relaxed
-#define __readq_relaxed        platform_readq_relaxed
-
-#define readb(a)       __readb((a))
-#define readw(a)       __readw((a))
-#define readl(a)       __readl((a))
-#define readq(a)       __readq((a))
-#define readb_relaxed(a)       __readb_relaxed((a))
-#define readw_relaxed(a)       __readw_relaxed((a))
-#define readl_relaxed(a)       __readl_relaxed((a))
-#define readq_relaxed(a)       __readq_relaxed((a))
-#define __raw_readb    readb
-#define __raw_readw    readw
-#define __raw_readl    readl
-#define __raw_readq    readq
-#define __raw_readb_relaxed    readb_relaxed
-#define __raw_readw_relaxed    readw_relaxed
-#define __raw_readl_relaxed    readl_relaxed
-#define __raw_readq_relaxed    readq_relaxed
-#define writeb(v,a)    __writeb((v), (a))
-#define writew(v,a)    __writew((v), (a))
-#define writel(v,a)    __writel((v), (a))
-#define writeq(v,a)    __writeq((v), (a))
-#define __raw_writeb   writeb
-#define __raw_writew   writew
-#define __raw_writel   writel
-#define __raw_writeq   writeq
-
-#ifndef inb_p
-# define inb_p         inb
-#endif
-#ifndef inw_p
-# define inw_p         inw
-#endif
-#ifndef inl_p
-# define inl_p         inl
-#endif
-
-#ifndef outb_p
-# define outb_p                outb
-#endif
-#ifndef outw_p
-# define outw_p                outw
-#endif
-#ifndef outl_p
-# define outl_p                outl
-#endif
-
-/*
- * An "address" in IO memory space is not clearly either an integer or a 
pointer. We will
- * accept both, thus the casts.
- *
- * On ia-64, we access the physical I/O memory space through the uncached 
kernel region.
- */
-static inline void __iomem *
-ioremap (unsigned long offset, unsigned long size)
-{
-       return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
-}
-
-static inline void
-iounmap (volatile void __iomem *addr)
-{
-}
-
-#define ioremap_nocache(o,s)   ioremap(o,s)
-
-# ifdef __KERNEL__
-
-/*
- * String version of IO memory access ops:
- */
-extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
-extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
-extern void memset_io(volatile void __iomem *s, int c, long n);
-
-#define dma_cache_inv(_start,_size)             do { } while (0)
-#define dma_cache_wback(_start,_size)           do { } while (0)
-#define dma_cache_wback_inv(_start,_size)       do { } while (0)
-
-# endif /* __KERNEL__ */
-
-/*
- * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
- * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
- * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
- * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
- * over BIO-level virtual merging.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#if 1
-#define BIO_VMERGE_BOUNDARY    0
-#else
-/*
- * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
- * replaced by dma_merge_mask() or something of that sort.  Note: the only way
- * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
- * expanded into:
- *
- *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask)
- *
- * which is precisely what we want.
- */
-#define BIO_VMERGE_BOUNDARY    (ia64_max_iommu_merge_mask + 1)
-#endif
-
-#endif /* _ASM_IA64_IO_H */
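The PIO-cookie scheme described in the header above packs a space number and a port offset into one value, and __ia64_mk_io_addr() optionally re-spreads the port with the sparse encoding. A small host-side sketch of that decomposition (the port value is an arbitrary example; the constants are copied from the removed header):

#include <stdio.h>

#define IO_SPACE_BITS			24
#define IO_SPACE_SIZE			(1UL << IO_SPACE_BITS)
#define IO_SPACE_NR(port)		((port) >> IO_SPACE_BITS)
#define IO_SPACE_PORT(port)		((port) & (IO_SPACE_SIZE - 1))
#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))

int main(void)
{
	/* hypothetical cookie: space 2, legacy port 0x3f8 */
	unsigned long port = (2UL << IO_SPACE_BITS) | 0x3f8;

	printf("space %lu, port 0x%lx, sparse offset 0x%lx\n",
	       IO_SPACE_NR(port), IO_SPACE_PORT(port),
	       IO_SPACE_SPARSE_ENCODING(IO_SPACE_PORT(port)));
	return 0;
}
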
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/kregs.h
--- a/xen/include/asm-ia64/kregs.h      Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,199 +0,0 @@
-#ifndef _ASM_IA64_KREGS_H
-#define _ASM_IA64_KREGS_H
-
-/*
- * Copyright (C) 2001-2002 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- */
-/*
- * This file defines the kernel register usage convention used by Linux/ia64.
- */
-
-/*
- * Kernel registers:
- */
-#define IA64_KR_IO_BASE		0	/* ar.k0: legacy I/O base address */
-#define IA64_KR_TSSD		1	/* ar.k1: IVE uses this as the TSSD */
-#define IA64_KR_PER_CPU_DATA	3	/* ar.k3: physical per-CPU base */
-#define IA64_KR_CURRENT_STACK	4	/* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
-#define IA64_KR_FPU_OWNER	5	/* ar.k5: fpu-owner (UP only, at the moment) */
-#define IA64_KR_CURRENT		6	/* ar.k6: "current" task pointer */
-#define IA64_KR_PT_BASE		7	/* ar.k7: page table base address (physical) */
-
-#define _IA64_KR_PASTE(x,y)    x##y
-#define _IA64_KR_PREFIX(n)     _IA64_KR_PASTE(ar.k, n)
-#define IA64_KR(n)             _IA64_KR_PREFIX(IA64_KR_##n)
-
-/*
- * Translation registers:
- */
-#define IA64_TR_KERNEL		0	/* itr0, dtr0: maps kernel image (code & data) */
-#define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
-#ifdef CONFIG_VTI
-#define IA64_TR_XEN_IN_DOM	6	/* itr6, dtr6: Double mapping for xen image in domain space */
-#endif // CONFIG_VTI
-#define IA64_TR_PERCPU_DATA	1	/* dtr1: percpu data */
-#define IA64_TR_CURRENT_STACK	2	/* dtr2: maps kernel's memory- & register-stacks */
-#ifdef XEN
-#define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
-#define IA64_TR_VHPT		4	/* dtr4: vhpt */
-#define IA64_TR_ARCH_INFO	5
-#ifdef CONFIG_VTI
-#define IA64_TR_VHPT_IN_DOM	5	/* dtr5: Double mapping for vhpt table in domain space */
-#define IA64_TR_RR7_SWITCH_STUB	7	/* dtr7: mapping for rr7 switch stub */
-#define IA64_TEMP_PHYSICAL	8	/* itr8, dtr8: temp mapping for guest physical memory 256M */
-#endif // CONFIG_VTI
-#endif
-
-/* Processor status register bits: */
-#define IA64_PSR_BE_BIT                1
-#define IA64_PSR_UP_BIT                2
-#define IA64_PSR_AC_BIT                3
-#define IA64_PSR_MFL_BIT       4
-#define IA64_PSR_MFH_BIT       5
-#define IA64_PSR_IC_BIT                13
-#define IA64_PSR_I_BIT         14
-#define IA64_PSR_PK_BIT                15
-#define IA64_PSR_DT_BIT                17
-#define IA64_PSR_DFL_BIT       18
-#define IA64_PSR_DFH_BIT       19
-#define IA64_PSR_SP_BIT                20
-#define IA64_PSR_PP_BIT                21
-#define IA64_PSR_DI_BIT                22
-#define IA64_PSR_SI_BIT                23
-#define IA64_PSR_DB_BIT                24
-#define IA64_PSR_LP_BIT                25
-#define IA64_PSR_TB_BIT                26
-#define IA64_PSR_RT_BIT                27
-/* The following are not affected by save_flags()/restore_flags(): */
-#define IA64_PSR_CPL0_BIT      32
-#define IA64_PSR_CPL1_BIT      33
-#define IA64_PSR_IS_BIT                34
-#define IA64_PSR_MC_BIT                35
-#define IA64_PSR_IT_BIT                36
-#define IA64_PSR_ID_BIT                37
-#define IA64_PSR_DA_BIT                38
-#define IA64_PSR_DD_BIT                39
-#define IA64_PSR_SS_BIT                40
-#define IA64_PSR_RI_BIT                41
-#define IA64_PSR_ED_BIT                43
-#define IA64_PSR_BN_BIT                44
-#define IA64_PSR_IA_BIT                45
-#ifdef CONFIG_VTI
-#define IA64_PSR_VM_BIT                46
-#endif // CONFIG_VTI
-
-/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
-   execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
-   execve().  */
-#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
-                                IA64_PSR_TB  | IA64_PSR_ID  | IA64_PSR_DA | IA64_PSR_DD | \
-                                IA64_PSR_SS  | IA64_PSR_ED  | IA64_PSR_IA)
-#define IA64_PSR_BITS_TO_SET   (IA64_PSR_DFH | IA64_PSR_SP)
-
-#define IA64_PSR_BE    (__IA64_UL(1) << IA64_PSR_BE_BIT)
-#define IA64_PSR_UP    (__IA64_UL(1) << IA64_PSR_UP_BIT)
-#define IA64_PSR_AC    (__IA64_UL(1) << IA64_PSR_AC_BIT)
-#define IA64_PSR_MFL   (__IA64_UL(1) << IA64_PSR_MFL_BIT)
-#define IA64_PSR_MFH   (__IA64_UL(1) << IA64_PSR_MFH_BIT)
-#define IA64_PSR_IC    (__IA64_UL(1) << IA64_PSR_IC_BIT)
-#define IA64_PSR_I     (__IA64_UL(1) << IA64_PSR_I_BIT)
-#define IA64_PSR_PK    (__IA64_UL(1) << IA64_PSR_PK_BIT)
-#define IA64_PSR_DT    (__IA64_UL(1) << IA64_PSR_DT_BIT)
-#define IA64_PSR_DFL   (__IA64_UL(1) << IA64_PSR_DFL_BIT)
-#define IA64_PSR_DFH   (__IA64_UL(1) << IA64_PSR_DFH_BIT)
-#define IA64_PSR_SP    (__IA64_UL(1) << IA64_PSR_SP_BIT)
-#define IA64_PSR_PP    (__IA64_UL(1) << IA64_PSR_PP_BIT)
-#define IA64_PSR_DI    (__IA64_UL(1) << IA64_PSR_DI_BIT)
-#define IA64_PSR_SI    (__IA64_UL(1) << IA64_PSR_SI_BIT)
-#define IA64_PSR_DB    (__IA64_UL(1) << IA64_PSR_DB_BIT)
-#define IA64_PSR_LP    (__IA64_UL(1) << IA64_PSR_LP_BIT)
-#define IA64_PSR_TB    (__IA64_UL(1) << IA64_PSR_TB_BIT)
-#define IA64_PSR_RT    (__IA64_UL(1) << IA64_PSR_RT_BIT)
-/* The following are not affected by save_flags()/restore_flags(): */
-#define IA64_PSR_CPL   (__IA64_UL(3) << IA64_PSR_CPL0_BIT)
-#define IA64_PSR_IS    (__IA64_UL(1) << IA64_PSR_IS_BIT)
-#define IA64_PSR_MC    (__IA64_UL(1) << IA64_PSR_MC_BIT)
-#define IA64_PSR_IT    (__IA64_UL(1) << IA64_PSR_IT_BIT)
-#define IA64_PSR_ID    (__IA64_UL(1) << IA64_PSR_ID_BIT)
-#define IA64_PSR_DA    (__IA64_UL(1) << IA64_PSR_DA_BIT)
-#define IA64_PSR_DD    (__IA64_UL(1) << IA64_PSR_DD_BIT)
-#define IA64_PSR_SS    (__IA64_UL(1) << IA64_PSR_SS_BIT)
-#define IA64_PSR_RI    (__IA64_UL(3) << IA64_PSR_RI_BIT)
-#define IA64_PSR_ED    (__IA64_UL(1) << IA64_PSR_ED_BIT)
-#define IA64_PSR_BN    (__IA64_UL(1) << IA64_PSR_BN_BIT)
-#define IA64_PSR_IA    (__IA64_UL(1) << IA64_PSR_IA_BIT)
-#ifdef CONFIG_VTI
-#define IA64_PSR_VM    (__IA64_UL(1) << IA64_PSR_VM_BIT)
-#endif // CONFIG_VTI
-
-/* User mask bits: */
-#define IA64_PSR_UM    (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL 
| IA64_PSR_MFH)
-
-/* Default Control Register */
-#define IA64_DCR_PP_BIT                 0      /* privileged performance monitor default */
-#define IA64_DCR_BE_BIT                 1      /* big-endian default */
-#define IA64_DCR_LC_BIT                 2      /* ia32 lock-check enable */
-#define IA64_DCR_DM_BIT                 8      /* defer TLB miss faults */
-#define IA64_DCR_DP_BIT                 9      /* defer page-not-present faults */
-#define IA64_DCR_DK_BIT                10      /* defer key miss faults */
-#define IA64_DCR_DX_BIT                11      /* defer key permission faults */
-#define IA64_DCR_DR_BIT                12      /* defer access right faults */
-#define IA64_DCR_DA_BIT                13      /* defer access bit faults */
-#define IA64_DCR_DD_BIT                14      /* defer debug faults */
-
-#define IA64_DCR_PP    (__IA64_UL(1) << IA64_DCR_PP_BIT)
-#define IA64_DCR_BE    (__IA64_UL(1) << IA64_DCR_BE_BIT)
-#define IA64_DCR_LC    (__IA64_UL(1) << IA64_DCR_LC_BIT)
-#define IA64_DCR_DM    (__IA64_UL(1) << IA64_DCR_DM_BIT)
-#define IA64_DCR_DP    (__IA64_UL(1) << IA64_DCR_DP_BIT)
-#define IA64_DCR_DK    (__IA64_UL(1) << IA64_DCR_DK_BIT)
-#define IA64_DCR_DX    (__IA64_UL(1) << IA64_DCR_DX_BIT)
-#define IA64_DCR_DR    (__IA64_UL(1) << IA64_DCR_DR_BIT)
-#define IA64_DCR_DA    (__IA64_UL(1) << IA64_DCR_DA_BIT)
-#define IA64_DCR_DD    (__IA64_UL(1) << IA64_DCR_DD_BIT)
-
-/* Interrupt Status Register */
-#define IA64_ISR_X_BIT         32      /* execute access */
-#define IA64_ISR_W_BIT         33      /* write access */
-#define IA64_ISR_R_BIT         34      /* read access */
-#define IA64_ISR_NA_BIT                35      /* non-access */
-#define IA64_ISR_SP_BIT                36      /* speculative load exception */
-#define IA64_ISR_RS_BIT                37      /* mandatory register-stack exception */
-#define IA64_ISR_IR_BIT                38      /* invalid register frame exception */
-#define IA64_ISR_CODE_MASK     0xf
-
-#define IA64_ISR_X     (__IA64_UL(1) << IA64_ISR_X_BIT)
-#define IA64_ISR_W     (__IA64_UL(1) << IA64_ISR_W_BIT)
-#define IA64_ISR_R     (__IA64_UL(1) << IA64_ISR_R_BIT)
-#define IA64_ISR_NA    (__IA64_UL(1) << IA64_ISR_NA_BIT)
-#define IA64_ISR_SP    (__IA64_UL(1) << IA64_ISR_SP_BIT)
-#define IA64_ISR_RS    (__IA64_UL(1) << IA64_ISR_RS_BIT)
-#define IA64_ISR_IR    (__IA64_UL(1) << IA64_ISR_IR_BIT)
-
-/* ISR code field for non-access instructions */
-#define IA64_ISR_CODE_TPA      0
-#define IA64_ISR_CODE_FC       1
-#define IA64_ISR_CODE_PROBE    2
-#define IA64_ISR_CODE_TAK      3
-#define IA64_ISR_CODE_LFETCH   4
-#define IA64_ISR_CODE_PROBEF   5
-
-#ifdef XEN
-/* Interruption Function State */
-#define IA64_IFS_V_BIT         63
-#define IA64_IFS_V     (__IA64_UL(1) << IA64_IFS_V_BIT)
-
-/* Page Table Address */
-#define IA64_PTA_VE_BIT 0
-#define IA64_PTA_SIZE_BIT 2
-#define IA64_PTA_VF_BIT 8
-#define IA64_PTA_BASE_BIT 15
-
-#define IA64_PTA_VE     (__IA64_UL(1) << IA64_PTA_VE_BIT)
-#define IA64_PTA_SIZE   (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
-#define IA64_PTA_VF     (__IA64_UL(1) << IA64_PTA_VF_BIT)
-#define IA64_PTA_BASE   (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
-#endif
-
-#endif /* _ASM_IA64_KREGS_H */
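The IA64_KR() machinery in the file above maps a symbolic index (IA64_KR_CURRENT is 6) onto the register name ar.k6 entirely at preprocessing time, via a two-step token paste. A standalone sketch that reuses the removed macros; the stringify wrappers are added here only so the expansion can be printed:

#include <stdio.h>

/* Copied from the removed kregs.h. */
#define IA64_KR_CURRENT		6
#define _IA64_KR_PASTE(x,y)	x##y
#define _IA64_KR_PREFIX(n)	_IA64_KR_PASTE(ar.k, n)
#define IA64_KR(n)		_IA64_KR_PREFIX(IA64_KR_##n)

/* Added for illustration: turn the final expansion into a string. */
#define STR(x)	#x
#define XSTR(x)	STR(x)

int main(void)
{
	/* Prints "ar.k6": CURRENT -> IA64_KR_CURRENT -> 6 -> ar.k6. */
	printf("IA64_KR(CURRENT) -> %s\n", XSTR(IA64_KR(CURRENT)));
	return 0;
}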
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux/asm/sn/sn_sal.h
--- a/xen/include/asm-ia64/linux/asm/sn/sn_sal.h        Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,994 +0,0 @@
-#ifndef _ASM_IA64_SN_SN_SAL_H
-#define _ASM_IA64_SN_SN_SAL_H
-
-/*
- * System Abstraction Layer definitions for IA64
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All rights reserved.
- */
-
-
-#include <linux/config.h>
-#include <asm/sal.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/geo.h>
-#include <asm/sn/nodepda.h>
-
-// SGI Specific Calls
-#define  SN_SAL_POD_MODE                           0x02000001
-#define  SN_SAL_SYSTEM_RESET                       0x02000002
-#define  SN_SAL_PROBE                              0x02000003
-#define  SN_SAL_GET_MASTER_NASID                   0x02000004
-#define         SN_SAL_GET_KLCONFIG_ADDR                  0x02000005
-#define  SN_SAL_LOG_CE                            0x02000006
-#define  SN_SAL_REGISTER_CE                       0x02000007
-#define  SN_SAL_GET_PARTITION_ADDR                0x02000009
-#define  SN_SAL_XP_ADDR_REGION                    0x0200000f
-#define  SN_SAL_NO_FAULT_ZONE_VIRTUAL             0x02000010
-#define  SN_SAL_NO_FAULT_ZONE_PHYSICAL            0x02000011
-#define  SN_SAL_PRINT_ERROR                       0x02000012
-#define  SN_SAL_SET_ERROR_HANDLING_FEATURES       0x0200001a   // reentrant
-#define  SN_SAL_GET_FIT_COMPT                     0x0200001b   // reentrant
-#define  SN_SAL_GET_HUB_INFO                       0x0200001c
-#define  SN_SAL_GET_SAPIC_INFO                     0x0200001d
-#define  SN_SAL_CONSOLE_PUTC                       0x02000021
-#define  SN_SAL_CONSOLE_GETC                       0x02000022
-#define  SN_SAL_CONSOLE_PUTS                       0x02000023
-#define  SN_SAL_CONSOLE_GETS                       0x02000024
-#define  SN_SAL_CONSOLE_GETS_TIMEOUT               0x02000025
-#define  SN_SAL_CONSOLE_POLL                       0x02000026
-#define  SN_SAL_CONSOLE_INTR                       0x02000027
-#define  SN_SAL_CONSOLE_PUTB                      0x02000028
-#define  SN_SAL_CONSOLE_XMIT_CHARS                0x0200002a
-#define  SN_SAL_CONSOLE_READC                     0x0200002b
-#define  SN_SAL_SYSCTL_MODID_GET                  0x02000031
-#define  SN_SAL_SYSCTL_GET                         0x02000032
-#define  SN_SAL_SYSCTL_IOBRICK_MODULE_GET          0x02000033
-#define  SN_SAL_SYSCTL_IO_PORTSPEED_GET            0x02000035
-#define  SN_SAL_SYSCTL_SLAB_GET                    0x02000036
-#define  SN_SAL_BUS_CONFIG                        0x02000037
-#define  SN_SAL_SYS_SERIAL_GET                    0x02000038
-#define  SN_SAL_PARTITION_SERIAL_GET              0x02000039
-#define  SN_SAL_SYSCTL_PARTITION_GET              0x0200003a
-#define  SN_SAL_SYSTEM_POWER_DOWN                 0x0200003b
-#define  SN_SAL_GET_MASTER_BASEIO_NASID                   0x0200003c
-#define  SN_SAL_COHERENCE                          0x0200003d
-#define  SN_SAL_MEMPROTECT                         0x0200003e
-#define  SN_SAL_SYSCTL_FRU_CAPTURE                0x0200003f
-
-#define  SN_SAL_SYSCTL_IOBRICK_PCI_OP             0x02000042   // reentrant
-#define         SN_SAL_IROUTER_OP                         0x02000043
-#define  SN_SAL_IOIF_INTERRUPT                    0x0200004a
-#define  SN_SAL_HWPERF_OP                         0x02000050   // lock
-#define  SN_SAL_IOIF_ERROR_INTERRUPT              0x02000051
-
-#define  SN_SAL_IOIF_SLOT_ENABLE                  0x02000053
-#define  SN_SAL_IOIF_SLOT_DISABLE                 0x02000054
-#define  SN_SAL_IOIF_GET_HUBDEV_INFO              0x02000055
-#define  SN_SAL_IOIF_GET_PCIBUS_INFO              0x02000056
-#define  SN_SAL_IOIF_GET_PCIDEV_INFO              0x02000057
-#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST     0x02000058
-
-#define SN_SAL_HUB_ERROR_INTERRUPT                0x02000060
-
-
-/*
- * Service-specific constants
- */
-
-/* Console interrupt manipulation */
-       /* action codes */
-#define SAL_CONSOLE_INTR_OFF    0       /* turn the interrupt off */
-#define SAL_CONSOLE_INTR_ON     1       /* turn the interrupt on */
-#define SAL_CONSOLE_INTR_STATUS 2      /* retrieve the interrupt status */
-       /* interrupt specification & status return codes */
-#define SAL_CONSOLE_INTR_XMIT  1       /* output interrupt */
-#define SAL_CONSOLE_INTR_RECV  2       /* input interrupt */
-
-/* interrupt handling */
-#define SAL_INTR_ALLOC         1
-#define SAL_INTR_FREE          2
-
-/*
- * IRouter (i.e. generalized system controller) operations
- */
-#define SAL_IROUTER_OPEN       0       /* open a subchannel */
-#define SAL_IROUTER_CLOSE      1       /* close a subchannel */
-#define SAL_IROUTER_SEND       2       /* send part of an IRouter packet */
-#define SAL_IROUTER_RECV       3       /* receive part of an IRouter packet */
-#define SAL_IROUTER_INTR_STATUS        4       /* check the interrupt status for
-                                        * an open subchannel
-                                        */
-#define SAL_IROUTER_INTR_ON    5       /* enable an interrupt */
-#define SAL_IROUTER_INTR_OFF   6       /* disable an interrupt */
-#define SAL_IROUTER_INIT       7       /* initialize IRouter driver */
-
-/* IRouter interrupt mask bits */
-#define SAL_IROUTER_INTR_XMIT  SAL_CONSOLE_INTR_XMIT
-#define SAL_IROUTER_INTR_RECV  SAL_CONSOLE_INTR_RECV
-
-
-/*
- * SAL Error Codes
- */
-#define SALRET_MORE_PASSES     1
-#define SALRET_OK              0
-#define SALRET_NOT_IMPLEMENTED (-1)
-#define SALRET_INVALID_ARG     (-2)
-#define SALRET_ERROR           (-3)
-
-
-#ifndef XEN
-/**
- * sn_sal_rev_major - get the major SGI SAL revision number
- *
- * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
- * This routine simply extracts the major value from the
- * @ia64_sal_systab structure constructed by ia64_sal_init().
- */
-static inline int
-sn_sal_rev_major(void)
-{
-       struct ia64_sal_systab *systab = efi.sal_systab;
-
-       return (int)systab->sal_b_rev_major;
-}
-
-/**
- * sn_sal_rev_minor - get the minor SGI SAL revision number
- *
- * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
- * This routine simply extracts the minor value from the
- * @ia64_sal_systab structure constructed by ia64_sal_init().
- */
-static inline int
-sn_sal_rev_minor(void)
-{
-       struct ia64_sal_systab *systab = efi.sal_systab;
-       
-       return (int)systab->sal_b_rev_minor;
-}
-
-/*
- * Specify the minimum PROM revision required for this kernel.
- * Note that they're stored in hex format...
- */
-#define SN_SAL_MIN_MAJOR       0x4  /* SN2 kernels need at least PROM 4.0 */
-#define SN_SAL_MIN_MINOR       0x0
-
-/*
- * Returns the master console nasid; if the call fails, returns an illegal
- * value.
- */
-static inline u64
-ia64_sn_get_console_nasid(void)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
-
-       if (ret_stuff.status < 0)
-               return ret_stuff.status;
-
-       /* Master console nasid is in 'v0' */
-       return ret_stuff.v0;
-}
-
-/*
- * Returns the master baseio nasid; if the call fails, returns an illegal
- * value.
- */
-static inline u64
-ia64_sn_get_master_baseio_nasid(void)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0);
-
-       if (ret_stuff.status < 0)
-               return ret_stuff.status;
-
-       /* Master baseio nasid is in 'v0' */
-       return ret_stuff.v0;
-}
-
-static inline char *
-ia64_sn_get_klconfig_addr(nasid_t nasid)
-{
-       struct ia64_sal_retval ret_stuff;
-       int cnodeid;
-
-       cnodeid = nasid_to_cnodeid(nasid);
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
-
-       /*
-        * We should panic if a valid cnode nasid does not produce
-        * a klconfig address.
-        */
-       if (ret_stuff.status != 0) {
-               panic("ia64_sn_get_klconfig_addr: Returned error %lx\n", ret_stuff.status);
-       }
-       return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
-}
-#endif /* !XEN */
-
-/*
- * Returns the next console character.
- */
-static inline u64
-ia64_sn_console_getc(int *ch)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
-
-       /* character is in 'v0' */
-       *ch = (int)ret_stuff.v0;
-
-       return ret_stuff.status;
-}
-
-/*
- * Read a character from the SAL console device, after a previous interrupt
- * or poll operation has indicated that a character is available to be read.
- */
-static inline u64
-ia64_sn_console_readc(void)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0);
-
-       /* character is in 'v0' */
-       return ret_stuff.v0;
-}
-
-/*
- * Sends the given character to the console.
- */
-static inline u64
-ia64_sn_console_putc(char ch)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (uint64_t)ch, 0, 0, 0, 0, 0, 0);
-
-       return ret_stuff.status;
-}
-
-/*
- * Sends the given buffer to the console.
- */
-static inline u64
-ia64_sn_console_putb(const char *buf, int len)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0; 
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (uint64_t)buf, (uint64_t)len, 0, 0, 0, 0, 0);
-
-       if ( ret_stuff.status == 0 ) {
-               return ret_stuff.v0;
-       }
-       return (u64)0;
-}
-
-#ifndef XEN
-/*
- * Print a platform error record
- */
-static inline u64
-ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
-
-       return ret_stuff.status;
-}
-
-/*
- * Check for Platform errors
- */
-static inline u64
-ia64_sn_plat_cpei_handler(void)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
-
-       return ret_stuff.status;
-}
-
-/*
- * Checks for console input.
- */
-static inline u64
-ia64_sn_console_check(int *result)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
-
-       /* result is in 'v0' */
-       *result = (int)ret_stuff.v0;
-
-       return ret_stuff.status;
-}
-
-/*
- * Checks console interrupt status
- */
-static inline u64
-ia64_sn_console_intr_status(void)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
-                0, SAL_CONSOLE_INTR_STATUS,
-                0, 0, 0, 0, 0);
-
-       if (ret_stuff.status == 0) {
-           return ret_stuff.v0;
-       }
-       
-       return 0;
-}
-
-/*
- * Enable an interrupt on the SAL console device.
- */
-static inline void
-ia64_sn_console_intr_enable(uint64_t intr)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
-                intr, SAL_CONSOLE_INTR_ON,
-                0, 0, 0, 0, 0);
-}
-
-/*
- * Disable an interrupt on the SAL console device.
- */
-static inline void
-ia64_sn_console_intr_disable(uint64_t intr)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
-                intr, SAL_CONSOLE_INTR_OFF,
-                0, 0, 0, 0, 0);
-}
-
-/*
- * Sends a character buffer to the console asynchronously.
- */
-static inline u64
-ia64_sn_console_xmit_chars(char *buf, int len)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
-                (uint64_t)buf, (uint64_t)len,
-                0, 0, 0, 0, 0);
-
-       if (ret_stuff.status == 0) {
-           return ret_stuff.v0;
-       }
-
-       return 0;
-}
-
-/*
- * Returns the iobrick module Id
- */
-static inline u64
-ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
-
-       /* result is in 'v0' */
-       *result = (int)ret_stuff.v0;
-
-       return ret_stuff.status;
-}
-
-/**
- * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function
- *
- * SN_SAL_POD_MODE actually takes an argument, but it's always
- * 0 when we call it from the kernel, so we don't have to expose
- * it to the caller.
- */
-static inline u64
-ia64_sn_pod_mode(void)
-{
-       struct ia64_sal_retval isrv;
-       SAL_CALL(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
-       if (isrv.status)
-               return 0;
-       return isrv.v0;
-}
-
-/**
- * ia64_sn_probe_mem - read from memory safely
- * @addr: address to probe
- * @size: number bytes to read (1,2,4,8)
- * @data_ptr: address to store value read by probe (-1 returned if probe fails)
- *
- * Call into the SAL to do a memory read.  If the read generates a machine
- * check, this routine will recover gracefully and return -1 to the caller.
- * @addr is usually a kernel virtual address in uncached space (i.e. the
- * address starts with 0xc), but if called in physical mode, @addr should
- * be a physical address.
- *
- * Return values:
- *  0 - probe successful
- *  1 - probe failed (generated MCA)
- *  2 - Bad arg
- * <0 - PAL error
- */
-static inline u64
-ia64_sn_probe_mem(long addr, long size, void *data_ptr)
-{
-       struct ia64_sal_retval isrv;
-
-       SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0);
-
-       if (data_ptr) {
-               switch (size) {
-               case 1:
-                       *((u8*)data_ptr) = (u8)isrv.v0;
-                       break;
-               case 2:
-                       *((u16*)data_ptr) = (u16)isrv.v0;
-                       break;
-               case 4:
-                       *((u32*)data_ptr) = (u32)isrv.v0;
-                       break;
-               case 8:
-                       *((u64*)data_ptr) = (u64)isrv.v0;
-                       break;
-               default:
-                       isrv.status = 2;
-               }
-       }
-       return isrv.status;
-}
-
-/*
- * Retrieve the system serial number as an ASCII string.
- */
-static inline u64
-ia64_sn_sys_serial_get(char *buf)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
-       return ret_stuff.status;
-}
-
-extern char sn_system_serial_number_string[];
-extern u64 sn_partition_serial_number;
-
-static inline char *
-sn_system_serial_number(void) {
-       if (sn_system_serial_number_string[0]) {
-               return(sn_system_serial_number_string);
-       } else {
-               ia64_sn_sys_serial_get(sn_system_serial_number_string);
-               return(sn_system_serial_number_string);
-       }
-}
-       
-
-/*
- * Returns a unique id number for this system and partition (suitable for
- * use with license managers), based in part on the system serial number.
- */
-static inline u64
-ia64_sn_partition_serial_get(void)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0);
-       if (ret_stuff.status != 0)
-           return 0;
-       return ret_stuff.v0;
-}
-
-static inline u64
-sn_partition_serial_number_val(void) {
-       if (sn_partition_serial_number) {
-               return(sn_partition_serial_number);
-       } else {
-               return(sn_partition_serial_number = ia64_sn_partition_serial_get());
-       }
-}
-
-/*
- * Returns the partition id of the nasid passed in as an argument,
- * or INVALID_PARTID if the partition id cannot be retrieved.
- */
-static inline partid_t
-ia64_sn_sysctl_partition_get(nasid_t nasid)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
-                0, 0, 0, 0, 0, 0);
-       if (ret_stuff.status != 0)
-           return INVALID_PARTID;
-       return ((partid_t)ret_stuff.v0);
-}
-
-/*
- * Returns the partition id of the current processor.
- */
-
-extern partid_t sn_partid;
-
-static inline partid_t
-sn_local_partid(void) {
-       if (sn_partid < 0) {
-               return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id())));
-       } else {
-               return sn_partid;
-       }
-}
-
-/*
- * Register or unregister a physical address range being referenced across
- * a partition boundary for which certain SAL errors should be scanned for,
- * cleaned up and ignored.  This is of value for kernel partitioning code only.
- * Values for the operation argument:
- *     1 = register this address range with SAL
- *     0 = unregister this address range with SAL
- * 
- * SAL maintains a reference count on an address range in case it is registered
- * multiple times.
- * 
- * On success, returns the reference count of the address range after the SAL
- * call has performed the current registration/unregistration.  Returns a
- * negative value if an error occurred.
- */
-static inline int
-sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation,
-                0, 0, 0, 0);
-       return ret_stuff.status;
-}
-
-/*
- * Register or unregister an instruction range for which SAL errors should
- * be ignored.  If an error occurs while in the registered range, SAL jumps
- * to return_addr after ignoring the error.  Values for the operation argument:
- *     1 = register this instruction range with SAL
- *     0 = unregister this instruction range with SAL
- *
- * Returns 0 on success, or a negative value if an error occurred.
- */
-static inline int
-sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
-                        int virtual, int operation)
-{
-       struct ia64_sal_retval ret_stuff;
-       u64 call;
-       if (virtual) {
-               call = SN_SAL_NO_FAULT_ZONE_VIRTUAL;
-       } else {
-               call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
-       }
-       SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1,
-                0, 0, 0);
-       return ret_stuff.status;
-}
-
-/*
- * Change or query the coherence domain for this partition. Each cpu-based
- * nasid is represented by a bit in an array of 64-bit words:
- *      0 = not in this partition's coherency domain
- *      1 = in this partition's coherency domain
- *
- * It is not possible for the local system's nasids to be removed from
- * the coherency domain.  Purpose of the domain arguments:
- *      new_domain = set the coherence domain to the given nasids
- *      old_domain = return the current coherence domain
- *
- * Returns 0 on success, or a negative value if an error occurred.
- */
-static inline int
-sn_change_coherence(u64 *new_domain, u64 *old_domain)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0,
-                0, 0, 0);
-       return ret_stuff.status;
-}
-
-/*
- * Change memory access protections for a physical address range.
- * nasid_array is not used on Altix, but may be in future architectures.
- * Available memory protection access classes are defined after the function.
- */
-static inline int
-sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
-{
-       struct ia64_sal_retval ret_stuff;
-       int cnodeid;
-       unsigned long irq_flags;
-
-       cnodeid = nasid_to_cnodeid(get_node_number(paddr));
-       // spin_lock(&NODEPDA(cnodeid)->bist_lock);
-       local_irq_save(irq_flags);
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array,
-                perms, 0, 0, 0);
-       local_irq_restore(irq_flags);
-       // spin_unlock(&NODEPDA(cnodeid)->bist_lock);
-       return ret_stuff.status;
-}
-#define SN_MEMPROT_ACCESS_CLASS_0              0x14a080
-#define SN_MEMPROT_ACCESS_CLASS_1              0x2520c2
-#define SN_MEMPROT_ACCESS_CLASS_2              0x14a1ca
-#define SN_MEMPROT_ACCESS_CLASS_3              0x14a290
-#define SN_MEMPROT_ACCESS_CLASS_6              0x084080
-#define SN_MEMPROT_ACCESS_CLASS_7              0x021080
-
-/*
- * Turns off system power.
- */
-static inline void
-ia64_sn_power_down(void)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
-       while(1);
-       /* never returns */
-}
-
-/**
- * ia64_sn_fru_capture - tell the system controller to capture hw state
- *
- * This routine will call the SAL which will tell the system controller(s)
- * to capture hw mmr information from each SHub in the system.
- */
-static inline u64
-ia64_sn_fru_capture(void)
-{
-        struct ia64_sal_retval isrv;
-        SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
-        if (isrv.status)
-                return 0;
-        return isrv.v0;
-}
-
-/*
- * Performs an operation on a PCI bus or slot -- power up, power down
- * or reset.
- */
-static inline u64
-ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type, 
-                             u64 bus, char slot, 
-                             u64 action)
-{
-       struct ia64_sal_retval rv = {0, 0, 0, 0};
-
-       SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action,
-                bus, (u64) slot, 0, 0);
-       if (rv.status)
-               return rv.v0;
-       return 0;
-}
-
-
-/*
- * Open a subchannel for sending arbitrary data to the system
- * controller network via the system controller device associated with
- * 'nasid'.  Return the subchannel number or a negative error code.
- */
-static inline int
-ia64_sn_irtr_open(nasid_t nasid)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid,
-                          0, 0, 0, 0, 0);
-       return (int) rv.v0;
-}
-
-/*
- * Close system controller subchannel 'subch' previously opened on 'nasid'.
- */
-static inline int
-ia64_sn_irtr_close(nasid_t nasid, int subch)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE,
-                          (u64) nasid, (u64) subch, 0, 0, 0, 0);
-       return (int) rv.status;
-}
-
-/*
- * Read data from system controller associated with 'nasid' on
- * subchannel 'subch'.  The buffer to be filled is pointed to by
- * 'buf', and its capacity is in the integer pointed to by 'len'.  The
- * referent of 'len' is set to the number of bytes read by the SAL
- * call.  The return value is either SALRET_OK (for bytes read) or
- * SALRET_ERROR (for error or "no data available").
- */
-static inline int
-ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV,
-                          (u64) nasid, (u64) subch, (u64) buf, (u64) len,
-                          0, 0);
-       return (int) rv.status;
-}
-
-/*
- * Write data to the system controller network via the system
- * controller associated with 'nasid' on subchannel 'subch'.  The
- * buffer to be written out is pointed to by 'buf', and 'len' is the
- * number of bytes to be written.  The return value is either the
- * number of bytes written (which could be zero) or a negative error
- * code.
- */
-static inline int
-ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND,
-                          (u64) nasid, (u64) subch, (u64) buf, (u64) len,
-                          0, 0);
-       return (int) rv.v0;
-}
-
-/*
- * Check whether any interrupts are pending for the system controller
- * associated with 'nasid' and its subchannel 'subch'.  The return
- * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or
- * SAL_IROUTER_INTR_RECV).
- */
-static inline int
-ia64_sn_irtr_intr(nasid_t nasid, int subch)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS,
-                          (u64) nasid, (u64) subch, 0, 0, 0, 0);
-       return (int) rv.v0;
-}
-
-/*
- * Enable the interrupt indicated by the intr parameter (either
- * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
- */
-static inline int
-ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON,
-                          (u64) nasid, (u64) subch, intr, 0, 0, 0);
-       return (int) rv.v0;
-}
-
-/*
- * Disable the interrupt indicated by the intr parameter (either
- * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
- */
-static inline int
-ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF,
-                          (u64) nasid, (u64) subch, intr, 0, 0, 0);
-       return (int) rv.v0;
-}
-
-/**
- * ia64_sn_get_fit_compt - read a FIT entry from the PROM header
- * @nasid: NASID of node to read
- * @index: FIT entry index to be retrieved (0..n)
- * @fitentry: 16 byte buffer where FIT entry will be stored.
- * @banbuf: optional buffer for retrieving banner
- * @banlen: length of banner buffer
- *
- * Access to the physical PROM chips needs to be serialized since reads and
- * writes can't occur at the same time, so we need to call into the SAL when
- * we want to look at the FIT entries on the chips.
- *
- * Returns:
- *     %SALRET_OK if ok
- *     %SALRET_INVALID_ARG if index too big
- *     %SALRET_NOT_IMPLEMENTED if running on older PROM
- *     ??? if nasid invalid OR banner buffer not large enough
- */
-static inline int
-ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf,
-                     u64 banlen)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry,
-                       banbuf, banlen, 0, 0);
-       return (int) rv.status;
-}
-
-/*
- * Initialize the SAL components of the system controller
- * communication driver; specifically pass in a sizable buffer that
- * can be used for allocation of subchannel queues as new subchannels
- * are opened.  "buf" points to the buffer, and "len" specifies its
- * length.
- */
-static inline int
-ia64_sn_irtr_init(nasid_t nasid, void *buf, int len)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT,
-                          (u64) nasid, (u64) buf, (u64) len, 0, 0, 0);
-       return (int) rv.status;
-}
-
-/*
- * Returns the nasid, subnode & slice corresponding to a SAPIC ID
- *
- *  In:
- *     arg0 - SN_SAL_GET_SAPIC_INFO
- *     arg1 - sapicid (lid >> 16) 
- *  Out:
- *     v0 - nasid
- *     v1 - subnode
- *     v2 - slice
- */
-static inline u64
-ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0);
-
-/***** BEGIN HACK - temp til old proms no longer supported ********/
-       if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
-               if (nasid) *nasid = sapicid & 0xfff;
-               if (subnode) *subnode = (sapicid >> 13) & 1;
-               if (slice) *slice = (sapicid >> 12) & 3;
-               return 0;
-       }
-/***** END HACK *******/
-
-       if (ret_stuff.status < 0)
-               return ret_stuff.status;
-
-       if (nasid) *nasid = (int) ret_stuff.v0;
-       if (subnode) *subnode = (int) ret_stuff.v1;
-       if (slice) *slice = (int) ret_stuff.v2;
-       return 0;
-}
- 
-/*
- * Returns information about the HUB/SHUB.
- *  In:
- *     arg0 - SN_SAL_GET_HUB_INFO
- *     arg1 - 0 (other values reserved for future use)
- *  Out:
- *     v0 - shub type (0=shub1, 1=shub2)
- *     v1 - nasid mask (ex., 0x7ff for 11 bit nasid)
- *     v2 - bit position of low nasid bit
- */
-static inline u64
-ia64_sn_get_hub_info(int fc, u64 *arg1, u64 *arg2, u64 *arg3)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_HUB_INFO, fc, 0, 0, 0, 0, 0, 0);
-
-/***** BEGIN HACK - temp til old proms no longer supported ********/
-       if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
-               if (arg1) *arg1 = 0;
-               if (arg2) *arg2 = 0x7ff;
-               if (arg3) *arg3 = 38;
-               return 0;
-       }
-/***** END HACK *******/
-
-       if (ret_stuff.status < 0)
-               return ret_stuff.status;
-
-       if (arg1) *arg1 = ret_stuff.v0;
-       if (arg2) *arg2 = ret_stuff.v1;
-       if (arg3) *arg3 = ret_stuff.v2;
-       return 0;
-}
- 
-/*
- * This is the access point to the Altix PROM hardware performance
- * and status monitoring interface. For info on using this, see
- * include/asm-ia64/sn/sn2/sn_hwperf.h
- */
-static inline int
-ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
-                  u64 a3, u64 a4, int *v0)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid,
-               opcode, a0, a1, a2, a3, a4);
-       if (v0)
-               *v0 = (int) rv.v0;
-       return (int) rv.status;
-}
-#endif /* !XEN */
-#endif /* _ASM_IA64_SN_SN_SAL_H */
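Every wrapper in the file above follows one pattern: zero a struct ia64_sal_retval, issue the SAL call, then hand back either .status (the error code) or .v0 (the payload). A userspace sketch of that shape, with a stub standing in for the firmware trap behind SAL_CALL_NOLOCK(); the stub and its fabricated payload are invented for illustration:

#include <stdio.h>

typedef unsigned long u64;

struct ia64_sal_retval {
	long status;		/* 0 on success, negative SALRET_* on error */
	u64 v0, v1, v2;		/* call-specific payload */
};

/* Hypothetical stand-in for the firmware dispatch. */
static void fake_sal_call(struct ia64_sal_retval *rv, u64 fn, u64 arg)
{
	(void)fn;
	rv->status = 0;
	rv->v0 = arg;		/* pretend the PROM produced a result */
}

/* Same shape as the removed ia64_sn_console_getc() wrapper. */
static u64 demo_console_getc(int *ch)
{
	struct ia64_sal_retval ret_stuff;

	ret_stuff.status = 0;
	ret_stuff.v0 = ret_stuff.v1 = ret_stuff.v2 = 0;
	fake_sal_call(&ret_stuff, 0x02000022 /* SN_SAL_CONSOLE_GETC */, 'x');

	*ch = (int)ret_stuff.v0;	/* payload comes back in v0 */
	return ret_stuff.status;	/* caller acts on status alone */
}

int main(void)
{
	int ch;

	if (demo_console_getc(&ch) == 0)
		printf("console returned '%c'\n", ch);
	return 0;
}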
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux/cpumask.h
--- a/xen/include/asm-ia64/linux/cpumask.h      Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,379 +0,0 @@
-#ifndef __LINUX_CPUMASK_H
-#define __LINUX_CPUMASK_H
-
-/*
- * Cpumasks provide a bitmap suitable for representing the
- * set of CPUs in a system, one bit position per CPU number.
- *
- * See detailed comments in the file linux/bitmap.h describing the
- * data type on which these cpumasks are based.
- *
- * For details of cpumask_scnprintf() and cpumask_parse(),
- * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
- *
- * The available cpumask operations are:
- *
- * void cpu_set(cpu, mask)             turn on bit 'cpu' in mask
- * void cpu_clear(cpu, mask)           turn off bit 'cpu' in mask
- * void cpus_setall(mask)              set all bits
- * void cpus_clear(mask)               clear all bits
- * int cpu_isset(cpu, mask)            true iff bit 'cpu' set in mask
- * int cpu_test_and_set(cpu, mask)     test and set bit 'cpu' in mask
- *
- * void cpus_and(dst, src1, src2)      dst = src1 & src2  [intersection]
- * void cpus_or(dst, src1, src2)       dst = src1 | src2  [union]
- * void cpus_xor(dst, src1, src2)      dst = src1 ^ src2
- * void cpus_andnot(dst, src1, src2)   dst = src1 & ~src2
- * void cpus_complement(dst, src)      dst = ~src
- *
- * int cpus_equal(mask1, mask2)                Does mask1 == mask2?
- * int cpus_intersects(mask1, mask2)   Do mask1 and mask2 intersect?
- * int cpus_subset(mask1, mask2)       Is mask1 a subset of mask2?
- * int cpus_empty(mask)                        Is mask empty (no bits set)?
- * int cpus_full(mask)                 Is mask full (all bits set)?
- * int cpus_weight(mask)               Hamming weight - number of set bits
- *
- * void cpus_shift_right(dst, src, n)  Shift right
- * void cpus_shift_left(dst, src, n)   Shift left
- *
- * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
- * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
- *
- * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
- * CPU_MASK_ALL                                Initializer - all bits set
- * CPU_MASK_NONE                       Initializer - no bits set
- * unsigned long *cpus_addr(mask)      Array of unsigned long's in mask
- *
- * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
- * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask
- *
- * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
- *
- * int num_online_cpus()               Number of online CPUs
- * int num_possible_cpus()             Number of all possible CPUs
- * int num_present_cpus()              Number of present CPUs
- *
- * int cpu_online(cpu)                 Is some cpu online?
- * int cpu_possible(cpu)               Is some cpu possible?
- * int cpu_present(cpu)                        Is some cpu present (can schedule)?
- *
- * int any_online_cpu(mask)            First online cpu in mask
- *
- * for_each_cpu(cpu)                   for-loop cpu over cpu_possible_map
- * for_each_online_cpu(cpu)            for-loop cpu over cpu_online_map
- * for_each_present_cpu(cpu)           for-loop cpu over cpu_present_map
- *
- * Subtlety:
- * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
- *    to generate slightly worse code.  Note for example the additional
- *    40 lines of assembly code compiling the "for each possible cpu"
- *    loops buried in the disk_stat_read() macros calls when compiling
- *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
- *    one-line #define for cpu_isset(), instead of wrapping an inline
- *    inside a macro, the way we do the other calls.
- */
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/bitmap.h>
-#include <asm/bug.h>
-
-typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-extern cpumask_t _unused_cpumask_arg_;
-
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
-       set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
-       clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
-{
-       bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
-{
-       bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
-       return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
-                               __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
-static inline void __cpus_complement(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int nbits)
-{
-       bitmap_complement(dstp->bits, srcp->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
-{
-       return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
-static inline int __cpus_full(const cpumask_t *srcp, int nbits)
-{
-       return bitmap_full(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
-{
-       return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_right(dst, src, n) \
-                       __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_right(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
-{
-       bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
-                       __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
-{
-       bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
-static inline int __first_cpu(const cpumask_t *srcp, int nbits)
-{
-       return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
-}
-
-#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
-static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
-{
-       return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
-}
-
-#define cpumask_of_cpu(cpu)                                            \
-({                                                                     \
-       typeof(_unused_cpumask_arg_) m;                                 \
-       if (sizeof(m) == sizeof(unsigned long)) {                       \
-               m.bits[0] = 1UL<<(cpu);                                 \
-       } else {                                                        \
-               cpus_clear(m);                                          \
-               cpu_set((cpu), m);                                      \
-       }                                                               \
-       m;                                                              \
-})
-
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
-#if NR_CPUS <= BITS_PER_LONG
-
-#define CPU_MASK_ALL                                                   \
-(cpumask_t) { {                                                                \
-       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
-} }
-
-#else
-
-#define CPU_MASK_ALL                                                   \
-(cpumask_t) { {                                                                \
-       [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                        \
-       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
-} }
-
-#endif
-
-#define CPU_MASK_NONE                                                  \
-(cpumask_t) { {                                                                \
-       [0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL                         \
-} }
-
-#define CPU_MASK_CPU0                                                  \
-(cpumask_t) { {                                                                \
-       [0] =  1UL                                                      \
-} }
-
-#define cpus_addr(src) ((src).bits)
-
-#define cpumask_scnprintf(buf, len, src) \
-                       __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpumask_scnprintf(char *buf, int len,
-                                       const cpumask_t *srcp, int nbits)
-{
-       return bitmap_scnprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpumask_parse(ubuf, ulen, src) \
-                       __cpumask_parse((ubuf), (ulen), &(src), NR_CPUS)
-static inline int __cpumask_parse(const char __user *buf, int len,
-                                       cpumask_t *dstp, int nbits)
-{
-       return bitmap_parse(buf, len, dstp->bits, nbits);
-}
-
-#if NR_CPUS > 1
-#define for_each_cpu_mask(cpu, mask)           \
-       for ((cpu) = first_cpu(mask);           \
-               (cpu) < NR_CPUS;                \
-               (cpu) = next_cpu((cpu), (mask)))
-#else /* NR_CPUS == 1 */
-#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
-#endif /* NR_CPUS */
-
-/*
- * The following particular system cpumasks and operations manage
- * possible, present and online cpus.  Each of them is a fixed size
- * bitmap of size NR_CPUS.
- *
- *  #ifdef CONFIG_HOTPLUG_CPU
- *     cpu_possible_map - all NR_CPUS bits set
- *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
- *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
- *  #else
- *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
- *     cpu_present_map  - copy of cpu_possible_map
- *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
- *  #endif
- *
- *  In either case, NR_CPUS is fixed at compile time, as the static
- *  size of these bitmaps.  The cpu_possible_map is fixed at boot
- *  time, as the set of CPU ids that might possibly ever be plugged
- *  in at any time during the life of that system boot.
- *  The cpu_present_map is dynamic(*), representing which CPUs
- *  are currently plugged in.  And cpu_online_map is the dynamic
- *  subset of cpu_present_map, indicating those CPUs available
- *  for scheduling.
- *
- *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
- *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
- *  ACPI reports present at boot.
- *
- *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
- *  depending on what ACPI reports as currently plugged in, otherwise
- *  cpu_present_map is just a copy of cpu_possible_map.
- *
- *  (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
- *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
- *
- * Subtleties:
- * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
- *    assumption that their single CPU is online.  The UP
- *    cpu_{online,possible,present}_maps are placebos.  Changing them
- *    will have no useful effect on the following num_*_cpus()
- *    and cpu_*() macros in the UP case.  This ugliness is a UP
- *    optimization - don't waste any instructions or memory references
- *    asking if you're online or how many CPUs there are if there is
- *    only one CPU.
- * 2) Most SMP arch's #define some of these maps to be some
- *    other map specific to that arch.  Therefore, the following
- *    must be #define macros, not inlines.  To see why, examine
- *    the assembly code produced by the following.  Note that
- *    set1() writes phys_x_map, but set2() writes x_map:
- *        int x_map, phys_x_map;
- *        #define set1(a) x_map = a
- *        inline void set2(int a) { x_map = a; }
- *        #define x_map phys_x_map
- *        main(){ set1(3); set2(5); }
- */
-
-extern cpumask_t cpu_possible_map;
-#ifndef XEN
-extern cpumask_t cpu_online_map;
-#endif
-extern cpumask_t cpu_present_map;
-
-#if NR_CPUS > 1
-#define num_online_cpus()      cpus_weight(cpu_online_map)
-#define num_possible_cpus()    cpus_weight(cpu_possible_map)
-#define num_present_cpus()     cpus_weight(cpu_present_map)
-#define cpu_online(cpu)                cpu_isset((cpu), cpu_online_map)
-#define cpu_possible(cpu)      cpu_isset((cpu), cpu_possible_map)
-#define cpu_present(cpu)       cpu_isset((cpu), cpu_present_map)
-#else
-#define num_online_cpus()      1
-#define num_possible_cpus()    1
-#define num_present_cpus()     1
-#define cpu_online(cpu)                ((cpu) == 0)
-#define cpu_possible(cpu)      ((cpu) == 0)
-#define cpu_present(cpu)       ((cpu) == 0)
-#endif
-
-#define any_online_cpu(mask)                   \
-({                                             \
-       int cpu;                                \
-       for_each_cpu_mask(cpu, (mask))          \
-               if (cpu_online(cpu))            \
-                       break;                  \
-       cpu;                                    \
-})
-
-#define for_each_cpu(cpu)        for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
-
-#endif /* __LINUX_CPUMASK_H */
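The iteration macros above all reduce to find-first/find-next bit operations over the mask, with NR_CPUS as the end sentinel. A self-contained sketch of the same loop for the single-word case (NR_CPUS <= BITS_PER_LONG); the bit scan is reimplemented naively here rather than pulled from linux/bitmap.h, and the mask value is made up:

#include <stdio.h>

#define NR_CPUS 8

typedef struct { unsigned long bits; } cpumask_t;

static int next_set_bit(unsigned long word, int start)
{
	int i;

	for (i = start; i < NR_CPUS; i++)
		if (word & (1UL << i))
			return i;
	return NR_CPUS;		/* mirrors the NR_CPUS sentinel above */
}

#define first_cpu(mask)		next_set_bit((mask).bits, 0)
#define next_cpu(n, mask)	next_set_bit((mask).bits, (n) + 1)

#define for_each_cpu_mask(cpu, mask)		\
	for ((cpu) = first_cpu(mask);		\
	     (cpu) < NR_CPUS;			\
	     (cpu) = next_cpu((cpu), (mask)))

int main(void)
{
	cpumask_t online = { 0x2d };	/* CPUs 0, 2, 3 and 5 "online" */
	int cpu;

	for_each_cpu_mask(cpu, online)
		printf("cpu %d is online\n", cpu);
	return 0;
}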
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux/hardirq.h
--- a/xen/include/asm-ia64/linux/hardirq.h      Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,110 +0,0 @@
-#ifndef LINUX_HARDIRQ_H
-#define LINUX_HARDIRQ_H
-
-#include <linux/config.h>
-#include <linux/smp_lock.h>
-#include <asm/hardirq.h>
-#include <asm/system.h>
-
-/*
- * We put the hardirq and softirq counter into the preemption
- * counter. The bitmask has the following meaning:
- *
- * - bits 0-7 are the preemption count (max preemption depth: 256)
- * - bits 8-15 are the softirq count (max # of softirqs: 256)
- *
- * The hardirq count can be overridden per architecture, the default is:
- *
- * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
- * - ( bit 28 is the PREEMPT_ACTIVE flag. )
- *
- * PREEMPT_MASK: 0x000000ff
- * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x0fff0000
- */
-#define PREEMPT_BITS   8
-#define SOFTIRQ_BITS   8
-
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS   12
-/*
- * The hardirq mask has to be large enough to have space for potentially
- * all IRQ sources in the system nesting on a single CPU.
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-#endif
-
-#define PREEMPT_SHIFT  0
-#define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-
-#define __IRQ_MASK(x)  ((1UL << (x))-1)
-
-#define PREEMPT_MASK   (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
-#define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#define SOFTIRQ_MASK   (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
-
-#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
-#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
-#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
-
-#define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
-#define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
-#define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
-
-/*
- * Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- */
-#define in_irq()               (hardirq_count())
-#define in_softirq()           (softirq_count())
-#ifndef XEN
-#define in_interrupt()         (irq_count())
-#else
-#define in_interrupt()         0               // FIXME LATER
-#endif
-
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic()   ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-# define in_atomic()   ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-#endif
-
-#ifdef CONFIG_PREEMPT
-# define preemptible() (preempt_count() == 0 && !irqs_disabled())
-# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
-#else
-# define preemptible() 0
-# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
-#endif
-
-#ifdef CONFIG_SMP
-extern void synchronize_irq(unsigned int irq);
-#else
-# define synchronize_irq(irq)  barrier()
-#endif
-
-#define nmi_enter()            irq_enter()
-#define nmi_exit()             sub_preempt_count(HARDIRQ_OFFSET)
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-static inline void account_user_vtime(struct task_struct *tsk)
-{
-}
-
-static inline void account_system_vtime(struct task_struct *tsk)
-{
-}
-#endif
-
-#define irq_enter()                                    \
-       do {                                            \
-               account_system_vtime(current);          \
-               add_preempt_count(HARDIRQ_OFFSET);      \
-       } while (0)
-
-extern void irq_exit(void);
-
-#endif /* LINUX_HARDIRQ_H */
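
The preempt_count() bit layout documented in the deleted hardirq.h comment
(preemption depth in bits 0-7, softirq count in 8-15, hardirq count in 16-27)
can be exercised with the same shift/mask arithmetic in a standalone program.
A sketch assuming the default HARDIRQ_BITS of 12; the macros are local copies
for illustration, not the kernel's:

    #include <stdio.h>

    #define PREEMPT_BITS   8
    #define SOFTIRQ_BITS   8
    #define HARDIRQ_BITS   12

    #define PREEMPT_SHIFT  0
    #define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
    #define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)

    #define __IRQ_MASK(x)  ((1UL << (x)) - 1)
    #define PREEMPT_MASK   (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
    #define SOFTIRQ_MASK   (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
    #define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

    int main(void)
    {
            /* one hardirq nested over one softirq, preempt depth 2 */
            unsigned long count = (1UL << HARDIRQ_SHIFT)
                                | (1UL << SOFTIRQ_SHIFT) | 2;

            printf("preempt: %lu\n", (count & PREEMPT_MASK) >> PREEMPT_SHIFT);
            printf("softirq: %lu\n", (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT);
            printf("hardirq: %lu\n", (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT);
            return 0;
    }
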
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/linux/interrupt.h
--- a/xen/include/asm-ia64/linux/interrupt.h    Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,291 +0,0 @@
-/* interrupt.h */
-#ifndef _LINUX_INTERRUPT_H
-#define _LINUX_INTERRUPT_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/linkage.h>
-#include <linux/bitops.h>
-#include <linux/preempt.h>
-#include <linux/cpumask.h>
-#include <linux/hardirq.h>
-#include <asm/atomic.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-
-/*
- * For 2.4.x compatibility, 2.4.x can use
- *
- *     typedef void irqreturn_t;
- *     #define IRQ_NONE
- *     #define IRQ_HANDLED
- *     #define IRQ_RETVAL(x)
- *
- * To mix old-style and new-style irq handler returns.
- *
- * IRQ_NONE means we didn't handle it.
- * IRQ_HANDLED means that we did have a valid interrupt and handled it.
- * IRQ_RETVAL(x) selects between the two depending on x being non-zero (for handled)
- */
-typedef int irqreturn_t;
-
-#define IRQ_NONE       (0)
-#define IRQ_HANDLED    (1)
-#define IRQ_RETVAL(x)  ((x) != 0)
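
IRQ_RETVAL() lets a handler derive its return value from any boolean "did I
handle it" expression. A trivial standalone illustration with local copies of
the three macros (my_handler and its pending argument are invented):

    #include <stdio.h>

    typedef int irqreturn_t;
    #define IRQ_NONE       (0)
    #define IRQ_HANDLED    (1)
    #define IRQ_RETVAL(x)  ((x) != 0)

    static irqreturn_t my_handler(int pending)
    {
            int handled = pending & 0x1;    /* pretend bit 0 is our device */
            return IRQ_RETVAL(handled);     /* IRQ_HANDLED or IRQ_NONE */
    }

    int main(void)
    {
            printf("%d %d\n", my_handler(1), my_handler(2));    /* 1 0 */
            return 0;
    }
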
-
-#ifndef XEN
-struct irqaction {
-       irqreturn_t (*handler)(int, void *, struct pt_regs *);
-       unsigned long flags;
-       cpumask_t mask;
-       const char *name;
-       void *dev_id;
-       struct irqaction *next;
-       int irq;
-       struct proc_dir_entry *dir;
-};
-
-extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
-extern int request_irq(unsigned int,
-                      irqreturn_t (*handler)(int, void *, struct pt_regs *),
-                      unsigned long, const char *, void *);
-extern void free_irq(unsigned int, void *);
-#endif
-
-
-#ifdef CONFIG_GENERIC_HARDIRQS
-extern void disable_irq_nosync(unsigned int irq);
-extern void disable_irq(unsigned int irq);
-extern void enable_irq(unsigned int irq);
-#endif
-
-/*
- * Temporary defines for UP kernels, until all code gets fixed.
- */
-#ifndef CONFIG_SMP
-static inline void __deprecated cli(void)
-{
-       local_irq_disable();
-}
-static inline void __deprecated sti(void)
-{
-       local_irq_enable();
-}
-static inline void __deprecated save_flags(unsigned long *x)
-{
-       local_save_flags(*x);
-}
-#define save_flags(x) save_flags(&x)
-static inline void __deprecated restore_flags(unsigned long x)
-{
-       local_irq_restore(x);
-}
-
-static inline void __deprecated save_and_cli(unsigned long *x)
-{
-       local_irq_save(*x);
-}
-#define save_and_cli(x)        save_and_cli(&x)
-#endif /* CONFIG_SMP */
-
-/* SoftIRQ primitives.  */
-#define local_bh_disable() \
-               do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
-               do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
-
-extern void local_bh_enable(void);
-
-/* PLEASE avoid allocating new softirqs unless you really need
-   high-frequency threaded job scheduling. For almost all purposes
-   tasklets are more than enough. E.g. all serial device BHs et
-   al. should be converted to tasklets, not to softirqs.
- */
-
-enum
-{
-       HI_SOFTIRQ=0,
-       TIMER_SOFTIRQ,
-       NET_TX_SOFTIRQ,
-       NET_RX_SOFTIRQ,
-       SCSI_SOFTIRQ,
-       TASKLET_SOFTIRQ
-};
-
-/* softirq mask and active fields moved to irq_cpustat_t in
- * asm/hardirq.h to get better cache usage.  KAO
- */
-
-struct softirq_action
-{
-       void    (*action)(struct softirq_action *);
-       void    *data;
-};
-
-asmlinkage void do_softirq(void);
-//extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
-extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
-extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
-extern void FASTCALL(raise_softirq(unsigned int nr));
-
-
-/* Tasklets --- multithreaded analogue of BHs.
-
-   The main feature distinguishing them from generic softirqs: a tasklet
-   runs on only one CPU at a time.
-
-   The main feature distinguishing them from BHs: different tasklets
-   may be run simultaneously on different CPUs.
-
-   Properties:
-   * If tasklet_schedule() is called, then the tasklet is guaranteed
-     to be executed on some cpu at least once after this.
-   * If the tasklet is already scheduled, but its execution has not yet
-     started, it will be executed only once.
-   * If this tasklet is already running on another CPU (or schedule is called
-     from the tasklet itself), it is rescheduled for later.
-   * A tasklet is strictly serialized wrt itself, but not
-     wrt other tasklets. If a client needs intertask synchronization,
-     it must provide it with spinlocks.
- */
-
-struct tasklet_struct
-{
-       struct tasklet_struct *next;
-       unsigned long state;
-       atomic_t count;
-       void (*func)(unsigned long);
-       unsigned long data;
-};
-
-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
-
-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
-
-
-enum
-{
-       TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
-       TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
-};
-
-#ifdef CONFIG_SMP
-static inline int tasklet_trylock(struct tasklet_struct *t)
-{
-       return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
-}
-
-static inline void tasklet_unlock(struct tasklet_struct *t)
-{
-       smp_mb__before_clear_bit(); 
-       clear_bit(TASKLET_STATE_RUN, &(t)->state);
-}
-
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
-       while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
-#else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
-#endif
-
-extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
-
-static inline void tasklet_schedule(struct tasklet_struct *t)
-{
-       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
-               __tasklet_schedule(t);
-}
-
-extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
-
-static inline void tasklet_hi_schedule(struct tasklet_struct *t)
-{
-       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
-               __tasklet_hi_schedule(t);
-}
-
-
-static inline void tasklet_disable_nosync(struct tasklet_struct *t)
-{
-       atomic_inc(&t->count);
-       smp_mb__after_atomic_inc();
-}
-
-static inline void tasklet_disable(struct tasklet_struct *t)
-{
-       tasklet_disable_nosync(t);
-       tasklet_unlock_wait(t);
-       smp_mb();
-}
-
-static inline void tasklet_enable(struct tasklet_struct *t)
-{
-       smp_mb__before_atomic_dec();
-       atomic_dec(&t->count);
-}
-
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
-       smp_mb__before_atomic_dec();
-       atomic_dec(&t->count);
-}
-
-extern void tasklet_kill(struct tasklet_struct *t);
-extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
-extern void tasklet_init(struct tasklet_struct *t,
-                        void (*func)(unsigned long), unsigned long data);
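
The "scheduled only once while pending" property from the tasklet comment
above falls out of the test_and_set_bit() on TASKLET_STATE_SCHED: only the
caller that flips the bit from 0 to 1 actually queues the tasklet. A minimal
userspace model of that guard, using a C11 atomic in place of the kernel's
bitops (my_tasklet_schedule and enqueue are invented names):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MY_STATE_SCHED 0                /* mirrors TASKLET_STATE_SCHED */

    struct my_tasklet {
            atomic_ulong state;
    };

    static void enqueue(struct my_tasklet *t)
    {
            printf("queued for execution\n");
    }

    static void my_tasklet_schedule(struct my_tasklet *t)
    {
            /* like test_and_set_bit(): only the 0->1 transition enqueues */
            unsigned long old = atomic_fetch_or(&t->state, 1UL << MY_STATE_SCHED);
            if (!(old & (1UL << MY_STATE_SCHED)))
                    enqueue(t);
    }

    int main(void)
    {
            struct my_tasklet t = { .state = 0 };
            my_tasklet_schedule(&t);        /* queues */
            my_tasklet_schedule(&t);        /* no-op: already pending */
            return 0;
    }
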
-
-/*
- * Autoprobing for irqs:
- *
- * probe_irq_on() and probe_irq_off() provide robust primitives
- * for accurate IRQ probing during kernel initialization.  They are
- * reasonably simple to use, are not "fooled" by spurious interrupts,
- * and, unlike other attempts at IRQ probing, they do not get hung on
- * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
- *
- * For reasonably foolproof probing, use them as follows:
- *
- * 1. clear and/or mask the device's internal interrupt.
- * 2. sti();
- * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
- * 4. enable the device and cause it to trigger an interrupt.
- * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
- * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
- * 7. service the device to clear its pending interrupt.
- * 8. loop again if paranoia is required.
- *
- * probe_irq_on() returns a mask of allocated irq's.
- *
- * probe_irq_off() takes the mask as a parameter,
- * and returns the irq number which occurred,
- * or zero if none occurred, or a negative irq number
- * if more than one irq occurred.
- */
-
-#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE) 
-static inline unsigned long probe_irq_on(void)
-{
-       return 0;
-}
-static inline int probe_irq_off(unsigned long val)
-{
-       return 0;
-}
-static inline unsigned int probe_irq_mask(unsigned long val)
-{
-       return 0;
-}
-#else
-extern unsigned long probe_irq_on(void);       /* returns 0 on failure */
-extern int probe_irq_off(unsigned long);       /* returns 0 or negative on failure */
-extern unsigned int probe_irq_mask(unsigned long);     /* returns mask of ISA interrupts */
-#endif
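
The probing recipe spelled out in the comment above is essentially set
algebra on an IRQ bitmask: probe_irq_on() snapshots the idle lines, the
device is made to fire, and probe_irq_off() reports which snapshot bit went
active. A toy userspace model of that flow (probe_on, probe_off and
fire_device are invented stand-ins, not the kernel API):

    #include <stdio.h>

    static unsigned long idle_mask;     /* lines "taken over" at probe start */
    static unsigned long active_mask;   /* lines that fired since then */

    static unsigned long probe_on(unsigned long unassigned)
    {
            active_mask = 0;
            idle_mask = unassigned;
            return idle_mask;
    }

    static void fire_device(int irq)    /* stand-in for step 4 of the recipe */
    {
            active_mask |= 1UL << irq;
    }

    /* 0 = none fired, >0 = that irq, <0 = multiple fired */
    static int probe_off(unsigned long mask)
    {
            unsigned long hits = mask & active_mask;
            int irq = 0, n = 0;

            for (int i = 0; i < 64; i++)
                    if (hits & (1UL << i)) { irq = i; n++; }
            if (n == 0)
                    return 0;
            return (n > 1) ? -irq : irq;
    }

    int main(void)
    {
            unsigned long probe = probe_on(0x3f0UL);    /* irqs 4..9 idle */
            fire_device(7);
            printf("probed irq = %d\n", probe_off(probe));  /* prints 7 */
            return 0;
    }
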
-
-#endif
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/mca_asm.h
--- a/xen/include/asm-ia64/mca_asm.h    Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,323 +0,0 @@
-/*
- * File:       mca_asm.h
- *
- * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@xxxxxxxxxxxx)
- * Copyright (C) Srinivasa Thirumalachar <sprasad@xxxxxxxxxxxx>
- * Copyright (C) 2000 Hewlett-Packard Co.
- * Copyright (C) 2000 David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 2002 Intel Corp.
- * Copyright (C) 2002 Jenna Hall <jenna.s.hall@xxxxxxxxx>
- */
-#ifndef _ASM_IA64_MCA_ASM_H
-#define _ASM_IA64_MCA_ASM_H
-
-#define PSR_IC         13
-#define PSR_I          14
-#define        PSR_DT          17
-#define PSR_RT         27
-#define PSR_MC         35
-#define PSR_IT         36
-#define PSR_BN         44
-
-/*
- * This macro converts an instruction virtual address to a physical address
- * Right now for simulation purposes the virtual addresses are
- * direct mapped to physical addresses.
- *     1. Lop off bits 61 thru 63 in the virtual address
- */
-#ifdef XEN
-#define INST_VA_TO_PA(addr)                                                    \
-       dep     addr    = 0, addr, 60, 4
-#else // XEN
-#define INST_VA_TO_PA(addr)                                                    \
-       dep     addr    = 0, addr, 61, 3
-#endif // XEN
-/*
- * This macro converts a data virtual address to a physical address
- * Right now for simulation purposes the virtual addresses are
- * direct mapped to physical addresses.
- *     1. Lop off bits 61 thru 63 in the virtual address
- */
-#define DATA_VA_TO_PA(addr)                                                    \
-       tpa     addr    = addr
-/*
- * This macro converts a data physical address to a virtual address
- * Right now for simulation purposes the virtual addresses are
- * direct mapped to physical addresses.
- *     1. Put 0x7 in bits 61 thru 63.
- */
-#ifdef XEN
-#define DATA_PA_TO_VA(addr,temp)                                               \
-       mov     temp    = 0xf   ;;                                              \
-       dep     addr    = temp, addr, 60, 4
-#else // XEN
-#define DATA_PA_TO_VA(addr,temp)                                               \
-       mov     temp    = 0x7   ;;                                              \
-       dep     addr    = temp, addr, 61, 3
-#endif // XEN
-
-#define GET_THIS_PADDR(reg, var)               \
-       mov     reg = IA64_KR(PER_CPU_DATA);;   \
-        addl   reg = THIS_CPU(var), reg
-
-/*
- * This macro jumps to the instruction at the given virtual address
- * and starts execution in physical mode with all the address
- * translations turned off.
- *     1.      Save the current psr
- *     2.      Make sure that all the upper 32 bits are off
- *
- *     3.      Clear the interrupt enable and interrupt state collection bits
- *             in the psr before updating the ipsr and iip.
- *
- *     4.      Turn off the instruction, data and rse translation bits of the psr
- *             and store the new value into ipsr
- *             Also make sure that the interrupts are disabled.
- *             Ensure that we are in little endian mode.
- *             [psr.{rt, it, dt, i, be} = 0]
- *
- *     5.      Get the physical address corresponding to the virtual address
- *             of the next instruction bundle and put it in iip.
- *             (Using magic numbers 24 and 40 in the deposit instruction since
- *              the IA64_SDK code directly maps the lower 24 bits of a virtual
- *              address as the physical address).
- *
- *     6.      Do an rfi to move the values from ipsr to psr and iip to ip.
- */
-#define  PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)                \
-       mov     old_psr = psr;                                                  \
-       ;;                                                                      \
-       dep     old_psr = 0, old_psr, 32, 32;                                   \
-                                                                               \
-       mov     ar.rsc = 0 ;                                                    \
-       ;;                                                                      \
-       srlz.d;                                                                 \
-       mov     temp2 = ar.bspstore;                                            \
-       ;;                                                                      \
-       DATA_VA_TO_PA(temp2);                                                   \
-       ;;                                                                      \
-       mov     temp1 = ar.rnat;                                                \
-       ;;                                                                      \
-       mov     ar.bspstore = temp2;                                            \
-       ;;                                                                      \
-       mov     ar.rnat = temp1;                                                \
-       mov     temp1 = psr;                                                    \
-       mov     temp2 = psr;                                                    \
-       ;;                                                                      \
-                                                                               \
-       dep     temp2 = 0, temp2, PSR_IC, 2;                                    \
-       ;;                                                                      \
-       mov     psr.l = temp2;                                                  \
-       ;;                                                                      \
-       srlz.d;                                                                 \
-       dep     temp1 = 0, temp1, 32, 32;                                       \
-       ;;                                                                      \
-       dep     temp1 = 0, temp1, PSR_IT, 1;                                    \
-       ;;                                                                      \
-       dep     temp1 = 0, temp1, PSR_DT, 1;                                    \
-       ;;                                                                      \
-       dep     temp1 = 0, temp1, PSR_RT, 1;                                    \
-       ;;                                                                      \
-       dep     temp1 = 0, temp1, PSR_I, 1;                                     \
-       ;;                                                                      \
-       dep     temp1 = 0, temp1, PSR_IC, 1;                                    \
-       ;;                                                                      \
-       dep     temp1 = -1, temp1, PSR_MC, 1;                                   \
-       ;;                                                                      \
-       mov     cr.ipsr = temp1;                                                \
-       ;;                                                                      \
-       LOAD_PHYSICAL(p0, temp2, start_addr);                                   \
-       ;;                                                                      \
-       mov     cr.iip = temp2;                                                 \
-       mov     cr.ifs = r0;                                                    \
-       DATA_VA_TO_PA(sp);                                                      \
-       DATA_VA_TO_PA(gp);                                                      \
-       ;;                                                                      \
-       srlz.i;                                                                 \
-       ;;                                                                      \
-       nop     1;                                                              \
-       nop     2;                                                              \
-       nop     1;                                                              \
-       nop     2;                                                              \
-       rfi;                                                                    \
-       ;;
-
-/*
- * This macro jumps to the instruction at the given virtual address
- * and starts execution in virtual mode with all the address
- * translations turned on.
- *     1.      Get the old saved psr
- *
- *     2.      Clear the interrupt state collection bit in the current psr.
- *
- *     3.      Set the instruction translation bit back in the old psr
- *             Note we have to do this since we are right now saving only the
- *             lower 32-bits of old psr.(Also the old psr has the data and
- *             rse translation bits on)
- *
- *     4.      Set ipsr to this old_psr with "it" bit set and "bn" = 1.
- *
- *     5.      Reset the current thread pointer (r13).
- *
- *     6.      Set iip to the virtual address of the next instruction bundle.
- *
- *     7.      Do an rfi to move ipsr to psr and iip to ip.
- */
-
-#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)  \
-       mov     temp2 = psr;                                    \
-       ;;                                                      \
-       mov     old_psr = temp2;                                \
-       ;;                                                      \
-       dep     temp2 = 0, temp2, PSR_IC, 2;                    \
-       ;;                                                      \
-       mov     psr.l = temp2;                                  \
-       mov     ar.rsc = 0;                                     \
-       ;;                                                      \
-       srlz.d;                                                 \
-       mov     r13 = ar.k6;                                    \
-       mov     temp2 = ar.bspstore;                            \
-       ;;                                                      \
-       DATA_PA_TO_VA(temp2,temp1);                             \
-       ;;                                                      \
-       mov     temp1 = ar.rnat;                                \
-       ;;                                                      \
-       mov     ar.bspstore = temp2;                            \
-       ;;                                                      \
-       mov     ar.rnat = temp1;                                \
-       ;;                                                      \
-       mov     temp1 = old_psr;                                \
-       ;;                                                      \
-       mov     temp2 = 1;                                      \
-       ;;                                                      \
-       dep     temp1 = temp2, temp1, PSR_IC, 1;                \
-       ;;                                                      \
-       dep     temp1 = temp2, temp1, PSR_IT, 1;                \
-       ;;                                                      \
-       dep     temp1 = temp2, temp1, PSR_DT, 1;                \
-       ;;                                                      \
-       dep     temp1 = temp2, temp1, PSR_RT, 1;                \
-       ;;                                                      \
-       dep     temp1 = temp2, temp1, PSR_BN, 1;                \
-       ;;                                                      \
-                                                               \
-       mov     cr.ipsr = temp1;                                \
-       movl    temp2 = start_addr;                             \
-       ;;                                                      \
-       mov     cr.iip = temp2;                                 \
-       ;;                                                      \
-       DATA_PA_TO_VA(sp, temp1);                               \
-       DATA_PA_TO_VA(gp, temp2);                               \
-       srlz.i;                                                 \
-       ;;                                                      \
-       nop     1;                                              \
-       nop     2;                                              \
-       nop     1;                                              \
-       rfi                                                     \
-       ;;
-
-/*
- * The following offsets capture the order in which the
- * RSE related registers from the old context are
- * saved onto the new stack frame.
- *
- *     +-----------------------+
- *     |NDIRTY [BSP - BSPSTORE]|
- *     +-----------------------+
- *     |       RNAT            |
- *     +-----------------------+
- *     |       BSPSTORE        |
- *     +-----------------------+
- *     |       IFS             |
- *     +-----------------------+
- *     |       PFS             |
- *     +-----------------------+
- *     |       RSC             |
- *     +-----------------------+ <-------- Bottom of new stack frame
- */
-#define  rse_rsc_offset                0
-#define  rse_pfs_offset                (rse_rsc_offset+0x08)
-#define  rse_ifs_offset                (rse_pfs_offset+0x08)
-#define  rse_bspstore_offset   (rse_ifs_offset+0x08)
-#define  rse_rnat_offset       (rse_bspstore_offset+0x08)
-#define  rse_ndirty_offset     (rse_rnat_offset+0x08)
-
-/*
- * rse_switch_context
- *
- *     1. Save old RSC onto the new stack frame
- *     2. Save PFS onto new stack frame
- *     3. Cover the old frame and start a new frame.
- *     4. Save IFS onto new stack frame
- *     5. Save the old BSPSTORE on the new stack frame
- *     6. Save the old RNAT on the new stack frame
- *     7. Write BSPSTORE with the new backing store pointer
- *     8. Read and save the new BSP to calculate the #dirty registers
- * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
- */
-#define rse_switch_context(temp,p_stackframe,p_bspstore)               \
-       ;;                                                              \
-       mov     temp=ar.rsc;;                                           \
-       st8     [p_stackframe]=temp,8;;                                 \
-       mov     temp=ar.pfs;;                                           \
-       st8     [p_stackframe]=temp,8;                                  \
-       cover ;;                                                        \
-       mov     temp=cr.ifs;;                                           \
-       st8     [p_stackframe]=temp,8;;                                 \
-       mov     temp=ar.bspstore;;                                      \
-       st8     [p_stackframe]=temp,8;;                                 \
-       mov     temp=ar.rnat;;                                          \
-       st8     [p_stackframe]=temp,8;                                  \
-       mov     ar.bspstore=p_bspstore;;                                \
-       mov     temp=ar.bsp;;                                           \
-       sub     temp=temp,p_bspstore;;                                  \
-       st8     [p_stackframe]=temp,8;;
-
-/*
- * rse_return_context
- *     1. Allocate a zero-sized frame
- *     2. Store the number of dirty registers in the RSC.loadrs field
- *     3. Issue a loadrs to ensure that any registers from the interrupted
- *        context which were saved on the new stack frame have been loaded
- *        back into the stacked registers
- *     4. Restore BSPSTORE
- *     5. Restore RNAT
- *     6. Restore PFS
- *     7. Restore IFS
- *     8. Restore RSC
- *     9. Issue an RFI
- */
-#define rse_return_context(psr_mask_reg,temp,p_stackframe)                     \
-       ;;                                                                      \
-       alloc   temp=ar.pfs,0,0,0,0;                                            \
-       add     p_stackframe=rse_ndirty_offset,p_stackframe;;                   \
-       ld8     temp=[p_stackframe];;                                           \
-       shl     temp=temp,16;;                                                  \
-       mov     ar.rsc=temp;;                                                   \
-       loadrs;;                                                                \
-       add     p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
-       ld8     temp=[p_stackframe];;                                           \
-       mov     ar.bspstore=temp;;                                              \
-       add     p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
-       ld8     temp=[p_stackframe];;                                           \
-       mov     ar.rnat=temp;;                                                  \
-       add     p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;     \
-       ld8     temp=[p_stackframe];;                                           \
-       mov     ar.pfs=temp;;                                                   \
-       add     p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;      \
-       ld8     temp=[p_stackframe];;                                           \
-       mov     cr.ifs=temp;;                                                   \
-       add     p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;      \
-       ld8     temp=[p_stackframe];;                                           \
-       mov     ar.rsc=temp ;                                                   \
-       mov     temp=psr;;                                                      \
-       or      temp=temp,psr_mask_reg;;                                        \
-       mov     cr.ipsr=temp;;                                                  \
-       mov     temp=ip;;                                                       \
-       add     temp=0x30,temp;;                                                \
-       mov     cr.iip=temp;;                                                   \
-       srlz.i;;                                                                \
-       rfi;;
-
-#endif /* _ASM_IA64_MCA_ASM_H */
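
The only XEN-vs-Linux difference in the VA<->PA macros above is how many
region bits the ia64 "dep" (deposit) instruction rewrites: Xen deposits 4
bits at position 60 (region 0xf), stock Linux 3 bits at 61 (region 0x7). A
plain-C model of that bit surgery, purely illustrative (the dep() helper
below is a local reimplementation, not a real intrinsic):

    #include <stdio.h>

    /* dep dst = val, dst, pos, len: replace len bits of dst at pos with val */
    static unsigned long dep(unsigned long dst, unsigned long val,
                             int pos, int len)
    {
            unsigned long mask = ((1UL << len) - 1) << pos;
            return (dst & ~mask) | ((val << pos) & mask);
    }

    int main(void)
    {
            unsigned long va = 0xf000000000012345UL;

            /* Xen INST_VA_TO_PA: clear bits 60..63 */
            printf("pa (xen)   = %#lx\n", dep(va, 0, 60, 4));
            /* Linux INST_VA_TO_PA: clear bits 61..63 */
            printf("pa (linux) = %#lx\n", dep(va, 0, 61, 3));
            /* Xen DATA_PA_TO_VA: set the region bits to 0xf */
            printf("va (xen)   = %#lx\n", dep(0x12345UL, 0xf, 60, 4));
            return 0;
    }
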
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/page.h
--- a/xen/include/asm-ia64/page.h       Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,238 +0,0 @@
-#ifndef _ASM_IA64_PAGE_H
-#define _ASM_IA64_PAGE_H
-/*
- * Pagetable related stuff.
- *
- * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- */
-
-#include <linux/config.h>
-
-#include <asm/intrinsics.h>
-#include <asm/types.h>
-
-/*
- * PAGE_SHIFT determines the actual kernel page size.
- */
-#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
-# define PAGE_SHIFT    12
-#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
-# define PAGE_SHIFT    13
-#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
-# define PAGE_SHIFT    14
-#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
-# define PAGE_SHIFT    16
-#else
-# error Unsupported page size!
-#endif
-
-#define PAGE_SIZE              (__IA64_UL_CONST(1) << PAGE_SHIFT)
-#define PAGE_MASK              (~(PAGE_SIZE - 1))
-#define PAGE_ALIGN(addr)       (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
-
-#define PERCPU_PAGE_SHIFT      16      /* log2() of max. size of per-CPU area */
-
-#define PERCPU_PAGE_SIZE       (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
-
-#define RGN_MAP_LIMIT  ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)      /* per region addr limit */
-
-#ifdef CONFIG_HUGETLB_PAGE
-# define REGION_HPAGE          (4UL)   /* note: this is hardcoded in reload_context()! */
-# define REGION_SHIFT          61
-# define HPAGE_REGION_BASE     (REGION_HPAGE << REGION_SHIFT)
-# define HPAGE_SHIFT           hpage_shift
-# define HPAGE_SHIFT_DEFAULT   28      /* check ia64 SDM for architecture supported size */
-# define HPAGE_SIZE            (__IA64_UL_CONST(1) << HPAGE_SHIFT)
-# define HPAGE_MASK            (~(HPAGE_SIZE - 1))
-
-# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#endif /* CONFIG_HUGETLB_PAGE */
-
-#ifdef __ASSEMBLY__
-# define __pa(x)               ((x) - PAGE_OFFSET)
-# define __va(x)               ((x) + PAGE_OFFSET)
-#else /* !__ASSEMBLY */
-# ifdef __KERNEL__
-#  define STRICT_MM_TYPECHECKS
-
-extern void clear_page (void *page);
-extern void copy_page (void *to, void *from);
-
-/*
- * clear_user_page() and copy_user_page() can't be inline functions because
- * flush_dcache_page() can't be defined until later...
- */
-#define clear_user_page(addr, vaddr, page)     \
-do {                                           \
-       clear_page(addr);                       \
-       flush_dcache_page(page);                \
-} while (0)
-
-#define copy_user_page(to, from, vaddr, page)  \
-do {                                           \
-       copy_page((to), (from));                \
-       flush_dcache_page(page);                \
-} while (0)
-
-
-#define alloc_zeroed_user_highpage(vma, vaddr) \
-({                                             \
-       struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \
-       if (page)                               \
-               flush_dcache_page(page);        \
-       page;                                   \
-})
-
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-extern int ia64_pfn_valid (unsigned long pfn);
-#else
-# define ia64_pfn_valid(pfn) 1
-#endif
-
-#ifndef CONFIG_DISCONTIGMEM
-#ifdef XEN
-# define pfn_valid(pfn)                (0)
-# define page_to_pfn(_page)    ((unsigned long)((_page) - frame_table))
-# define pfn_to_page(_pfn)     (frame_table + (_pfn))
-#else
-# define pfn_valid(pfn)                (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
-# define page_to_pfn(page)     ((unsigned long) (page - mem_map))
-# define pfn_to_page(pfn)      (mem_map + (pfn))
-#endif
-#else
-extern struct page *vmem_map;
-extern unsigned long max_low_pfn;
-# define pfn_valid(pfn)                (((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
-# define page_to_pfn(page)     ((unsigned long) (page - vmem_map))
-# define pfn_to_page(pfn)      (vmem_map + (pfn))
-#endif
-
-#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
-#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-
-#ifdef XEN
-#define page_to_virt(_page)    phys_to_virt(page_to_phys(_page))
-#define phys_to_page(kaddr)    pfn_to_page(((kaddr) >> PAGE_SHIFT))
-#endif
-
-typedef union ia64_va {
-       struct {
-               unsigned long off : 61;         /* intra-region offset */
-               unsigned long reg :  3;         /* region number */
-       } f;
-       unsigned long l;
-       void *p;
-} ia64_va;
-
-/*
- * Note: These macros depend on the fact that PAGE_OFFSET has all
- * region bits set to 1 and all other bits set to zero.  They are
- * expressed in this way to ensure they result in a single "dep"
- * instruction.
- */
-#ifdef XEN
-typedef union xen_va {
-       struct {
-               unsigned long off : 60;
-               unsigned long reg : 4;
-       } f;
-       unsigned long l;
-       void *p;
-} xen_va;
-
-// xen/drivers/console.c uses __va in a declaration (should be fixed!)
-#define __pa(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
-#define __va(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
-#else
-#define __pa(x)                ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
-#define __va(x)                ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
-#endif
-
-#define REGION_NUMBER(x)       ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
-#define REGION_OFFSET(x)       ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
-
-#define REGION_SIZE            REGION_NUMBER(1)
-#define REGION_KERNEL          7
-
-#ifdef CONFIG_HUGETLB_PAGE
-# define htlbpage_to_page(x)   (((unsigned long) REGION_NUMBER(x) << 61)       \
-                                | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
-# define HUGETLB_PAGE_ORDER    (HPAGE_SHIFT - PAGE_SHIFT)
-# define is_hugepage_only_range(addr, len)             \
-        (REGION_NUMBER(addr) == REGION_HPAGE &&        \
-         REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
-extern unsigned int hpage_shift;
-#endif
-
-static __inline__ int
-get_order (unsigned long size)
-{
-       long double d = size - 1;
-       long order;
-
-       order = ia64_getf_exp(d);
-       order = order - PAGE_SHIFT - 0xffff + 1;
-       if (order < 0)
-               order = 0;
-       return order;
-}
-
-# endif /* __KERNEL__ */
-#endif /* !__ASSEMBLY__ */
-
-#ifdef STRICT_MM_TYPECHECKS
-  /*
-   * These are used to make use of C type-checking..
-   */
-  typedef struct { unsigned long pte; } pte_t;
-  typedef struct { unsigned long pmd; } pmd_t;
-  typedef struct { unsigned long pgd; } pgd_t;
-  typedef struct { unsigned long pgprot; } pgprot_t;
-
-# define pte_val(x)    ((x).pte)
-# define pmd_val(x)    ((x).pmd)
-# define pgd_val(x)    ((x).pgd)
-# define pgprot_val(x) ((x).pgprot)
-
-# define __pte(x)      ((pte_t) { (x) } )
-# define __pgprot(x)   ((pgprot_t) { (x) } )
-
-#else /* !STRICT_MM_TYPECHECKS */
-  /*
-   * .. while these make it easier on the compiler
-   */
-# ifndef __ASSEMBLY__
-    typedef unsigned long pte_t;
-    typedef unsigned long pmd_t;
-    typedef unsigned long pgd_t;
-    typedef unsigned long pgprot_t;
-# endif
-
-# define pte_val(x)    (x)
-# define pmd_val(x)    (x)
-# define pgd_val(x)    (x)
-# define pgprot_val(x) (x)
-
-# define __pte(x)      (x)
-# define __pgd(x)      (x)
-# define __pgprot(x)   (x)
-#endif /* !STRICT_MM_TYPECHECKS */
-
-#ifdef XEN
-#define PAGE_OFFSET                    __IA64_UL_CONST(0xf000000000000000)
-#else
-#define PAGE_OFFSET                    __IA64_UL_CONST(0xe000000000000000)
-#endif
-
-#define VM_DATA_DEFAULT_FLAGS          (VM_READ | VM_WRITE |                   \
-                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |        \
-                                        (((current->personality & READ_IMPLIES_EXEC) != 0)     \
-                                         ? VM_EXEC : 0))
-
-#endif /* _ASM_IA64_PAGE_H */
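
The page arithmetic deleted above is self-contained enough to check in
userspace. The sketch below reimplements PAGE_ALIGN and get_order with an
ordinary loop instead of the ia64_getf_exp() floating-point exponent trick,
assuming the 16KB page size (PAGE_SHIFT 14) from the Kconfig choices; the
my_* names are local to the example:

    #include <stdio.h>

    #define MY_PAGE_SHIFT 14        /* as for CONFIG_IA64_PAGE_SIZE_16KB */
    #define MY_PAGE_SIZE  (1UL << MY_PAGE_SHIFT)
    #define MY_PAGE_MASK  (~(MY_PAGE_SIZE - 1))
    #define MY_PAGE_ALIGN(addr) (((addr) + MY_PAGE_SIZE - 1) & MY_PAGE_MASK)

    /* smallest order such that (1 << order) pages hold size bytes */
    static int my_get_order(unsigned long size)
    {
            unsigned long pages = MY_PAGE_ALIGN(size) >> MY_PAGE_SHIFT;
            int order = 0;

            while ((1UL << order) < pages)
                    order++;
            return order;
    }

    int main(void)
    {
            printf("PAGE_ALIGN(0x4001) = %#lx\n", MY_PAGE_ALIGN(0x4001UL));
            printf("get_order(100KB)   = %d\n", my_get_order(100 * 1024UL));
            return 0;
    }
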
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/pal.h
--- a/xen/include/asm-ia64/pal.h        Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,1567 +0,0 @@
-#ifndef _ASM_IA64_PAL_H
-#define _ASM_IA64_PAL_H
-
-/*
- * Processor Abstraction Layer definitions.
- *
- * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0
- * chapter 11 IA-64 Processor Abstraction Layer
- *
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@xxxxxxxxxxxxxxxxxxxx>
- *
- * 99/10/01    davidm  Make sure we pass zero for reserved parameters.
- * 00/03/07    davidm  Updated pal_cache_flush() to be in sync with PAL v2.6.
- * 00/03/23     cfleck  Modified processor min-state save area to match updated PAL & SAL info
- * 00/05/24     eranian Updated to latest PAL spec, fix structures bugs, added
- * 00/05/25    eranian Support for stack calls, and static physical calls
- * 00/06/18    eranian Support for stacked physical calls
- */
-
-/*
- * Note that some of these calls use a static-register only calling
- * convention which has nothing to do with the regular calling
- * convention.
- */
-#define PAL_CACHE_FLUSH                1       /* flush i/d cache */
-#define PAL_CACHE_INFO         2       /* get detailed i/d cache info */
-#define PAL_CACHE_INIT         3       /* initialize i/d cache */
-#define PAL_CACHE_SUMMARY      4       /* get summary of cache hierarchy */
-#define PAL_MEM_ATTRIB         5       /* list supported memory attributes */
-#define PAL_PTCE_INFO          6       /* purge TLB info */
-#define PAL_VM_INFO            7       /* return supported virtual memory features */
-#define PAL_VM_SUMMARY         8       /* return summary on supported vm features */
-#define PAL_BUS_GET_FEATURES   9       /* return processor bus interface features settings */
-#define PAL_BUS_SET_FEATURES   10      /* set processor bus features */
-#define PAL_DEBUG_INFO         11      /* get number of debug registers */
-#define PAL_FIXED_ADDR         12      /* get fixed component of processor's directed address */
-#define PAL_FREQ_BASE          13      /* base frequency of the platform */
-#define PAL_FREQ_RATIOS                14      /* ratio of processor, bus and ITC frequency */
-#define PAL_PERF_MON_INFO      15      /* return performance monitor info */
-#define PAL_PLATFORM_ADDR      16      /* set processor interrupt block and IO port space addr */
-#define PAL_PROC_GET_FEATURES  17      /* get configurable processor features & settings */
-#define PAL_PROC_SET_FEATURES  18      /* enable/disable configurable processor features */
-#define PAL_RSE_INFO           19      /* return rse information */
-#define PAL_VERSION            20      /* return version of PAL code */
-#define PAL_MC_CLEAR_LOG       21      /* clear all processor log info */
-#define PAL_MC_DRAIN           22      /* drain operations which could result in an MCA */
-#define PAL_MC_EXPECTED                23      /* set/reset expected MCA indicator */
-#define PAL_MC_DYNAMIC_STATE   24      /* get processor dynamic state */
-#define PAL_MC_ERROR_INFO      25      /* get processor MCA info and static state */
-#define PAL_MC_RESUME          26      /* Return to interrupted process */
-#define PAL_MC_REGISTER_MEM    27      /* Register memory for PAL to use during MCAs and inits */
-#define PAL_HALT               28      /* enter the low power HALT state */
-#define PAL_HALT_LIGHT         29      /* enter the low power light halt state */
-#define PAL_COPY_INFO          30      /* returns info needed to relocate PAL */
-#define PAL_CACHE_LINE_INIT    31      /* init tags & data of cache line */
-#define PAL_PMI_ENTRYPOINT     32      /* register PMI memory entry points with the processor */
-#define PAL_ENTER_IA_32_ENV    33      /* enter IA-32 system environment */
-#define PAL_VM_PAGE_SIZE       34      /* return vm TC and page walker page sizes */
-
-#define PAL_MEM_FOR_TEST       37      /* get amount of memory needed for late processor test */
-#define PAL_CACHE_PROT_INFO    38      /* get i/d cache protection info */
-#define PAL_REGISTER_INFO      39      /* return AR and CR register information */
-#define PAL_SHUTDOWN           40      /* enter processor shutdown state */
-#define PAL_PREFETCH_VISIBILITY        41      /* Make Processor Prefetches Visible */
-
-#define PAL_COPY_PAL           256     /* relocate PAL procedures and PAL PMI */
-#define PAL_HALT_INFO          257     /* return the low power capabilities of processor */
-#define PAL_TEST_PROC          258     /* perform late processor self-test */
-#define PAL_CACHE_READ         259     /* read tag & data of cacheline for diagnostic testing */
-#define PAL_CACHE_WRITE                260     /* write tag & data of cacheline for diagnostic testing */
-#define PAL_VM_TR_READ         261     /* read contents of translation register */
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <asm/fpu.h>
-
-/*
- * Data types needed to pass information into PAL procedures and
- * interpret information returned by them.
- */
-
-/* Return status from the PAL procedure */
-typedef s64                            pal_status_t;
-
-#define PAL_STATUS_SUCCESS             0       /* No error */
-#define PAL_STATUS_UNIMPLEMENTED       (-1)    /* Unimplemented procedure */
-#define PAL_STATUS_EINVAL              (-2)    /* Invalid argument */
-#define PAL_STATUS_ERROR               (-3)    /* Error */
-#define PAL_STATUS_CACHE_INIT_FAIL     (-4)    /* Could not initialize the
-                                                * specified level and type of
-                                                * cache without side effects
-                                                * and "restrict" was 1
-                                                */
-
-/* Processor cache level in the hierarchy */
-typedef u64                            pal_cache_level_t;
-#define PAL_CACHE_LEVEL_L0             0       /* L0 */
-#define PAL_CACHE_LEVEL_L1             1       /* L1 */
-#define PAL_CACHE_LEVEL_L2             2       /* L2 */
-
-
-/* Processor cache type at a particular level in the hierarchy */
-
-typedef u64                            pal_cache_type_t;
-#define PAL_CACHE_TYPE_INSTRUCTION     1       /* Instruction cache */
-#define PAL_CACHE_TYPE_DATA            2       /* Data or unified cache */
-#define PAL_CACHE_TYPE_INSTRUCTION_DATA        3       /* Both Data & Instruction */
-
-
-#define PAL_CACHE_FLUSH_INVALIDATE     1       /* Invalidate clean lines */
-#define PAL_CACHE_FLUSH_CHK_INTRS      2       /* check for interrupts/mc while flushing */
-
-/* Processor cache line size in bytes  */
-typedef int                            pal_cache_line_size_t;
-
-/* Processor cache line state */
-typedef u64                            pal_cache_line_state_t;
-#define PAL_CACHE_LINE_STATE_INVALID   0       /* Invalid */
-#define PAL_CACHE_LINE_STATE_SHARED    1       /* Shared */
-#define PAL_CACHE_LINE_STATE_EXCLUSIVE 2       /* Exclusive */
-#define PAL_CACHE_LINE_STATE_MODIFIED  3       /* Modified */
-
-typedef struct pal_freq_ratio {
-       u64 den : 32, num : 32; /* numerator & denominator */
-} itc_ratio, proc_ratio;
-
-typedef        union  pal_cache_config_info_1_s {
-       struct {
-               u64             u               : 1,    /* 0 Unified cache ? */
-                               at              : 2,    /* 2-1 Cache mem attr*/
-                               reserved        : 5,    /* 7-3 Reserved */
-                               associativity   : 8,    /* 16-8 Associativity*/
-                               line_size       : 8,    /* 23-17 Line size */
-                               stride          : 8,    /* 31-24 Stride */
-                               store_latency   : 8,    /*39-32 Store latency*/
-                               load_latency    : 8,    /* 47-40 Load latency*/
-                               store_hints     : 8,    /* 55-48 Store hints*/
-                               load_hints      : 8;    /* 63-56 Load hints */
-       } pcci1_bits;
-       u64                     pcci1_data;
-} pal_cache_config_info_1_t;
-
-typedef        union  pal_cache_config_info_2_s {
-       struct {
-               u64             cache_size      : 32,   /*cache size in bytes*/
-
-
-                               alias_boundary  : 8,    /* 39-32 aliased addr
-                                                        * separation for max
-                                                        * performance.
-                                                        */
-                               tag_ls_bit      : 8,    /* 47-40 LSb of addr*/
-                               tag_ms_bit      : 8,    /* 55-48 MSb of addr*/
-                               reserved        : 8;    /* 63-56 Reserved */
-       } pcci2_bits;
-       u64                     pcci2_data;
-} pal_cache_config_info_2_t;
-
-
-typedef struct pal_cache_config_info_s {
-       pal_status_t                    pcci_status;
-       pal_cache_config_info_1_t       pcci_info_1;
-       pal_cache_config_info_2_t       pcci_info_2;
-       u64                             pcci_reserved;
-} pal_cache_config_info_t;
-
-#define pcci_ld_hints          pcci_info_1.pcci1_bits.load_hints
-#define pcci_st_hints          pcci_info_1.pcci1_bits.store_hints
-#define pcci_ld_latency                pcci_info_1.pcci1_bits.load_latency
-#define pcci_st_latency                pcci_info_1.pcci1_bits.store_latency
-#define pcci_stride            pcci_info_1.pcci1_bits.stride
-#define pcci_line_size         pcci_info_1.pcci1_bits.line_size
-#define pcci_assoc             pcci_info_1.pcci1_bits.associativity
-#define pcci_cache_attr                pcci_info_1.pcci1_bits.at
-#define pcci_unified           pcci_info_1.pcci1_bits.u
-#define pcci_tag_msb           pcci_info_2.pcci2_bits.tag_ms_bit
-#define pcci_tag_lsb           pcci_info_2.pcci2_bits.tag_ls_bit
-#define pcci_alias_boundary    pcci_info_2.pcci2_bits.alias_boundary
-#define pcci_cache_size                pcci_info_2.pcci2_bits.cache_size
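
The pcci1/pcci2 unions above pack an entire cache-geometry reply from
PAL_CACHE_INFO into two 64-bit words, which the accessor macros then pick
apart. A standalone sketch of decoding pcci1 with a local copy of the union
and a fabricated sample value (a real value would come from the
PAL_CACHE_INFO call, which this example does not perform):

    #include <stdio.h>
    #include <stdint.h>

    typedef union {
            struct {
                    uint64_t u             : 1,     /* unified cache? */
                             at            : 2,     /* cache mem attr */
                             reserved      : 5,
                             associativity : 8,
                             line_size     : 8,
                             stride        : 8,
                             store_latency : 8,
                             load_latency  : 8,
                             store_hints   : 8,
                             load_hints    : 8;
            } pcci1_bits;
            uint64_t pcci1_data;
    } my_pcci1_t;

    int main(void)
    {
            my_pcci1_t info;

            info.pcci1_data = 0x0101020606000801UL;     /* made-up example */
            printf("unified       = %u\n", (unsigned)info.pcci1_bits.u);
            printf("associativity = %u\n",
                   (unsigned)info.pcci1_bits.associativity);
            printf("line_size     = %u\n",
                   (unsigned)info.pcci1_bits.line_size);
            return 0;
    }
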
-
-
-
-/* Possible values for cache attributes */
-
-#define PAL_CACHE_ATTR_WT              0       /* Write through cache */
-#define PAL_CACHE_ATTR_WB              1       /* Write back cache */
-#define PAL_CACHE_ATTR_WT_OR_WB                2       /* Either write thru or write
-                                                * back depending on TLB
-                                                * memory attributes
-                                                */
-
-
-/* Possible values for cache hints */
-
-#define PAL_CACHE_HINT_TEMP_1          0       /* Temporal level 1 */
-#define PAL_CACHE_HINT_NTEMP_1         1       /* Non-temporal level 1 */
-#define PAL_CACHE_HINT_NTEMP_ALL       3       /* Non-temporal all levels */
-
-/* Processor cache protection  information */
-typedef union pal_cache_protection_element_u {
-       u32                     pcpi_data;
-       struct {
-               u32             data_bits       : 8, /* # data bits covered by
-                                                     * each unit of protection
-                                                     */
-
-                               tagprot_lsb     : 6, /* Least -do- */
-                               tagprot_msb     : 6, /* Most Sig. tag address
-                                                     * bit that this
-                                                     * protection covers.
-                                                     */
-                               prot_bits       : 6, /* # of protection bits */
-                               method          : 4, /* Protection method */
-                               t_d             : 2; /* Indicates which part
-                                                     * of the cache this
-                                                     * protection encoding
-                                                     * applies.
-                                                     */
-       } pcp_info;
-} pal_cache_protection_element_t;
-
-#define pcpi_cache_prot_part   pcp_info.t_d
-#define pcpi_prot_method       pcp_info.method
-#define pcpi_prot_bits         pcp_info.prot_bits
-#define pcpi_tagprot_msb       pcp_info.tagprot_msb
-#define pcpi_tagprot_lsb       pcp_info.tagprot_lsb
-#define pcpi_data_bits         pcp_info.data_bits
-
-/* Processor cache part encodings */
-#define PAL_CACHE_PROT_PART_DATA       0       /* Data protection  */
-#define PAL_CACHE_PROT_PART_TAG                1       /* Tag  protection */
-#define PAL_CACHE_PROT_PART_TAG_DATA   2       /* Tag+data protection (tag is
-                                                * more significant )
-                                                */
-#define PAL_CACHE_PROT_PART_DATA_TAG   3       /* Data+tag protection (data is
-                                                * more significant )
-                                                */
-#define PAL_CACHE_PROT_PART_MAX                6
-
-
-typedef struct pal_cache_protection_info_s {
-       pal_status_t                    pcpi_status;
-       pal_cache_protection_element_t  pcp_info[PAL_CACHE_PROT_PART_MAX];
-} pal_cache_protection_info_t;
-
-
-/* Processor cache protection method encodings */
-#define PAL_CACHE_PROT_METHOD_NONE             0       /* No protection */
-#define PAL_CACHE_PROT_METHOD_ODD_PARITY       1       /* Odd parity */
-#define PAL_CACHE_PROT_METHOD_EVEN_PARITY      2       /* Even parity */
-#define PAL_CACHE_PROT_METHOD_ECC              3       /* ECC protection */
-
-
-/* Processor cache line identification in the hierarchy */
-typedef union pal_cache_line_id_u {
-       u64                     pclid_data;
-       struct {
-               u64             cache_type      : 8,    /* 7-0 cache type */
-                               level           : 8,    /* 15-8 level of the
-                                                        * cache in the
-                                                        * hierarchy.
-                                                        */
-                               way             : 8,    /* 23-16 way in the set
-                                                        */
-                               part            : 8,    /* 31-24 part of the
-                                                        * cache
-                                                        */
-                               reserved        : 32;   /* 63-32 is reserved*/
-       } pclid_info_read;
-       struct {
-               u64             cache_type      : 8,    /* 7-0 cache type */
-                               level           : 8,    /* 15-8 level of the
-                                                        * cache in the
-                                                        * hierarchy.
-                                                        */
-                               way             : 8,    /* 23-16 way in the set
-                                                        */
-                               part            : 8,    /* 31-24 part of the
-                                                        * cache
-                                                        */
-                               mesi            : 8,    /* 39-32 cache line
-                                                        * state
-                                                        */
-                               start           : 8,    /* 47-40 lsb of data to
-                                                        * invert
-                                                        */
-                               length          : 8,    /* 55-48 #bits to
-                                                        * invert
-                                                        */
-                               trigger         : 8;    /* 63-56 Trigger error
-                                                        * by doing a load
-                                                        * after the write
-                                                        */
-
-       } pclid_info_write;
-} pal_cache_line_id_u_t;
-
-#define pclid_read_part                pclid_info_read.part
-#define pclid_read_way         pclid_info_read.way
-#define pclid_read_level       pclid_info_read.level
-#define pclid_read_cache_type  pclid_info_read.cache_type
-
-#define pclid_write_trigger    pclid_info_write.trigger
-#define pclid_write_length     pclid_info_write.length
-#define pclid_write_start      pclid_info_write.start
-#define pclid_write_mesi       pclid_info_write.mesi
-#define pclid_write_part       pclid_info_write.part
-#define pclid_write_way                pclid_info_write.way
-#define pclid_write_level      pclid_info_write.level
-#define pclid_write_cache_type pclid_info_write.cache_type
-
-/* Processor cache line part encodings */
-#define PAL_CACHE_LINE_ID_PART_DATA            0       /* Data */
-#define PAL_CACHE_LINE_ID_PART_TAG             1       /* Tag */
-#define PAL_CACHE_LINE_ID_PART_DATA_PROT       2       /* Data protection */
-#define PAL_CACHE_LINE_ID_PART_TAG_PROT                3       /* Tag protection */
-#define PAL_CACHE_LINE_ID_PART_DATA_TAG_PROT   4       /* Data+tag
-                                                        * protection
-                                                        */
-typedef struct pal_cache_line_info_s {
-       pal_status_t            pcli_status;            /* Return status of the read cache line
-                                                        * info call.
-                                                        */
-       u64                     pcli_data;              /* 64-bit data, tag, protection bits .. */
-       u64                     pcli_data_len;          /* data length in bits */
-       pal_cache_line_state_t  pcli_cache_line_state;  /* mesi state */
-
-} pal_cache_line_info_t;
-
-
-/* Machine Check related definitions */
-
-/* Pending event status bits  */
-typedef u64                                    pal_mc_pending_events_t;
-
-#define PAL_MC_PENDING_MCA                     (1 << 0)
-#define PAL_MC_PENDING_INIT                    (1 << 1)
-
-/* Error information type */
-typedef u64                                    pal_mc_info_index_t;
-
-#define PAL_MC_INFO_PROCESSOR                  0       /* Processor */
-#define PAL_MC_INFO_CACHE_CHECK                        1       /* Cache check */
-#define PAL_MC_INFO_TLB_CHECK                  2       /* Tlb check */
-#define PAL_MC_INFO_BUS_CHECK                  3       /* Bus check */
-#define PAL_MC_INFO_REQ_ADDR                   4       /* Requestor address */
-#define PAL_MC_INFO_RESP_ADDR                  5       /* Responder address */
-#define PAL_MC_INFO_TARGET_ADDR                        6       /* Target address */
-#define PAL_MC_INFO_IMPL_DEP                   7       /* Implementation
-                                                        * dependent
-                                                        */
-
-
-typedef struct pal_process_state_info_s {
-       u64             reserved1       : 2,
-                       rz              : 1,    /* PAL_CHECK processor
-                                                * rendezvous
-                                                * successful.
-                                                */
-
-                       ra              : 1,    /* PAL_CHECK attempted
-                                                * a rendezvous.
-                                                */
-                       me              : 1,    /* Distinct multiple
-                                                * errors occurred
-                                                */
-
-                       mn              : 1,    /* Min. state save
-                                                * area has been
-                                                * registered with PAL
-                                                */
-
-                       sy              : 1,    /* Storage integrity
-                                                * synched
-                                                */
-
-
-                       co              : 1,    /* Continuable */
-                       ci              : 1,    /* MC isolated */
-                       us              : 1,    /* Uncontained storage
-                                                * damage.
-                                                */
-
-
-                       hd              : 1,    /* Non-essential hw
-                                                * lost (no loss of
-                                                * functionality)
-                                                * causing the
-                                                * processor to run in
-                                                * degraded mode.
-                                                */
-
-                       tl              : 1,    /* 1 => MC occurred
-                                                * after an instr was
-                                                * executed but before
-                                                * the trap that
-                                                * resulted from instr
-                                                * execution was
-                                                * generated.
-                                                * (Trap Lost )
-                                                */
-                       mi              : 1,    /* More information available
-                                                * call PAL_MC_ERROR_INFO
-                                                */
-                       pi              : 1,    /* Precise instruction pointer */
-                       pm              : 1,    /* Precise min-state save area */
-
-                       dy              : 1,    /* Processor dynamic
-                                                * state valid
-                                                */
-
-
-                       in              : 1,    /* 0 = MC, 1 = INIT */
-                       rs              : 1,    /* RSE valid */
-                       cm              : 1,    /* MC corrected */
-                       ex              : 1,    /* MC is expected */
-                       cr              : 1,    /* Control regs valid*/
-                       pc              : 1,    /* Perf cntrs valid */
-                       dr              : 1,    /* Debug regs valid */
-                       tr              : 1,    /* Translation regs
-                                                * valid
-                                                */
-                       rr              : 1,    /* Region regs valid */
-                       ar              : 1,    /* App regs valid */
-                       br              : 1,    /* Branch regs valid */
-                       pr              : 1,    /* Predicate registers
-                                                * valid
-                                                */
-
-                       fp              : 1,    /* fp registers valid*/
-                       b1              : 1,    /* Preserved bank one
-                                                * general registers
-                                                * are valid
-                                                */
-                       b0              : 1,    /* Preserved bank zero
-                                                * general registers
-                                                * are valid
-                                                */
-                       gr              : 1,    /* General registers
-                                                * are valid
-                                                * (excl. banked regs)
-                                                */
-                       dsize           : 16,   /* size of dynamic
-                                                * state returned
-                                                * by the processor
-                                                */
-
-                       reserved2       : 11,
-                       cc              : 1,    /* Cache check */
-                       tc              : 1,    /* TLB check */
-                       bc              : 1,    /* Bus check */
-                       rc              : 1,    /* Register file check */
-                       uc              : 1;    /* Uarch check */
-
-} pal_processor_state_info_t;
-
-typedef struct pal_cache_check_info_s {
-       u64             op              : 4,    /* Type of cache
-                                                * operation that
-                                                * caused the machine
-                                                * check.
-                                                */
-                       level           : 2,    /* Cache level */
-                       reserved1       : 2,
-                       dl              : 1,    /* Failure in data part
-                                                * of cache line
-                                                */
-                       tl              : 1,    /* Failure in tag part
-                                                * of cache line
-                                                */
-                       dc              : 1,    /* Failure in dcache */
-                       ic              : 1,    /* Failure in icache */
-                       mesi            : 3,    /* Cache line state */
-                       mv              : 1,    /* mesi valid */
-                       way             : 5,    /* Way in which the
-                                                * error occurred
-                                                */
-                       wiv             : 1,    /* Way field valid */
-                       reserved2       : 10,
-
-                       index           : 20,   /* Cache line index */
-                       reserved3       : 2,
-
-                       is              : 1,    /* instruction set (1 == ia32) */
-                       iv              : 1,    /* instruction set field valid */
-                       pl              : 2,    /* privilege level */
-                       pv              : 1,    /* privilege level field valid */
-                       mcc             : 1,    /* Machine check corrected */
-                       tv              : 1,    /* Target address
-                                                * structure is valid
-                                                */
-                       rq              : 1,    /* Requester identifier
-                                                * structure is valid
-                                                */
-                       rp              : 1,    /* Responder identifier
-                                                * structure is valid
-                                                */
-                       pi              : 1;    /* Precise instruction pointer
-                                                * structure is valid
-                                                */
-} pal_cache_check_info_t;
-
-typedef struct pal_tlb_check_info_s {
-
-       u64             tr_slot         : 8,    /* Slot# of TR where
-                                                * error occurred
-                                                */
-                       trv             : 1,    /* tr_slot field is valid */
-                       reserved1       : 1,
-                       level           : 2,    /* TLB level where failure occurred */
-                       reserved2       : 4,
-                       dtr             : 1,    /* Fail in data TR */
-                       itr             : 1,    /* Fail in inst TR */
-                       dtc             : 1,    /* Fail in data TC */
-                       itc             : 1,    /* Fail in inst. TC */
-                       op              : 4,    /* Cache operation */
-                       reserved3       : 30,
-
-                       is              : 1,    /* instruction set (1 == ia32) */
-                       iv              : 1,    /* instruction set field valid */
-                       pl              : 2,    /* privilege level */
-                       pv              : 1,    /* privilege level field valid */
-                       mcc             : 1,    /* Machine check corrected */
-                       tv              : 1,    /* Target address
-                                                * structure is valid
-                                                */
-                       rq              : 1,    /* Requester identifier
-                                                * structure is valid
-                                                */
-                       rp              : 1,    /* Responder identifier
-                                                * structure is valid
-                                                */
-                       pi              : 1;    /* Precise instruction pointer
-                                                * structure is valid
-                                                */
-} pal_tlb_check_info_t;
-
-typedef struct pal_bus_check_info_s {
-       u64             size            : 5,    /* Xaction size */
-                       ib              : 1,    /* Internal bus error */
-                       eb              : 1,    /* External bus error */
-                       cc              : 1,    /* Error occurred
-                                                * during cache-cache
-                                                * transfer.
-                                                */
-                       type            : 8,    /* Bus xaction type*/
-                       sev             : 5,    /* Bus error severity*/
-                       hier            : 2,    /* Bus hierarchy level */
-                       reserved1       : 1,
-                       bsi             : 8,    /* Bus error status
-                                                * info
-                                                */
-                       reserved2       : 22,
-
-                       is              : 1,    /* instruction set (1 == ia32) */
-                       iv              : 1,    /* instruction set field valid */
-                       pl              : 2,    /* privilege level */
-                       pv              : 1,    /* privilege level field valid */
-                       mcc             : 1,    /* Machine check corrected */
-                       tv              : 1,    /* Target address
-                                                * structure is valid
-                                                */
-                       rq              : 1,    /* Requester identifier
-                                                * structure is valid
-                                                */
-                       rp              : 1,    /* Responder identifier
-                                                * structure is valid
-                                                */
-                       pi              : 1;    /* Precise instruction pointer
-                                                * structure is valid
-                                                */
-} pal_bus_check_info_t;
-
-typedef struct pal_reg_file_check_info_s {
-       u64             id              : 4,    /* Register file identifier */
-                       op              : 4,    /* Type of register
-                                                * operation that
-                                                * caused the machine
-                                                * check.
-                                                */
-                       reg_num         : 7,    /* Register number */
-                       rnv             : 1,    /* reg_num valid */
-                       reserved2       : 38,
-
-                       is              : 1,    /* instruction set (1 == ia32) */
-                       iv              : 1,    /* instruction set field valid */
-                       pl              : 2,    /* privilege level */
-                       pv              : 1,    /* privilege level field valid */
-                       mcc             : 1,    /* Machine check corrected */
-                       reserved3       : 3,
-                       pi              : 1;    /* Precise instruction pointer
-                                                * structure is valid
-                                                */
-} pal_reg_file_check_info_t;
-
-typedef struct pal_uarch_check_info_s {
-       u64             sid             : 5,    /* Structure identification */
-                       level           : 3,    /* Level of failure */
-                       array_id        : 4,    /* Array identification */
-                       op              : 4,    /* Type of
-                                                * operation that
-                                                * caused the machine
-                                                * check.
-                                                */
-                       way             : 6,    /* Way of structure */
-                       wv              : 1,    /* way valid */
-                       xv              : 1,    /* index valid */
-                       reserved1       : 8,
-                       index           : 8,    /* Index or set of the uarch
-                                                * structure that failed.
-                                                */
-                       reserved2       : 24,
-
-                       is              : 1,    /* instruction set (1 == ia32) */
-                       iv              : 1,    /* instruction set field valid */
-                       pl              : 2,    /* privilege level */
-                       pv              : 1,    /* privilege level field valid */
-                       mcc             : 1,    /* Machine check corrected */
-                       tv              : 1,    /* Target address
-                                                * structure is valid
-                                                */
-                       rq              : 1,    /* Requester identifier
-                                                * structure is valid
-                                                */
-                       rp              : 1,    /* Responder identifier
-                                                * structure is valid
-                                                */
-                       pi              : 1;    /* Precise instruction pointer
-                                                * structure is valid
-                                                */
-} pal_uarch_check_info_t;
-
-typedef union pal_mc_error_info_u {
-       u64                             pmei_data;
-       pal_processor_state_info_t      pme_processor;
-       pal_cache_check_info_t          pme_cache;
-       pal_tlb_check_info_t            pme_tlb;
-       pal_bus_check_info_t            pme_bus;
-       pal_reg_file_check_info_t       pme_reg_file;
-       pal_uarch_check_info_t          pme_uarch;
-} pal_mc_error_info_t;
-
-#define pmci_proc_unknown_check                        pme_processor.uc
-#define pmci_proc_bus_check                    pme_processor.bc
-#define pmci_proc_tlb_check                    pme_processor.tc
-#define pmci_proc_cache_check                  pme_processor.cc
-#define pmci_proc_dynamic_state_size           pme_processor.dsize
-#define pmci_proc_gpr_valid                    pme_processor.gr
-#define pmci_proc_preserved_bank0_gpr_valid    pme_processor.b0
-#define pmci_proc_preserved_bank1_gpr_valid    pme_processor.b1
-#define pmci_proc_fp_valid                     pme_processor.fp
-#define pmci_proc_predicate_regs_valid         pme_processor.pr
-#define pmci_proc_branch_regs_valid            pme_processor.br
-#define pmci_proc_app_regs_valid               pme_processor.ar
-#define pmci_proc_region_regs_valid            pme_processor.rr
-#define pmci_proc_translation_regs_valid       pme_processor.tr
-#define pmci_proc_debug_regs_valid             pme_processor.dr
-#define pmci_proc_perf_counters_valid          pme_processor.pc
-#define pmci_proc_control_regs_valid           pme_processor.cr
-#define pmci_proc_machine_check_expected       pme_processor.ex
-#define pmci_proc_machine_check_corrected      pme_processor.cm
-#define pmci_proc_rse_valid                    pme_processor.rs
-#define pmci_proc_machine_check_or_init                pme_processor.in
-#define pmci_proc_dynamic_state_valid          pme_processor.dy
-#define pmci_proc_operation                    pme_processor.op
-#define pmci_proc_trap_lost                    pme_processor.tl
-#define pmci_proc_hardware_damage              pme_processor.hd
-#define pmci_proc_uncontained_storage_damage   pme_processor.us
-#define pmci_proc_machine_check_isolated       pme_processor.ci
-#define pmci_proc_continuable                  pme_processor.co
-#define pmci_proc_storage_integrity_synced     pme_processor.sy
-#define pmci_proc_min_state_save_area_regd     pme_processor.mn
-#define        pmci_proc_distinct_multiple_errors      pme_processor.me
-#define pmci_proc_pal_attempted_rendezvous     pme_processor.ra
-#define pmci_proc_pal_rendezvous_complete      pme_processor.rz
-
-
-#define pmci_cache_level                       pme_cache.level
-#define pmci_cache_line_state                  pme_cache.mesi
-#define pmci_cache_line_state_valid            pme_cache.mv
-#define pmci_cache_line_index                  pme_cache.index
-#define pmci_cache_instr_cache_fail            pme_cache.ic
-#define pmci_cache_data_cache_fail             pme_cache.dc
-#define pmci_cache_line_tag_fail               pme_cache.tl
-#define pmci_cache_line_data_fail              pme_cache.dl
-#define pmci_cache_operation                   pme_cache.op
-#define pmci_cache_way_valid                   pme_cache.wiv
-#define pmci_cache_target_address_valid                pme_cache.tv
-#define pmci_cache_way                         pme_cache.way
-#define pmci_cache_mc                          pme_cache.mcc
-
-#define pmci_tlb_instr_translation_cache_fail  pme_tlb.itc
-#define pmci_tlb_data_translation_cache_fail   pme_tlb.dtc
-#define pmci_tlb_instr_translation_reg_fail    pme_tlb.itr
-#define pmci_tlb_data_translation_reg_fail     pme_tlb.dtr
-#define pmci_tlb_translation_reg_slot          pme_tlb.tr_slot
-#define pmci_tlb_mc                            pme_tlb.mcc
-
-#define pmci_bus_status_info                   pme_bus.bsi
-#define pmci_bus_req_address_valid             pme_bus.rq
-#define pmci_bus_resp_address_valid            pme_bus.rp
-#define pmci_bus_target_address_valid          pme_bus.tv
-#define pmci_bus_error_severity                        pme_bus.sev
-#define pmci_bus_transaction_type              pme_bus.type
-#define pmci_bus_cache_cache_transfer          pme_bus.cc
-#define pmci_bus_transaction_size              pme_bus.size
-#define pmci_bus_internal_error                        pme_bus.ib
-#define pmci_bus_external_error                        pme_bus.eb
-#define pmci_bus_mc                            pme_bus.mcc
-
-/*
- * NOTE: this min_state_save area struct only includes the 1KB
- * architectural state save area.  The other 3 KB is scratch space
- * for PAL.
- */
-
-typedef struct pal_min_state_area_s {
-       u64     pmsa_nat_bits;          /* nat bits for saved GRs  */
-       u64     pmsa_gr[15];            /* GR1  - GR15             */
-       u64     pmsa_bank0_gr[16];      /* GR16 - GR31             */
-       u64     pmsa_bank1_gr[16];      /* GR16 - GR31             */
-       u64     pmsa_pr;                /* predicate registers     */
-       u64     pmsa_br0;               /* branch register 0       */
-       u64     pmsa_rsc;               /* ar.rsc                  */
-       u64     pmsa_iip;               /* cr.iip                  */
-       u64     pmsa_ipsr;              /* cr.ipsr                 */
-       u64     pmsa_ifs;               /* cr.ifs                  */
-       u64     pmsa_xip;               /* previous iip            */
-       u64     pmsa_xpsr;              /* previous psr            */
-       u64     pmsa_xfs;               /* previous ifs            */
-       u64     pmsa_br1;               /* branch register 1       */
-       u64     pmsa_reserved[70];      /* pal_min_state_area should total to 1KB */
-} pal_min_state_area_t;
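
(Illustration, not part of this changeset: the 1KB claim above can be checked
at compile time, since the struct packs 1 + 15 + 16 + 16 + 10 + 70 = 128 u64s
= 1024 bytes.  A minimal sketch using the negative-array-size trick; the
typedef name is made up.)

typedef char pmsa_size_check[sizeof (pal_min_state_area_t) == 1024 ? 1 : -1];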
-
-
-struct ia64_pal_retval {
-       /*
-        * A zero status value indicates call completed without error.
-        * A negative status value indicates the reason for call failure.
-        * A positive status value indicates success but an
-        * informational value should be printed (e.g., "reboot for
-        * change to take effect").
-        */
-       s64 status;
-       u64 v0;
-       u64 v1;
-       u64 v2;
-};
-
-/*
- * Note: Currently unused PAL arguments are generally labeled
- * "reserved" so the value specified in the PAL documentation
- * (generally 0) MUST be passed.  Reserved parameters are not optional
- * parameters.
- */
-extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64, u64);
-extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
-extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
-extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
-extern void ia64_save_scratch_fpregs (struct ia64_fpreg *);
-extern void ia64_load_scratch_fpregs (struct ia64_fpreg *);
-
-#define PAL_CALL(iprv,a0,a1,a2,a3) do {                        \
-       struct ia64_fpreg fr[6];                        \
-       ia64_save_scratch_fpregs(fr);                   \
-       iprv = ia64_pal_call_static(a0, a1, a2, a3, 0); \
-       ia64_load_scratch_fpregs(fr);                   \
-} while (0)
-
-#define PAL_CALL_IC_OFF(iprv,a0,a1,a2,a3) do {         \
-       struct ia64_fpreg fr[6];                        \
-       ia64_save_scratch_fpregs(fr);                   \
-       iprv = ia64_pal_call_static(a0, a1, a2, a3, 1); \
-       ia64_load_scratch_fpregs(fr);                   \
-} while (0)
-
-#define PAL_CALL_STK(iprv,a0,a1,a2,a3) do {            \
-       struct ia64_fpreg fr[6];                        \
-       ia64_save_scratch_fpregs(fr);                   \
-       iprv = ia64_pal_call_stacked(a0, a1, a2, a3);   \
-       ia64_load_scratch_fpregs(fr);                   \
-} while (0)
-
-#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) do {                   \
-       struct ia64_fpreg fr[6];                                \
-       ia64_save_scratch_fpregs(fr);                           \
-       iprv = ia64_pal_call_phys_static(a0, a1, a2, a3);       \
-       ia64_load_scratch_fpregs(fr);                           \
-} while (0)
-
-#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) do {               \
-       struct ia64_fpreg fr[6];                                \
-       ia64_save_scratch_fpregs(fr);                           \
-       iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3);      \
-       ia64_load_scratch_fpregs(fr);                           \
-} while (0)
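
(Usage sketch, not part of this changeset: how callers combine these wrapper
macros with struct ia64_pal_retval.  PAL_FREQ_BASE is used as the example
index since ia64_pal_freq_base() below wraps it the same way; the function
name here is made up.)

static s64 get_platform_base_freq (u64 *hz)
{
	struct ia64_pal_retval iprv;

	PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);	/* static, virtual-mode call */
	if (iprv.status == 0 && hz)
		*hz = iprv.v0;			/* v0 carries the base frequency */
	return iprv.status;			/* <0 error, 0 ok, >0 informational */
}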
-
-typedef int (*ia64_pal_handler) (u64, ...);
-extern ia64_pal_handler ia64_pal;
-extern void ia64_pal_handler_init (void *);
-
-extern pal_cache_config_info_t         l0d_cache_config_info;
-extern pal_cache_config_info_t         l0i_cache_config_info;
-extern pal_cache_config_info_t         l1_cache_config_info;
-extern pal_cache_config_info_t         l2_cache_config_info;
-
-extern pal_cache_protection_info_t     l0d_cache_protection_info;
-extern pal_cache_protection_info_t     l0i_cache_protection_info;
-extern pal_cache_protection_info_t     l1_cache_protection_info;
-extern pal_cache_protection_info_t     l2_cache_protection_info;
-
-extern pal_cache_config_info_t         pal_cache_config_info_get(pal_cache_level_t,
-                                                                 pal_cache_type_t);
-
-extern pal_cache_protection_info_t     pal_cache_protection_info_get(pal_cache_level_t,
-                                                                     pal_cache_type_t);
-
-
-extern void                            pal_error(int);
-
-
-/* Useful wrappers for the current list of pal procedures */
-
-typedef union pal_bus_features_u {
-       u64     pal_bus_features_val;
-       struct {
-               u64     pbf_reserved1                           :       29;
-               u64     pbf_req_bus_parking                     :       1;
-               u64     pbf_bus_lock_mask                       :       1;
-               u64     pbf_enable_half_xfer_rate               :       1;
-               u64     pbf_reserved2                           :       22;
-               u64     pbf_disable_xaction_queueing            :       1;
-               u64     pbf_disable_resp_err_check              :       1;
-               u64     pbf_disable_berr_check                  :       1;
-               u64     pbf_disable_bus_req_internal_err_signal :       1;
-               u64     pbf_disable_bus_req_berr_signal         :       1;
-               u64     pbf_disable_bus_init_event_check        :       1;
-               u64     pbf_disable_bus_init_event_signal       :       1;
-               u64     pbf_disable_bus_addr_err_check          :       1;
-               u64     pbf_disable_bus_addr_err_signal         :       1;
-               u64     pbf_disable_bus_data_err_check          :       1;
-       } pal_bus_features_s;
-} pal_bus_features_u_t;
-
-extern void pal_bus_features_print (u64);
-
-/* Provide information about configurable processor bus features */
-static inline s64
-ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
-                          pal_bus_features_u_t *features_status,
-                          pal_bus_features_u_t *features_control)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
-       if (features_avail)
-               features_avail->pal_bus_features_val = iprv.v0;
-       if (features_status)
-               features_status->pal_bus_features_val = iprv.v1;
-       if (features_control)
-               features_control->pal_bus_features_val = iprv.v2;
-       return iprv.status;
-}
-
-/* Enables/disables specific processor bus features */
-static inline s64
-ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
-       return iprv.status;
-}
-
-/* Get detailed cache information */
-static inline s64
-ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf)
-{
-       struct ia64_pal_retval iprv;
-
-       PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0);
-
-       if (iprv.status == 0) {
-               conf->pcci_status                 = iprv.status;
-               conf->pcci_info_1.pcci1_data      = iprv.v0;
-               conf->pcci_info_2.pcci2_data      = iprv.v1;
-               conf->pcci_reserved               = iprv.v2;
-       }
-       return iprv.status;
-
-}
-
-/* Get detailed cache protection information */
-static inline s64
-ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot)
-{
-       struct ia64_pal_retval iprv;
-
-       PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0);
-
-       if (iprv.status == 0) {
-               prot->pcpi_status           = iprv.status;
-               prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff;
-               prot->pcp_info[1].pcpi_data = iprv.v0 >> 32;
-               prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff;
-               prot->pcp_info[3].pcpi_data = iprv.v1 >> 32;
-               prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff;
-               prot->pcp_info[5].pcpi_data = iprv.v2 >> 32;
-       }
-       return iprv.status;
-}
-
-/*
- * Flush the processor instruction or data caches.  *PROGRESS must be
- * initialized to zero before calling this for the first time.
- */
-static inline s64
-ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL_IC_OFF(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
-       if (vector)
-               *vector = iprv.v0;
-       *progress = iprv.v1;
-       return iprv.status;
-}
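
(Usage sketch, not part of this changeset: PAL_CACHE_FLUSH may return before
the flush completes, which is why *progress must start at zero.  The reading
of a positive status as "resume with the returned progress" is an assumption,
as is cache_type 1 selecting the instruction cache.)

static void flush_icache_all (void)
{
	u64 progress = 0, vector;	/* progress MUST be initialized to zero */
	s64 status;

	do {
		status = ia64_pal_cache_flush(1, 0, &progress, &vector);
	} while (status > 0);		/* assumed: positive status = call again */
}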
-
-
-/* Initialize the processor controlled caches */
-static inline s64
-ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
-       return iprv.status;
-}
-
-/* Initialize the tags and data of a data or unified cache line of
- * processor controlled cache to known values without the availability
- * of backing memory.
- */
-static inline s64
-ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0);
-       return iprv.status;
-}
-
-
-/* Read the data and tag of a processor controlled cache line for diags */
-static inline s64
-ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_CACHE_READ, line_id.pclid_data, physical_addr, 0);
-       return iprv.status;
-}
-
-/* Return summary information about the hierarchy of caches controlled by the processor */
-static inline s64
-ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0);
-       if (cache_levels)
-               *cache_levels = iprv.v0;
-       if (unique_caches)
-               *unique_caches = iprv.v1;
-       return iprv.status;
-}
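
(Usage sketch, not part of this changeset: combining the summary call with
the per-level config call above.  The cache_type encoding of 2 as
data/unified is an assumption, and printk is used purely for illustration.)

static void print_cache_levels (void)
{
	u64 levels, unique, l;
	pal_cache_config_info_t cci;

	if (ia64_pal_cache_summary(&levels, &unique) != 0)
		return;
	for (l = 0; l < levels; l++)
		if (ia64_pal_cache_config_info(l, 2, &cci) == 0)
			printk("L%lu config word: %lx\n", l + 1,
			       cci.pcci_info_1.pcci1_data);
}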
-
-/* Write the data and tag of a processor-controlled cache line for diags */
-static inline s64
-ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_CACHE_WRITE, line_id.pclid_data, physical_addr, data);
-       return iprv.status;
-}
-
-
-/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */
-static inline s64
-ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
-                   u64 *buffer_size, u64 *buffer_align)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics);
-       if (buffer_size)
-               *buffer_size = iprv.v0;
-       if (buffer_align)
-               *buffer_align = iprv.v1;
-       return iprv.status;
-}
-
-/* Copy relocatable PAL procedures from ROM to memory */
-static inline s64
-ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor);
-       if (pal_proc_offset)
-               *pal_proc_offset = iprv.v0;
-       return iprv.status;
-}
-
-/* Return the number of instruction and data debug register pairs */
-static inline s64
-ia64_pal_debug_info (u64 *inst_regs,  u64 *data_regs)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0);
-       if (inst_regs)
-               *inst_regs = iprv.v0;
-       if (data_regs)
-               *data_regs = iprv.v1;
-
-       return iprv.status;
-}
-
-#ifdef TBD
-/* Switch from IA64-system environment to IA-32 system environment */
-static inline s64
-ia64_pal_enter_ia32_env (u64 ia32_env1, u64 ia32_env2, u64 ia32_env3)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3);
-       return iprv.status;
-}
-#endif
-
-/* Get unique geographical address of this processor on its bus */
-static inline s64
-ia64_pal_fixed_addr (u64 *global_unique_addr)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0);
-       if (global_unique_addr)
-               *global_unique_addr = iprv.v0;
-       return iprv.status;
-}
-
-/* Get base frequency of the platform if generated by the processor */
-static inline s64
-ia64_pal_freq_base (u64 *platform_base_freq)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);
-       if (platform_base_freq)
-               *platform_base_freq = iprv.v0;
-       return iprv.status;
-}
-
-/*
- * Get the ratios of processor frequency, bus frequency and interval timer
- * to the base frequency of the platform
- */
-static inline s64
-ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
-                     struct pal_freq_ratio *itc_ratio)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_FREQ_RATIOS, 0, 0, 0);
-       if (proc_ratio)
-               *(u64 *)proc_ratio = iprv.v0;
-       if (bus_ratio)
-               *(u64 *)bus_ratio = iprv.v1;
-       if (itc_ratio)
-               *(u64 *)itc_ratio = iprv.v2;
-       return iprv.status;
-}
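
(Usage sketch, not part of this changeset: combining PAL_FREQ_BASE and
PAL_FREQ_RATIOS to obtain the ITC frequency.  struct pal_freq_ratio is
defined elsewhere in this header; the num/den field names are assumed.)

static u64 itc_frequency (void)
{
	u64 base;
	struct pal_freq_ratio proc, bus, itc;

	if (ia64_pal_freq_base(&base) != 0 ||
	    ia64_pal_freq_ratios(&proc, &bus, &itc) != 0)
		return 0;
	return base * itc.num / itc.den;	/* assumed field names */
}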
-
-/* Make the processor enter HALT or one of the implementation dependent low
- * power states where prefetching and execution are suspended and cache and
- * TLB coherency is not maintained.
- */
-static inline s64
-ia64_pal_halt (u64 halt_state)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0);
-       return iprv.status;
-}
-
-typedef union pal_power_mgmt_info_u {
-       u64                     ppmi_data;
-       struct {
-              u64              exit_latency            : 16,
-                               entry_latency           : 16,
-                               power_consumption       : 28,
-                               im                      : 1,
-                               co                      : 1,
-                               reserved                : 2;
-       } pal_power_mgmt_info_s;
-} pal_power_mgmt_info_u_t;
-
-/* Return information about the processor's optional power management capabilities. */
-static inline s64
-ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
-       return iprv.status;
-}
-
-/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
- * suspended, but cache and TLB coherency is maintained.
- */
-static inline s64
-ia64_pal_halt_light (void)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0);
-       return iprv.status;
-}
-
-/* Clear all the processor error logging registers and reset the indicator that allows
- * the error logging registers to be written.  This procedure also checks the pending
- * machine check bit and pending INIT bit and reports their states.
- */
-static inline s64
-ia64_pal_mc_clear_log (u64 *pending_vector)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0);
-       if (pending_vector)
-               *pending_vector = iprv.v0;
-       return iprv.status;
-}
-
-/* Ensure that all outstanding transactions in a processor are completed or that any
- * MCA due to these outstanding transactions is taken.
- */
-static inline s64
-ia64_pal_mc_drain (void)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0);
-       return iprv.status;
-}
-
-/* Return the machine check dynamic processor state */
-static inline s64
-ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
-       if (size)
-               *size = iprv.v0;
-       if (pds)
-               *pds = iprv.v1;
-       return iprv.status;
-}
-
-/* Return processor machine check information */
-static inline s64
-ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0);
-       if (size)
-               *size = iprv.v0;
-       if (error_info)
-               *error_info = iprv.v1;
-       return iprv.status;
-}
-
-/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
- * attempt to correct any expected machine checks.
- */
-static inline s64
-ia64_pal_mc_expected (u64 expected, u64 *previous)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0);
-       if (previous)
-               *previous = iprv.v0;
-       return iprv.status;
-}
-
-/* Register a platform dependent location with PAL to which it can save
- * minimal processor state in the event of a machine check or initialization
- * event.
- */
-static inline s64
-ia64_pal_mc_register_mem (u64 physical_addr)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
-       return iprv.status;
-}
-
-/* Restore minimal architectural processor state, set CMC interrupt if necessary
- * and resume execution
- */
-static inline s64
-ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0);
-       return iprv.status;
-}
-
-/* Return the memory attributes implemented by the processor */
-static inline s64
-ia64_pal_mem_attrib (u64 *mem_attrib)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0);
-       if (mem_attrib)
-               *mem_attrib = iprv.v0 & 0xff;
-       return iprv.status;
-}
-
-/* Return the amount of memory needed for second phase of processor
- * self-test and the required alignment of memory.
- */
-static inline s64
-ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0);
-       if (bytes_needed)
-               *bytes_needed = iprv.v0;
-       if (alignment)
-               *alignment = iprv.v1;
-       return iprv.status;
-}
-
-typedef union pal_perf_mon_info_u {
-       u64                       ppmi_data;
-       struct {
-              u64              generic         : 8,
-                               width           : 8,
-                               cycles          : 8,
-                               retired         : 8,
-                               reserved        : 32;
-       } pal_perf_mon_info_s;
-} pal_perf_mon_info_u_t;
-
-/* Return the performance monitor information about what can be counted
- * and how to configure the monitors to count the desired events.
- */
-static inline s64
-ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_PERF_MON_INFO, (unsigned long) pm_buffer, 0, 0);
-       if (pm_info)
-               pm_info->ppmi_data = iprv.v0;
-       return iprv.status;
-}
-
-/* Specifies the physical address of the processor interrupt block
- * and I/O port space.
- */
-static inline s64
-ia64_pal_platform_addr (u64 type, u64 physical_addr)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_PLATFORM_ADDR, type, physical_addr, 0);
-       return iprv.status;
-}
-
-/* Set the SAL PMI entrypoint in memory */
-static inline s64
-ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_PMI_ENTRYPOINT, sal_pmi_entry_addr, 0, 0);
-       return iprv.status;
-}
-
-struct pal_features_s;
-/* Provide information about configurable processor features */
-static inline s64
-ia64_pal_proc_get_features (u64 *features_avail,
-                           u64 *features_status,
-                           u64 *features_control)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0);
-       if (iprv.status == 0) {
-               *features_avail   = iprv.v0;
-               *features_status  = iprv.v1;
-               *features_control = iprv.v2;
-       }
-       return iprv.status;
-}
-
-/* Enable/disable processor dependent features */
-static inline s64
-ia64_pal_proc_set_features (u64 feature_select)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
-       return iprv.status;
-}
-
-/*
- * Put everything in a struct so we avoid the global offset table whenever
- * possible.
- */
-typedef struct ia64_ptce_info_s {
-       u64             base;
-       u32             count[2];
-       u32             stride[2];
-} ia64_ptce_info_t;
-
-/* Return the information required for the architected loop used to purge
- * (initialize) the entire TC
- */
-static inline s64
-ia64_get_ptce (ia64_ptce_info_t *ptce)
-{
-       struct ia64_pal_retval iprv;
-
-       if (!ptce)
-               return -1;
-
-       PAL_CALL(iprv, PAL_PTCE_INFO, 0, 0, 0);
-       if (iprv.status == 0) {
-               ptce->base = iprv.v0;
-               ptce->count[0] = iprv.v1 >> 32;
-               ptce->count[1] = iprv.v1 & 0xffffffff;
-               ptce->stride[0] = iprv.v2 >> 32;
-               ptce->stride[1] = iprv.v2 & 0xffffffff;
-       }
-       return iprv.status;
-}
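
(Usage sketch, not part of this changeset: the returned base/count/stride
values drive the architected nested purge loop, as in Linux's
local_flush_tlb_all().  The ia64_ptce() and ia64_srlz_i() intrinsics are
assumed available; interrupt masking is left to the caller.)

static void purge_tc_all (void)
{
	ia64_ptce_info_t ptce;
	u64 addr;
	u32 i, j;

	if (ia64_get_ptce(&ptce) != 0)
		return;
	addr = ptce.base;
	for (i = 0; i < ptce.count[0]; ++i) {
		for (j = 0; j < ptce.count[1]; ++j) {
			ia64_ptce(addr);	/* purge one TC region */
			addr += ptce.stride[1];
		}
		addr += ptce.stride[0];
	}
	ia64_srlz_i();			/* serialize the instruction stream */
}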
-
-/* Return info about implemented application and control registers. */
-static inline s64
-ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_REGISTER_INFO, info_request, 0, 0);
-       if (reg_info_1)
-               *reg_info_1 = iprv.v0;
-       if (reg_info_2)
-               *reg_info_2 = iprv.v1;
-       return iprv.status;
-}
-
-typedef union pal_hints_u {
-       u64                     ph_data;
-       struct {
-              u64              si              : 1,
-                               li              : 1,
-                               reserved        : 62;
-       } pal_hints_s;
-} pal_hints_u_t;
-
-/* Return information about the register stack and RSE for this processor
- * implementation.
- */
-static inline s64
-ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0);
-       if (num_phys_stacked)
-               *num_phys_stacked = iprv.v0;
-       if (hints)
-               hints->ph_data = iprv.v1;
-       return iprv.status;
-}
-
-/* Cause the processor to enter SHUTDOWN state, where prefetching and execution are
- * suspended, but cache and TLB coherency is maintained.
- * This is usually called in IA-32 mode.
- */
-static inline s64
-ia64_pal_shutdown (void)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_SHUTDOWN, 0, 0, 0);
-       return iprv.status;
-}
-
-/* Perform the second phase of processor self-test. */
-static inline s64
-ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_TEST_PROC, test_addr, test_size, attributes);
-       if (self_test_state)
-               *self_test_state = iprv.v0;
-       return iprv.status;
-}
-
-typedef union  pal_version_u {
-       u64     pal_version_val;
-       struct {
-               u64     pv_pal_b_rev            :       8;
-               u64     pv_pal_b_model          :       8;
-               u64     pv_reserved1            :       8;
-               u64     pv_pal_vendor           :       8;
-               u64     pv_pal_a_rev            :       8;
-               u64     pv_pal_a_model          :       8;
-               u64     pv_reserved2            :       16;
-       } pal_version_s;
-} pal_version_u_t;
-
-
-/* Return PAL version information */
-static inline s64
-ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0);
-       if (pal_min_version)
-               pal_min_version->pal_version_val = iprv.v0;
-
-       if (pal_cur_version)
-               pal_cur_version->pal_version_val = iprv.v1;
-
-       return iprv.status;
-}
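
(Usage sketch, not part of this changeset: decoding the version words with
the pal_version_u_t bitfields above; printk output is illustrative.)

static void print_pal_version (void)
{
	pal_version_u_t min_ver, cur_ver;

	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
		return;
	printk("PAL_B model %u rev %u, vendor %u\n",
	       (unsigned) cur_ver.pal_version_s.pv_pal_b_model,
	       (unsigned) cur_ver.pal_version_s.pv_pal_b_rev,
	       (unsigned) cur_ver.pal_version_s.pv_pal_vendor);
}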
-
-typedef union pal_tc_info_u {
-       u64                     pti_val;
-       struct {
-              u64              num_sets        :       8,
-                               associativity   :       8,
-                               num_entries     :       16,
-                               pf              :       1,
-                               unified         :       1,
-                               reduce_tr       :       1,
-                               reserved        :       29;
-       } pal_tc_info_s;
-} pal_tc_info_u_t;
-
-#define tc_reduce_tr           pal_tc_info_s.reduce_tr
-#define tc_unified             pal_tc_info_s.unified
-#define tc_pf                  pal_tc_info_s.pf
-#define tc_num_entries         pal_tc_info_s.num_entries
-#define tc_associativity       pal_tc_info_s.associativity
-#define tc_num_sets            pal_tc_info_s.num_sets
-
-
-/* Return information about the virtual memory characteristics of the processor
- * implementation.
- */
-static inline s64
-ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_pages)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_VM_INFO, tc_level, tc_type, 0);
-       if (tc_info)
-               tc_info->pti_val = iprv.v0;
-       if (tc_pages)
-               *tc_pages = iprv.v1;
-       return iprv.status;
-}
-
-/* Get page size information about the virtual memory characteristics of the processor
- * implementation.
- */
-static inline s64
-ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0);
-       if (tr_pages)
-               *tr_pages = iprv.v0;
-       if (vw_pages)
-               *vw_pages = iprv.v1;
-       return iprv.status;
-}
-
-typedef union pal_vm_info_1_u {
-       u64                     pvi1_val;
-       struct {
-               u64             vw              : 1,
-                               phys_add_size   : 7,
-                               key_size        : 8,
-                               max_pkr         : 8,
-                               hash_tag_id     : 8,
-                               max_dtr_entry   : 8,
-                               max_itr_entry   : 8,
-                               max_unique_tcs  : 8,
-                               num_tc_levels   : 8;
-       } pal_vm_info_1_s;
-} pal_vm_info_1_u_t;
-
-typedef union pal_vm_info_2_u {
-       u64                     pvi2_val;
-       struct {
-               u64             impl_va_msb     : 8,
-                               rid_size        : 8,
-                               reserved        : 48;
-       } pal_vm_info_2_s;
-} pal_vm_info_2_u_t;
-
-/* Get summary information about the virtual memory characteristics of the processor
- * implementation.
- */
-static inline s64
-ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0);
-       if (vm_info_1)
-               vm_info_1->pvi1_val = iprv.v0;
-       if (vm_info_2)
-               vm_info_2->pvi2_val = iprv.v1;
-       return iprv.status;
-}
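
(Usage sketch, not part of this changeset: reading the address-space limits
out of the two info words above.  Treating impl_va_msb as the most
significant implemented virtual-address bit, so bits = msb + 1, is an
assumption.)

static void print_vm_limits (void)
{
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;

	if (ia64_pal_vm_summary(&vm1, &vm2) != 0)
		return;
	printk("%u-bit physical, %u-bit virtual, %u-bit region IDs\n",
	       (unsigned) vm1.pal_vm_info_1_s.phys_add_size,
	       (unsigned) vm2.pal_vm_info_2_s.impl_va_msb + 1,
	       (unsigned) vm2.pal_vm_info_2_s.rid_size);
}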
-
-typedef union pal_itr_valid_u {
-       u64                     piv_val;
-       struct {
-              u64              access_rights_valid     : 1,
-                               priv_level_valid        : 1,
-                               dirty_bit_valid         : 1,
-                               mem_attr_valid          : 1,
-                               reserved                : 60;
-       } pal_tr_valid_s;
-} pal_tr_valid_u_t;
-
-/* Read a translation register */
-static inline s64
-ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type, (u64)ia64_tpa(tr_buffer));
-       if (tr_valid)
-               tr_valid->piv_val = iprv.v0;
-       return iprv.status;
-}
-
-/*
- * PAL_PREFETCH_VISIBILITY transaction types
- */
-#define PAL_VISIBILITY_VIRTUAL         0
-#define PAL_VISIBILITY_PHYSICAL                1
-
-/*
- * PAL_PREFETCH_VISIBILITY return codes
- */
-#define PAL_VISIBILITY_OK              1
-#define PAL_VISIBILITY_OK_REMOTE_NEEDED        0
-#define PAL_VISIBILITY_INVAL_ARG       -2
-#define PAL_VISIBILITY_ERROR           -3
-
-static inline s64
-ia64_pal_prefetch_visibility (s64 trans_type)
-{
-       struct ia64_pal_retval iprv;
-       PAL_CALL(iprv, PAL_PREFETCH_VISIBILITY, trans_type, 0, 0);
-       return iprv.status;
-}
-
-#ifdef CONFIG_VTI
-#include <asm/vmx_pal.h>
-#endif // CONFIG_VTI
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IA64_PAL_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/pgalloc.h
--- a/xen/include/asm-ia64/pgalloc.h    Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,196 +0,0 @@
-#ifndef _ASM_IA64_PGALLOC_H
-#define _ASM_IA64_PGALLOC_H
-
-/*
- * This file contains the functions and defines necessary to allocate
- * page tables.
- *
- * This hopefully works with any (fixed) ia-64 page-size, as defined
- * in <asm/page.h> (currently 8192).
- *
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 2000, Goutham Rao <goutham.rao@xxxxxxxxx>
- */
-
-#include <linux/config.h>
-
-#include <linux/compiler.h>
-#include <linux/mm.h>
-#include <linux/page-flags.h>
-#include <linux/threads.h>
-
-#include <asm/mmu_context.h>
-#include <asm/processor.h>
-
-/*
- * Very stupidly, we used to get new pgd's and pmd's, init their contents
- * to point to the NULL versions of the next level page table, later on
- * completely re-init them the same way, then free them up.  This wasted
- * a lot of work and caused unnecessary memory traffic.  How broken...
- * We fix this by caching them.
- */
-#define pgd_quicklist          (local_cpu_data->pgd_quick)
-#define pmd_quicklist          (local_cpu_data->pmd_quick)
-#define pgtable_cache_size     (local_cpu_data->pgtable_cache_sz)
-
-static inline pgd_t*
-pgd_alloc_one_fast (struct mm_struct *mm)
-{
-       unsigned long *ret = NULL;
-
-       preempt_disable();
-
-       ret = pgd_quicklist;
-       if (likely(ret != NULL)) {
-               pgd_quicklist = (unsigned long *)(*ret);
-               ret[0] = 0;
-               --pgtable_cache_size;
-       } else
-               ret = NULL;
-
-       preempt_enable();
-
-       return (pgd_t *) ret;
-}
-
-static inline pgd_t*
-pgd_alloc (struct mm_struct *mm)
-{
-       /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
-       pgd_t *pgd = pgd_alloc_one_fast(mm);
-
-       if (unlikely(pgd == NULL)) {
-#ifdef XEN
-               pgd = (pgd_t *)alloc_xenheap_page();
-               memset(pgd,0,PAGE_SIZE);
-#else
-               pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-#endif
-       }
-       return pgd;
-}
-
-static inline void
-pgd_free (pgd_t *pgd)
-{
-       preempt_disable();
-       *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-       pgd_quicklist = (unsigned long *) pgd;
-       ++pgtable_cache_size;
-       preempt_enable();
-}
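
(Note, not part of this changeset: the quicklists are singly linked through
the free pages themselves.  pgd_free() stores the old list head in the first
word of the page being freed, and pgd_alloc_one_fast() reads the next head
back out of *ret, so no separate list nodes are needed.)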
-
-static inline void
-pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
-{
-       pud_val(*pud_entry) = __pa(pmd);
-}
-
-static inline pmd_t*
-pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
-{
-       unsigned long *ret = NULL;
-
-       preempt_disable();
-
-       ret = (unsigned long *)pmd_quicklist;
-       if (likely(ret != NULL)) {
-               pmd_quicklist = (unsigned long *)(*ret);
-               ret[0] = 0;
-               --pgtable_cache_size;
-       }
-
-       preempt_enable();
-
-       return (pmd_t *)ret;
-}
-
-static inline pmd_t*
-pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
-{
-#ifdef XEN
-       pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
-       memset(pmd,0,PAGE_SIZE);
-#else
-       pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-#endif
-
-       return pmd;
-}
-
-static inline void
-pmd_free (pmd_t *pmd)
-{
-       preempt_disable();
-       *(unsigned long *)pmd = (unsigned long) pmd_quicklist;
-       pmd_quicklist = (unsigned long *) pmd;
-       ++pgtable_cache_size;
-       preempt_enable();
-}
-
-#define __pmd_free_tlb(tlb, pmd)       pmd_free(pmd)
-
-static inline void
-pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
-{
-       pmd_val(*pmd_entry) = page_to_phys(pte);
-}
-
-static inline void
-pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
-{
-       pmd_val(*pmd_entry) = __pa(pte);
-}
-
-static inline struct page *
-pte_alloc_one (struct mm_struct *mm, unsigned long addr)
-{
-#ifdef XEN
-       struct page *pte = alloc_xenheap_page();
-       memset(pte,0,PAGE_SIZE);
-#else
-       struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-#endif
-
-       return pte;
-}
-
-static inline pte_t *
-pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
-{
-#ifdef XEN
-       pte_t *pte = (pte_t *)alloc_xenheap_page();
-       memset(pte,0,PAGE_SIZE);
-#else
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-#endif
-
-       return pte;
-}
-
-static inline void
-pte_free (struct page *pte)
-{
-#ifdef XEN
-       free_xenheap_page(pte);
-#else
-       __free_page(pte);
-#endif
-}
-
-static inline void
-pte_free_kernel (pte_t *pte)
-{
-#ifdef XEN
-       free_xenheap_page((unsigned long) pte);
-#else
-       free_page((unsigned long) pte);
-#endif
-}
-
-#define __pte_free_tlb(tlb, pte)       tlb_remove_page((tlb), (pte))
-
-extern void check_pgt_cache (void);
-
-#endif /* _ASM_IA64_PGALLOC_H */
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/processor.h
--- a/xen/include/asm-ia64/processor.h  Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,705 +0,0 @@
-#ifndef _ASM_IA64_PROCESSOR_H
-#define _ASM_IA64_PROCESSOR_H
-
-/*
- * Copyright (C) 1998-2004 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
- * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
- *
- * 11/24/98    S.Eranian       added ia64_set_iva()
- * 12/03/99    D. Mosberger    implement thread_saved_pc() via kernel unwind API
- * 06/16/00    A. Mallick      added csd/ssd/tssd for ia32 support
- */
-
-#include <linux/config.h>
-
-#include <asm/intrinsics.h>
-#include <asm/kregs.h>
-#include <asm/ptrace.h>
-#include <asm/ustack.h>
-
-/* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
-#define ARCH_HAS_SCHED_DOMAIN
-
-#define IA64_NUM_DBG_REGS      8
-/*
- * Limits for PMC and PMD are set to less than maximum architected values
- * but should be sufficient for a while
- */
-#define IA64_NUM_PMC_REGS      32
-#define IA64_NUM_PMD_REGS      32
-
-#define DEFAULT_MAP_BASE       __IA64_UL_CONST(0x2000000000000000)
-#define DEFAULT_TASK_SIZE      __IA64_UL_CONST(0xa000000000000000)
-
-/*
- * TASK_SIZE is really mis-named: it is actually the maximum user
- * space address (plus one).  On IA-64, there are five regions of 2TB
- * each (assuming 8KB page size), for a total of 8TB of user virtual
- * address space.
- */
-#define TASK_SIZE              (current->thread.task_size)
-
-/*
- * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a 
mapping for
- * address-space MM.  Note that with 32-bit tasks, this is still 
DEFAULT_TASK_SIZE,
- * because the kernel may have installed helper-mappings above TASK_SIZE.  For 
example,
- * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
- */
-#define MM_VM_SIZE(mm)         DEFAULT_TASK_SIZE
-
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE     (current->thread.map_base)
-
-#define IA64_THREAD_FPH_VALID  (__IA64_UL(1) << 0)     /* floating-point high state valid? */
-#define IA64_THREAD_DBG_VALID  (__IA64_UL(1) << 1)     /* debug registers valid? */
-#define IA64_THREAD_PM_VALID   (__IA64_UL(1) << 2)     /* performance registers valid? */
-#define IA64_THREAD_UAC_NOPRINT        (__IA64_UL(1) << 3)     /* don't log unaligned accesses */
-#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4)     /* generate SIGBUS on unaligned acc. */
-                                                       /* bit 5 is currently unused */
-#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)  /* don't log any fpswa faults */
-#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)  /* send a SIGFPE for fpswa faults */
-
-#define IA64_THREAD_UAC_SHIFT  3
-#define IA64_THREAD_UAC_MASK   (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
-#define IA64_THREAD_FPEMU_SHIFT        6
-#define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
-
-
-/*
- * This shift should be large enough to be able to represent 1000000000/itc_freq with good
- * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
- * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
- */
-#define IA64_NSEC_PER_CYC_SHIFT        30
-
-#ifndef __ASSEMBLY__
-
-#include <linux/cache.h>
-#include <linux/compiler.h>
-#include <linux/threads.h>
-#include <linux/types.h>
-
-#include <asm/fpu.h>
-#include <asm/page.h>
-#include <asm/percpu.h>
-#include <asm/rse.h>
-#include <asm/unwind.h>
-#include <asm/atomic.h>
-#ifdef CONFIG_NUMA
-#include <asm/nodedata.h>
-#endif
-#ifdef XEN
-#include <asm/xenprocessor.h>
-#endif
-
-#ifndef XEN
-/* like above but expressed as bitfields for more efficient access: */
-struct ia64_psr {
-       __u64 reserved0 : 1;
-       __u64 be : 1;
-       __u64 up : 1;
-       __u64 ac : 1;
-       __u64 mfl : 1;
-       __u64 mfh : 1;
-       __u64 reserved1 : 7;
-       __u64 ic : 1;
-       __u64 i : 1;
-       __u64 pk : 1;
-       __u64 reserved2 : 1;
-       __u64 dt : 1;
-       __u64 dfl : 1;
-       __u64 dfh : 1;
-       __u64 sp : 1;
-       __u64 pp : 1;
-       __u64 di : 1;
-       __u64 si : 1;
-       __u64 db : 1;
-       __u64 lp : 1;
-       __u64 tb : 1;
-       __u64 rt : 1;
-       __u64 reserved3 : 4;
-       __u64 cpl : 2;
-       __u64 is : 1;
-       __u64 mc : 1;
-       __u64 it : 1;
-       __u64 id : 1;
-       __u64 da : 1;
-       __u64 dd : 1;
-       __u64 ss : 1;
-       __u64 ri : 2;
-       __u64 ed : 1;
-       __u64 bn : 1;
-       __u64 reserved4 : 19;
-};
-#endif
-
-/*
- * CPU type, hardware bug flags, and per-CPU state.  Frequently used
- * state comes earlier:
- */
-struct cpuinfo_ia64 {
-       __u32 softirq_pending;
-       __u64 itm_delta;        /* # of clock cycles between clock ticks */
-       __u64 itm_next;         /* interval timer mask value to use for next clock tick */
-       __u64 nsec_per_cyc;     /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
-       __u64 unimpl_va_mask;   /* mask of unimplemented virtual address bits (from PAL) */
-       __u64 unimpl_pa_mask;   /* mask of unimplemented physical address bits (from PAL) */
-       __u64 *pgd_quick;
-       __u64 *pmd_quick;
-       __u64 pgtable_cache_sz;
-       __u64 itc_freq;         /* frequency of ITC counter */
-       __u64 proc_freq;        /* frequency of processor */
-       __u64 cyc_per_usec;     /* itc_freq/1000000 */
-       __u64 ptce_base;
-       __u32 ptce_count[2];
-       __u32 ptce_stride[2];
-       struct task_struct *ksoftirqd;  /* kernel softirq daemon for this CPU */
-
-#ifdef CONFIG_SMP
-       __u64 loops_per_jiffy;
-       int cpu;
-#endif
-
-       /* CPUID-derived information: */
-       __u64 ppn;
-       __u64 features;
-       __u8 number;
-       __u8 revision;
-       __u8 model;
-       __u8 family;
-       __u8 archrev;
-       char vendor[16];
-
-#ifdef CONFIG_NUMA
-       struct ia64_node_data *node_data;
-#endif
-};
-
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
-
-/*
- * The "local" data variable.  It refers to the per-CPU data of the currently 
executing
- * CPU, much like "current" points to the per-task data of the currently 
executing task.
- * Do not use the address of local_cpu_data, since it will be different from
- * cpu_data(smp_processor_id())!
- */
-#define local_cpu_data         (&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu)          (&per_cpu(cpu_info, cpu))
-
-extern void identify_cpu (struct cpuinfo_ia64 *);
-extern void print_cpu_info (struct cpuinfo_ia64 *);
-
-typedef struct {
-       unsigned long seg;
-} mm_segment_t;
-
-#define SET_UNALIGN_CTL(task,value)                                                    \
-({                                                                                     \
-       (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)          \
-                               | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \
-       0;                                                                              \
-})
-#define GET_UNALIGN_CTL(task,addr)                                                     \
-({                                                                                     \
-       put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,        \
-                (int __user *) (addr));                                                \
-})
-
-#define SET_FPEMU_CTL(task,value)                                                      \
-({                                                                                     \
-       (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)                \
-                         | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));   \
-       0;                                                                              \
-})
-#define GET_FPEMU_CTL(task,addr)                                                       \
-({                                                                                     \
-       put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,    \
-                (int __user *) (addr));                                                \
-})
-
-#ifdef CONFIG_IA32_SUPPORT
-struct desc_struct {
-       unsigned int a, b;
-};
-
-#define desc_empty(desc)               (!((desc)->a + (desc)->b))
-#define desc_equal(desc1, desc2)       (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-
-#define GDT_ENTRY_TLS_ENTRIES  3
-#define GDT_ENTRY_TLS_MIN      6
-#define GDT_ENTRY_TLS_MAX      (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
-
-#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
-
-struct partial_page_list;
-#endif
-
-struct thread_struct {
-       __u32 flags;                    /* various thread flags (see IA64_THREAD_*) */
-       /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
-       __u8 on_ustack;                 /* executing on user-stacks? */
-       __u8 pad[3];
-       __u64 ksp;                      /* kernel stack pointer */
-       __u64 map_base;                 /* base address for get_unmapped_area() */
-       __u64 task_size;                /* limit for task size */
-       __u64 rbs_bot;                  /* the base address for the RBS */
-       int last_fph_cpu;               /* CPU that may hold the contents of f32-f127 */
-
-#ifdef CONFIG_IA32_SUPPORT
-       __u64 eflag;                    /* IA32 EFLAGS reg */
-       __u64 fsr;                      /* IA32 floating pt status reg */
-       __u64 fcr;                      /* IA32 floating pt control reg */
-       __u64 fir;                      /* IA32 fp except. instr. reg */
-       __u64 fdr;                      /* IA32 fp except. data reg */
-       __u64 old_k1;                   /* old value of ar.k1 */
-       __u64 old_iob;                  /* old IOBase value */
-       struct partial_page_list *ppl;  /* partial page list for 4K page size issue */
-        /* cached TLS descriptors. */
-       struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-
-# define INIT_THREAD_IA32      .eflag =        0,                      \
-                               .fsr =          0,                      \
-                               .fcr =          0x17800000037fULL,      \
-                               .fir =          0,                      \
-                               .fdr =          0,                      \
-                               .old_k1 =       0,                      \
-                               .old_iob =      0,                      \
-                               .ppl =          NULL,
-#else
-# define INIT_THREAD_IA32
-#endif /* CONFIG_IA32_SUPPORT */
-#ifdef CONFIG_PERFMON
-       __u64 pmcs[IA64_NUM_PMC_REGS];
-       __u64 pmds[IA64_NUM_PMD_REGS];
-       void *pfm_context;                   /* pointer to detailed PMU context */
-       unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
-# define INIT_THREAD_PM                .pmcs =                 {0UL, },  \
-                               .pmds =                 {0UL, },  \
-                               .pfm_context =          NULL,     \
-                               .pfm_needs_checking =   0UL,
-#else
-# define INIT_THREAD_PM
-#endif
-       __u64 dbr[IA64_NUM_DBG_REGS];
-       __u64 ibr[IA64_NUM_DBG_REGS];
-       struct ia64_fpreg fph[96];      /* saved/loaded on demand */
-};
-
-#define INIT_THREAD {                                          \
-       .flags =        0,                                      \
-       .on_ustack =    0,                                      \
-       .ksp =          0,                                      \
-       .map_base =     DEFAULT_MAP_BASE,                       \
-       .rbs_bot =      STACK_TOP - DEFAULT_USER_STACK_SIZE,    \
-       .task_size =    DEFAULT_TASK_SIZE,                      \
-       .last_fph_cpu =  -1,                                    \
-       INIT_THREAD_IA32                                        \
-       INIT_THREAD_PM                                          \
-       .dbr =          {0, },                                  \
-       .ibr =          {0, },                                  \
-       .fph =          {{{{0}}}, }                             \
-}
-
-#define start_thread(regs,new_ip,new_sp) do {                                          \
-       set_fs(USER_DS);                                                                \
-       regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))        \
-                        & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));      \
-       regs->cr_iip = new_ip;                                                          \
-       regs->ar_rsc = 0xf;             /* eager mode, privilege level 3 */             \
-       regs->ar_rnat = 0;                                                              \
-       regs->ar_bspstore = current->thread.rbs_bot;                                    \
-       regs->ar_fpsr = FPSR_DEFAULT;                                                   \
-       regs->loadrs = 0;                                                               \
-       regs->r8 = current->mm->dumpable;       /* set "don't zap registers" flag */    \
-       regs->r12 = new_sp - 16;        /* allocate 16 byte scratch area */             \
-       if (unlikely(!current->mm->dumpable)) {                                         \
-               /*                                                                      \
-                * Zap scratch regs to avoid leaking bits between processes with different      \
-                * uid/privileges.                                                      \
-                */                                                                     \
-               regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;                           \
-               regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;       \
-       }                                                                               \
-} while (0)
-
-/* Forward declarations, a strange C thing... */
-struct mm_struct;
-struct task_struct;
-
-/*
- * Free all resources held by a thread. This is called after the
- * parent of DEAD_TASK has collected the exit status of the task via
- * wait().
- */
-#define release_thread(dead_task)
-
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk)   do { } while (0)
-
-/*
- * This is the mechanism for creating a new kernel thread.
- *
- * NOTE 1: Only a kernel-only process (ie the swapper or direct
- * descendants who haven't done an "execve()") should use this: it
- * will work within a system call from a "real" process, but the
- * process memory space will not be free'd until both the parent and
- * the child have exited.
- *
- * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
- * into trouble in init/main.c when the child thread returns to
- * do_basic_setup() and the timing is such that free_initmem() has
- * been called already.
- */
-extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
-
-/* Get wait channel for task P.  */
-extern unsigned long get_wchan (struct task_struct *p);
-
-/* Return instruction pointer of blocked task TSK.  */
-#define KSTK_EIP(tsk)                                  \
-  ({                                                   \
-       struct pt_regs *_regs = ia64_task_regs(tsk);    \
-       _regs->cr_iip + ia64_psr(_regs)->ri;            \
-  })
-
-/* Return stack pointer of blocked task TSK.  */
-#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)
-
-extern void ia64_getreg_unknown_kr (void);
-extern void ia64_setreg_unknown_kr (void);
-
-#define ia64_get_kr(regnum)                                    \
-({                                                             \
-       unsigned long r = 0;                                    \
-                                                               \
-       switch (regnum) {                                       \
-           case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;   \
-           case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;   \
-           case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;   \
-           case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;   \
-           case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;   \
-           case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;   \
-           case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;   \
-           case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;   \
-           default: ia64_getreg_unknown_kr(); break;           \
-       }                                                       \
-       r;                                                      \
-})
-
-#define ia64_set_kr(regnum, r)                                         \
-({                                                             \
-       switch (regnum) {                                       \
-           case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;    \
-           case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;    \
-           case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;    \
-           case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;    \
-           case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;    \
-           case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;    \
-           case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;    \
-           case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;    \
-           default: ia64_setreg_unknown_kr(); break;           \
-       }                                                       \
-})
-
-/*
- * The following three macros can't be inline functions because we don't have struct
- * task_struct at this point.
- */
-
-/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
-#ifndef XEN
-#define ia64_is_local_fpu_owner(t)                                                     \
-({                                                                                     \
-       struct task_struct *__ia64_islfo_task = (t);                                    \
-       (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()                   \
-        && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));        \
-})
-#endif
-
-/* Mark task T as owning the fph partition of the CPU we're running on. */
-#define ia64_set_local_fpu_owner(t) do {                                               \
-       struct task_struct *__ia64_slfo_task = (t);                                     \
-       __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();                     \
-       ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);               \
-} while (0)
-
-/* Mark the fph partition of task T as being invalid on all CPUs.  */
-#define ia64_drop_fpu(t)       ((t)->thread.last_fph_cpu = -1)
-
-extern void __ia64_init_fpu (void);
-extern void __ia64_save_fpu (struct ia64_fpreg *fph);
-extern void __ia64_load_fpu (struct ia64_fpreg *fph);
-extern void ia64_save_debug_regs (unsigned long *save_area);
-extern void ia64_load_debug_regs (unsigned long *save_area);
-
-#ifdef CONFIG_IA32_SUPPORT
-extern void ia32_save_state (struct task_struct *task);
-extern void ia32_load_state (struct task_struct *task);
-#endif
-
-#define ia64_fph_enable()      do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
-#define ia64_fph_disable()     do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
-
-/* load fp 0.0 into fph */
-static inline void
-ia64_init_fpu (void) {
-       ia64_fph_enable();
-       __ia64_init_fpu();
-       ia64_fph_disable();
-}
-
-/* save f32-f127 at FPH */
-static inline void
-ia64_save_fpu (struct ia64_fpreg *fph) {
-       ia64_fph_enable();
-       __ia64_save_fpu(fph);
-       ia64_fph_disable();
-}
-
-/* load f32-f127 from FPH */
-static inline void
-ia64_load_fpu (struct ia64_fpreg *fph) {
-       ia64_fph_enable();
-       __ia64_load_fpu(fph);
-       ia64_fph_disable();
-}
-
-static inline __u64
-ia64_clear_ic (void)
-{
-       __u64 psr;
-       psr = ia64_getreg(_IA64_REG_PSR);
-       ia64_stop();
-       ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
-       ia64_srlz_i();
-       return psr;
-}
-
-/*
- * Restore the psr.
- */
-static inline void
-ia64_set_psr (__u64 psr)
-{
-       ia64_stop();
-       ia64_setreg(_IA64_REG_PSR_L, psr);
-       ia64_srlz_d();
-}
-
-/*
- * Insert a translation into an instruction and/or data translation
- * register.
- */
-static inline void
-ia64_itr (__u64 target_mask, __u64 tr_num,
-         __u64 vmaddr, __u64 pte,
-         __u64 log_page_size)
-{
-       ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
-       ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
-       ia64_stop();
-       if (target_mask & 0x1)
-               ia64_itri(tr_num, pte);
-       if (target_mask & 0x2)
-               ia64_itrd(tr_num, pte);
-}
-
-/*
- * Insert a translation into the instruction and/or data translation
- * cache.
- */
-static inline void
-ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
-         __u64 log_page_size)
-{
-       ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
-       ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
-       ia64_stop();
-       /* as per EAS2.6, itc must be the last instruction in an instruction group */
-       if (target_mask & 0x1)
-               ia64_itci(pte);
-       if (target_mask & 0x2)
-               ia64_itcd(pte);
-}
-
-/*
- * Purge a range of addresses from instruction and/or data translation
- * register(s).
- */
-static inline void
-ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
-{
-       if (target_mask & 0x1)
-               ia64_ptri(vmaddr, (log_size << 2));
-       if (target_mask & 0x2)
-               ia64_ptrd(vmaddr, (log_size << 2));
-}
-
-/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
-static inline void
-ia64_set_iva (void *ivt_addr)
-{
-       ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
-       ia64_srlz_i();
-}
-
-/* Set the page table address and control bits.  */
-static inline void
-ia64_set_pta (__u64 pta)
-{
-       /* Note: srlz.i implies srlz.d */
-       ia64_setreg(_IA64_REG_CR_PTA, pta);
-       ia64_srlz_i();
-}
-
-static inline void
-ia64_eoi (void)
-{
-       ia64_setreg(_IA64_REG_CR_EOI, 0);
-       ia64_srlz_d();
-}
-
-#define cpu_relax()    ia64_hint(ia64_hint_pause)
-
-static inline void
-ia64_set_lrr0 (unsigned long val)
-{
-       ia64_setreg(_IA64_REG_CR_LRR0, val);
-       ia64_srlz_d();
-}
-
-static inline void
-ia64_set_lrr1 (unsigned long val)
-{
-       ia64_setreg(_IA64_REG_CR_LRR1, val);
-       ia64_srlz_d();
-}
-
-
-/*
- * Given the address to which a spill occurred, return the unat bit
- * number that corresponds to this address.
- */
-static inline __u64
-ia64_unat_pos (void *spill_addr)
-{
-       return ((__u64) spill_addr >> 3) & 0x3f;
-}
-
-/*
- * Set the NaT bit of an integer register which was spilled at address
- * SPILL_ADDR.  UNAT is the mask to be updated.
- */
-static inline void
-ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
-{
-       __u64 bit = ia64_unat_pos(spill_addr);
-       __u64 mask = 1UL << bit;
-
-       *unat = (*unat & ~mask) | (nat << bit);
-}
-
-/*
- * Return saved PC of a blocked thread.
- * Note that the only way T can block is through a call to schedule() -> switch_to().
- */
-static inline unsigned long
-thread_saved_pc (struct task_struct *t)
-{
-       struct unw_frame_info info;
-       unsigned long ip;
-
-       unw_init_from_blocked_task(&info, t);
-       if (unw_unwind(&info) < 0)
-               return 0;
-       unw_get_ip(&info, &ip);
-       return ip;
-}
-
-/*
- * Get the current instruction/program counter value.
- */
-#define current_text_addr() \
-       ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
-
-static inline __u64
-ia64_get_ivr (void)
-{
-       __u64 r;
-       ia64_srlz_d();
-       r = ia64_getreg(_IA64_REG_CR_IVR);
-       ia64_srlz_d();
-       return r;
-}
-
-static inline void
-ia64_set_dbr (__u64 regnum, __u64 value)
-{
-       __ia64_set_dbr(regnum, value);
-#ifdef CONFIG_ITANIUM
-       ia64_srlz_d();
-#endif
-}
-
-static inline __u64
-ia64_get_dbr (__u64 regnum)
-{
-       __u64 retval;
-
-       retval = __ia64_get_dbr(regnum);
-#ifdef CONFIG_ITANIUM
-       ia64_srlz_d();
-#endif
-       return retval;
-}
-
-static inline __u64
-ia64_rotr (__u64 w, __u64 n)
-{
-       return (w >> n) | (w << (64 - n));
-}
-
-#define ia64_rotl(w,n) ia64_rotr((w), (64) - (n))
-
-/*
- * Take a mapped kernel address and return the equivalent address
- * in the region 7 identity mapped virtual area.
- */
-static inline void *
-ia64_imva (void *addr)
-{
-       void *result;
-       result = (void *) ia64_tpa(addr);
-       return __va(result);
-}
-
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-#define PREFETCH_STRIDE                        L1_CACHE_BYTES
-
-static inline void
-prefetch (const void *x)
-{
-        ia64_lfetch(ia64_lfhint_none, x);
-}
-
-static inline void
-prefetchw (const void *x)
-{
-       ia64_lfetch_excl(ia64_lfhint_none, x);
-}
-
-#define spin_lock_prefetch(x)  prefetchw(x)
-
-extern unsigned long boot_option_idle_override;
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_IA64_PROCESSOR_H */
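
SET_UNALIGN_CTL()/GET_UNALIGN_CTL() above are a textbook read-modify-write on
a packed flags word: mask the field out, then splice the new value in at its
shift position.  A stand-alone sketch with the same shift and mask values
(the function names are illustrative, and put_user() is replaced by a plain
return):

#include <stdio.h>

#define UAC_SHIFT 3
#define UAC_MASK  ((1u << 3) | (1u << 4))       /* NOPRINT and SIGBUS bits */

/* Clear the two UAC bits, then merge in the caller's 2-bit value. */
static unsigned set_unalign_ctl(unsigned flags, unsigned value)
{
        return (flags & ~UAC_MASK) | ((value << UAC_SHIFT) & UAC_MASK);
}

static unsigned get_unalign_ctl(unsigned flags)
{
        return (flags & UAC_MASK) >> UAC_SHIFT;
}

int main(void)
{
        unsigned flags = set_unalign_ctl(0, 2);         /* request SIGBUS */
        printf("uac=%u\n", get_unalign_ctl(flags));     /* prints uac=2 */
        return 0;
}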
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/ptrace.h
--- a/xen/include/asm-ia64/ptrace.h     Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,341 +0,0 @@
-#ifndef _ASM_IA64_PTRACE_H
-#define _ASM_IA64_PTRACE_H
-
-/*
- * Copyright (C) 1998-2004 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *     Stephane Eranian <eranian@xxxxxxxxxx>
- * Copyright (C) 2003 Intel Co
- *     Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
- *     Fenghua Yu <fenghua.yu@xxxxxxxxx>
- *     Arun Sharma <arun.sharma@xxxxxxxxx>
- *
- * 12/07/98    S. Eranian      added pt_regs & switch_stack
- * 12/21/98    D. Mosberger    updated to match latest code
- *  6/17/99    D. Mosberger    added second unat member to "struct switch_stack"
- *
- */
-/*
- * When a user process is blocked, its state looks as follows:
- *
- *            +----------------------+ ------- IA64_STK_OFFSET
- *            |                      |   ^
- *            |   struct pt_regs     |   |
- *            |                      |   |
- *            +----------------------+   |
- *            |                      |   |
- *            |    memory stack      |   |
- *            | (growing downwards)  |   |
- *            //.....................//  |
- *                                       |
- *            //.....................//  |
- *            |                      |   |
- *            +----------------------+   |
- *            | struct switch_stack  |   |
- *            |                      |   |
- *            +----------------------+   |
- *            |                      |   |
- *            //.....................//  |
- *                                       |
- *            //.....................//  |
- *            |                      |   |
- *            |  register stack      |   |
- *            |  (growing upwards)   |   |
- *            |                      |   |
- *            +----------------------+   | --- IA64_RBS_OFFSET
- *            | struct thread_info   |   |  ^
- *            +----------------------+   |  |
- *            |                      |   |  |
- *            | struct task_struct   |   |  |
- * current -> |                      |   |  |
- *            +----------------------+ -------
- *
- * Note that ar.ec is not saved explicitly in pt_regs or switch_stack.
- * This is because ar.ec is saved as part of ar.pfs.
- */
-
-#include <linux/config.h>
-
-#include <asm/fpu.h>
-#include <asm/offsets.h>
-
-/*
- * Base-2 logarithm of number of pages to allocate per task structure
- * (including register backing store and memory stack):
- */
-#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
-# define KERNEL_STACK_SIZE_ORDER               3
-#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
-# define KERNEL_STACK_SIZE_ORDER               2
-#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
-# define KERNEL_STACK_SIZE_ORDER               1
-#else
-# define KERNEL_STACK_SIZE_ORDER               0
-#endif
-
-#define IA64_RBS_OFFSET                        ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15)
-#define IA64_STK_OFFSET                        ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
-
-#define KERNEL_STACK_SIZE              IA64_STK_OFFSET
-
-#ifndef __ASSEMBLY__
-
-#include <asm/current.h>
-#include <asm/page.h>
-
-/*
- * This struct defines the way the registers are saved on system
- * calls.
- *
- * We don't save all floating point registers because the kernel
- * is compiled to use only a very small subset, so the others are
- * untouched.
- *
- * THIS STRUCTURE MUST BE A MULTIPLE OF 16 BYTES IN SIZE
- * (because the memory stack pointer MUST ALWAYS be aligned this way)
- *
- */
-#ifdef XEN
-#include <public/arch-ia64.h>
-#else
-struct pt_regs {
-       /* The following registers are saved by SAVE_MIN: */
-       unsigned long b6;               /* scratch */
-       unsigned long b7;               /* scratch */
-
-       unsigned long ar_csd;           /* used by cmp8xchg16 (scratch) */
-       unsigned long ar_ssd;           /* reserved for future use (scratch) */
-
-       unsigned long r8;               /* scratch (return value register 0) */
-       unsigned long r9;               /* scratch (return value register 1) */
-       unsigned long r10;              /* scratch (return value register 2) */
-       unsigned long r11;              /* scratch (return value register 3) */
-
-       unsigned long cr_ipsr;          /* interrupted task's psr */
-       unsigned long cr_iip;           /* interrupted task's instruction pointer */
-       /*
-        * interrupted task's function state; if bit 63 is cleared, it
-        * contains syscall's ar.pfs.pfm:
-        */
-       unsigned long cr_ifs;
-
-       unsigned long ar_unat;          /* interrupted task's NaT register (preserved) */
-       unsigned long ar_pfs;           /* prev function state  */
-       unsigned long ar_rsc;           /* RSE configuration */
-       /* The following two are valid only if cr_ipsr.cpl > 0: */
-       unsigned long ar_rnat;          /* RSE NaT */
-       unsigned long ar_bspstore;      /* RSE bspstore */
-
-       unsigned long pr;               /* 64 predicate registers (1 bit each) */
-       unsigned long b0;               /* return pointer (bp) */
-       unsigned long loadrs;           /* size of dirty partition << 16 */
-
-       unsigned long r1;               /* the gp pointer */
-       unsigned long r12;              /* interrupted task's memory stack pointer */
-       unsigned long r13;              /* thread pointer */
-
-       unsigned long ar_fpsr;          /* floating point status (preserved) */
-       unsigned long r15;              /* scratch */
-
-       /* The remaining registers are NOT saved for system calls.  */
-
-       unsigned long r14;              /* scratch */
-       unsigned long r2;               /* scratch */
-       unsigned long r3;               /* scratch */
-
-       /* The following registers are saved by SAVE_REST: */
-       unsigned long r16;              /* scratch */
-       unsigned long r17;              /* scratch */
-       unsigned long r18;              /* scratch */
-       unsigned long r19;              /* scratch */
-       unsigned long r20;              /* scratch */
-       unsigned long r21;              /* scratch */
-       unsigned long r22;              /* scratch */
-       unsigned long r23;              /* scratch */
-       unsigned long r24;              /* scratch */
-       unsigned long r25;              /* scratch */
-       unsigned long r26;              /* scratch */
-       unsigned long r27;              /* scratch */
-       unsigned long r28;              /* scratch */
-       unsigned long r29;              /* scratch */
-       unsigned long r30;              /* scratch */
-       unsigned long r31;              /* scratch */
-
-       unsigned long ar_ccv;           /* compare/exchange value (scratch) */
-
-       /*
-        * Floating point registers that the kernel considers scratch:
-        */
-       struct ia64_fpreg f6;           /* scratch */
-       struct ia64_fpreg f7;           /* scratch */
-       struct ia64_fpreg f8;           /* scratch */
-       struct ia64_fpreg f9;           /* scratch */
-       struct ia64_fpreg f10;          /* scratch */
-       struct ia64_fpreg f11;          /* scratch */
-};
-#endif
-
-/*
- * This structure contains the additional registers that need to
- * be preserved across a context switch.  This generally consists of
- * "preserved" registers.
- */
-struct switch_stack {
-       unsigned long caller_unat;      /* user NaT collection register (preserved) */
-       unsigned long ar_fpsr;          /* floating-point status register */
-
-       struct ia64_fpreg f2;           /* preserved */
-       struct ia64_fpreg f3;           /* preserved */
-       struct ia64_fpreg f4;           /* preserved */
-       struct ia64_fpreg f5;           /* preserved */
-
-       struct ia64_fpreg f12;          /* scratch, but untouched by kernel */
-       struct ia64_fpreg f13;          /* scratch, but untouched by kernel */
-       struct ia64_fpreg f14;          /* scratch, but untouched by kernel */
-       struct ia64_fpreg f15;          /* scratch, but untouched by kernel */
-       struct ia64_fpreg f16;          /* preserved */
-       struct ia64_fpreg f17;          /* preserved */
-       struct ia64_fpreg f18;          /* preserved */
-       struct ia64_fpreg f19;          /* preserved */
-       struct ia64_fpreg f20;          /* preserved */
-       struct ia64_fpreg f21;          /* preserved */
-       struct ia64_fpreg f22;          /* preserved */
-       struct ia64_fpreg f23;          /* preserved */
-       struct ia64_fpreg f24;          /* preserved */
-       struct ia64_fpreg f25;          /* preserved */
-       struct ia64_fpreg f26;          /* preserved */
-       struct ia64_fpreg f27;          /* preserved */
-       struct ia64_fpreg f28;          /* preserved */
-       struct ia64_fpreg f29;          /* preserved */
-       struct ia64_fpreg f30;          /* preserved */
-       struct ia64_fpreg f31;          /* preserved */
-
-       unsigned long r4;               /* preserved */
-       unsigned long r5;               /* preserved */
-       unsigned long r6;               /* preserved */
-       unsigned long r7;               /* preserved */
-
-       unsigned long b0;               /* so we can force a direct return in copy_thread */
-       unsigned long b1;
-       unsigned long b2;
-       unsigned long b3;
-       unsigned long b4;
-       unsigned long b5;
-
-       unsigned long ar_pfs;           /* previous function state */
-       unsigned long ar_lc;            /* loop counter (preserved) */
-       unsigned long ar_unat;          /* NaT bits for r4-r7 */
-       unsigned long ar_rnat;          /* RSE NaT collection register */
-       unsigned long ar_bspstore;      /* RSE dirty base (preserved) */
-       unsigned long pr;               /* 64 predicate registers (1 bit each) */
-};
-
-#ifdef __KERNEL__
-/*
- * We use the ia64_psr(regs)->ri to determine which of the three
- * instructions in bundle (16 bytes) took the sample. Generate
- * the canonical representation by adding to instruction pointer.
- */
-# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
-/* Conserve space in histogram by encoding slot bits in address
- * bits 2 and 3 rather than bits 0 and 1.
- */
-#define profile_pc(regs)                                               \
-({                                                                     \
-       unsigned long __ip = instruction_pointer(regs);                 \
-       (__ip & ~3UL) + ((__ip & 3UL) << 2);                            \
-})
-
-  /* given a pointer to a task_struct, return the user's pt_regs */
-# define ia64_task_regs(t)             (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
-# define ia64_psr(regs)                        ((struct ia64_psr *) &(regs)->cr_ipsr)
-# define user_mode(regs)               (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
-# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
-# define fsys_mode(task,regs)                                  \
-  ({                                                           \
-         struct task_struct *_task = (task);                   \
-         struct pt_regs *_regs = (regs);                       \
-         !user_mode(_regs) && user_stack(_task, _regs);        \
-  })
-
-  /*
-   * System call handlers that, upon successful completion, need to return a negative value
-   * should call force_successful_syscall_return() right before returning.  On architectures
-   * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
-   * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
-   * flag will not get set.  On architectures which do not support a separate error flag,
-   * the macro is a no-op and the spurious error condition needs to be filtered out by some
-   * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
-   * or something along those lines).
-   *
-   * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
-   */
-# define force_successful_syscall_return()     (ia64_task_regs(current)->r8 = 0)
-
-  struct task_struct;                  /* forward decl */
-  struct unw_frame_info;               /* forward decl */
-
-  extern void show_regs (struct pt_regs *);
-  extern void ia64_do_show_stack (struct unw_frame_info *, void *);
-  extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
-                                             unsigned long *);
-  extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
-                        unsigned long, long *);
-  extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
-                        unsigned long, long);
-  extern void ia64_flush_fph (struct task_struct *);
-  extern void ia64_sync_fph (struct task_struct *);
-  extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
-                                 unsigned long, unsigned long);
-
-  /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
-  extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
-  /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
-  extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
-
-  extern void ia64_increment_ip (struct pt_regs *pt);
-  extern void ia64_decrement_ip (struct pt_regs *pt);
-
-#endif /* !__KERNEL__ */
-
-/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
-struct pt_all_user_regs {
-       unsigned long nat;
-       unsigned long cr_iip;
-       unsigned long cfm;
-       unsigned long cr_ipsr;
-       unsigned long pr;
-
-       unsigned long gr[32];
-       unsigned long br[8];
-       unsigned long ar[128];
-       struct ia64_fpreg fr[128];
-};
-
-#endif /* !__ASSEMBLY__ */
-
-/* indices to application-registers array in pt_all_user_regs */
-#define PT_AUR_RSC     16
-#define PT_AUR_BSP     17
-#define PT_AUR_BSPSTORE        18
-#define PT_AUR_RNAT    19
-#define PT_AUR_CCV     32
-#define PT_AUR_UNAT    36
-#define PT_AUR_FPSR    40
-#define PT_AUR_PFS     64
-#define PT_AUR_LC      65
-#define PT_AUR_EC      66
-
-/*
- * The numbers chosen here are somewhat arbitrary but absolutely MUST
- * not overlap with any of the numbers assigned in <linux/ptrace.h>.
- */
-#define PTRACE_SINGLEBLOCK     12      /* resume execution until next branch */
-#define PTRACE_OLD_GETSIGINFO  13      /* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>)  */
-#define PTRACE_OLD_SETSIGINFO  14      /* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>)  */
-#define PTRACE_GETREGS         18      /* get all registers (pt_all_user_regs) in one shot */
-#define PTRACE_SETREGS         19      /* set all registers (pt_all_user_regs) in one shot */
-
-#define PTRACE_OLDSETOPTIONS   21
-
-#endif /* _ASM_IA64_PTRACE_H */
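
The profile_pc() macro above is a small bit-packing trick: instruction_pointer()
stores the bundle-slot number (psr.ri) in address bits 0-1, and profile_pc()
moves it to bits 2-3 so that 4-byte histogram buckets keep the three slots of a
16-byte bundle distinct.  A stand-alone illustration, with a made-up bundle
address:

#include <stdio.h>

static unsigned long profile_pc_encode(unsigned long ip)
{
        /* Strip the slot bits, then re-add them scaled by 4. */
        return (ip & ~3UL) + ((ip & 3UL) << 2);
}

int main(void)
{
        unsigned long bundle = 0xa000000000001230UL;    /* 16-byte aligned */
        for (unsigned long slot = 0; slot < 3; slot++)
                printf("slot %lu -> %#lx\n", slot,
                       profile_pc_encode(bundle + slot));
        /* slots 0, 1, 2 land at offsets 0x0, 0x4, 0x8 within the bundle */
        return 0;
}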
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/system.h
--- a/xen/include/asm-ia64/system.h     Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,299 +0,0 @@
-#ifndef _ASM_IA64_SYSTEM_H
-#define _ASM_IA64_SYSTEM_H
-
-/*
- * System defines. Note that this is included both from .c and .S
- * files, so it contains only defines, not any C code.  This is based
- * on information published in the Processor Abstraction Layer
- * and the System Abstraction Layer manual.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
- * Copyright (C) 1999 Don Dugger <don.dugger@xxxxxxxxx>
- */
-#include <linux/config.h>
-
-#include <asm/kregs.h>
-#include <asm/page.h>
-#include <asm/pal.h>
-#include <asm/percpu.h>
-#ifdef XEN
-#include <asm/xensystem.h>
-#endif
-
-#define GATE_ADDR              __IA64_UL_CONST(0xa000000000000000)
-/*
- * 0xa000000000000000+2*PERCPU_PAGE_SIZE
- * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
- */
-#ifndef XEN
-#define KERNEL_START            __IA64_UL_CONST(0xa000000100000000)
-#define PERCPU_ADDR            (-PERCPU_PAGE_SIZE)
-#endif
-
-#ifndef __ASSEMBLY__
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct pci_vector_struct {
-       __u16 segment;  /* PCI Segment number */
-       __u16 bus;      /* PCI Bus number */
-       __u32 pci_id;   /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
-       __u8 pin;       /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
-       __u32 irq;      /* IRQ assigned */
-};
-
-extern struct ia64_boot_param {
-       __u64 command_line;             /* physical address of command line arguments */
-       __u64 efi_systab;               /* physical address of EFI system table */
-       __u64 efi_memmap;               /* physical address of EFI memory map */
-       __u64 efi_memmap_size;          /* size of EFI memory map */
-       __u64 efi_memdesc_size;         /* size of an EFI memory map descriptor */
-       __u32 efi_memdesc_version;      /* memory descriptor version */
-       struct {
-               __u16 num_cols; /* number of columns on console output device */
-               __u16 num_rows; /* number of rows on console output device */
-               __u16 orig_x;   /* cursor's x position */
-               __u16 orig_y;   /* cursor's y position */
-       } console_info;
-       __u64 fpswa;            /* physical address of the fpswa interface */
-       __u64 initrd_start;
-       __u64 initrd_size;
-} *ia64_boot_param;
-
-/*
- * Macros to force memory ordering.  In these descriptions, "previous"
- * and "subsequent" refer to program order; "visible" means that all
- * architecturally visible effects of a memory access have occurred
- * (at a minimum, this means the memory has been read or written).
- *
- *   wmb():    Guarantees that all preceding stores to memory-
- *             like regions are visible before any subsequent
- *             stores and that all following stores will be
- *             visible only after all previous stores.
- *   rmb():    Like wmb(), but for reads.
- *   mb():     wmb()/rmb() combo, i.e., all previous memory
- *             accesses are visible before all subsequent
- *             accesses and vice versa.  This is also known as
- *             a "fence."
- *
- * Note: "mb()" and its variants cannot be used as a fence to order
- * accesses to memory mapped I/O registers.  For that, mf.a needs to
- * be used.  However, we don't want to always use mf.a because (a)
- * it's (presumably) much slower than mf and (b) mf.a is supported for
- * sequential memory pages only.
- */
-#define mb()   ia64_mf()
-#define rmb()  mb()
-#define wmb()  mb()
-#define read_barrier_depends() do { } while(0)
-
-#ifdef CONFIG_SMP
-# define smp_mb()      mb()
-# define smp_rmb()     rmb()
-# define smp_wmb()     wmb()
-# define smp_read_barrier_depends()    read_barrier_depends()
-#else
-# define smp_mb()      barrier()
-# define smp_rmb()     barrier()
-# define smp_wmb()     barrier()
-# define smp_read_barrier_depends()    do { } while(0)
-#endif
-
-/*
- * XXX check on these---I suspect what Linus really wants here is
- * acquire vs release semantics but we can't discuss this stuff with
- * Linus just yet.  Grrr...
- */
-#define set_mb(var, value)     do { (var) = (value); mb(); } while (0)
-#define set_wmb(var, value)    do { (var) = (value); mb(); } while (0)
-
-#define safe_halt()         ia64_pal_halt_light()    /* PAL_HALT_LIGHT */
-
-/*
- * The group barrier in front of the rsm & ssm are necessary to ensure
- * that none of the previous instructions in the same group are
- * affected by the rsm/ssm.
- */
-/* For spinlocks etc */
-
-/*
- * - clearing psr.i is implicitly serialized (visible by next insn)
- * - setting psr.i requires data serialization
- * - we need a stop-bit before reading PSR because we sometimes
- *   write a floating-point register right before reading the PSR
- *   and that writes to PSR.mfl
- */
-#define __local_irq_save(x)                    \
-do {                                           \
-       ia64_stop();                            \
-       (x) = ia64_getreg(_IA64_REG_PSR);       \
-       ia64_stop();                            \
-       ia64_rsm(IA64_PSR_I);                   \
-} while (0)
-
-#define __local_irq_disable()                  \
-do {                                           \
-       ia64_stop();                            \
-       ia64_rsm(IA64_PSR_I);                   \
-} while (0)
-
-#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
-
-#ifdef CONFIG_IA64_DEBUG_IRQ
-
-  extern unsigned long last_cli_ip;
-
-# define __save_ip()           last_cli_ip = ia64_getreg(_IA64_REG_IP)
-
-# define local_irq_save(x)                                     \
-do {                                                           \
-       unsigned long psr;                                      \
-                                                               \
-       __local_irq_save(psr);                                  \
-       if (psr & IA64_PSR_I)                                   \
-               __save_ip();                                    \
-       (x) = psr;                                              \
-} while (0)
-
-# define local_irq_disable()   do { unsigned long x; local_irq_save(x); } while (0)
-
-# define local_irq_restore(x)                                  \
-do {                                                           \
-       unsigned long old_psr, psr = (x);                       \
-                                                               \
-       local_save_flags(old_psr);                              \
-       __local_irq_restore(psr);                               \
-       if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I))      \
-               __save_ip();                                    \
-} while (0)
-
-#else /* !CONFIG_IA64_DEBUG_IRQ */
-# define local_irq_save(x)     __local_irq_save(x)
-# define local_irq_disable()   __local_irq_disable()
-# define local_irq_restore(x)  __local_irq_restore(x)
-#endif /* !CONFIG_IA64_DEBUG_IRQ */
-
-#define local_irq_enable()     ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
-#define local_save_flags(flags)        ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
-
-#define irqs_disabled()                                \
-({                                             \
-       unsigned long __ia64_id_flags;          \
-       local_save_flags(__ia64_id_flags);      \
-       (__ia64_id_flags & IA64_PSR_I) == 0;    \
-})
-
-#ifdef __KERNEL__
-
-#define prepare_to_switch()    do { } while(0)
-
-#ifdef CONFIG_IA32_SUPPORT
-# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
-#else
-# define IS_IA32_PROCESS(regs)         0
-struct task_struct;
-static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
-static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
-#endif
-
-/*
- * Context switch from one thread to another.  If the two threads have
- * different address spaces, schedule() has already taken care of
- * switching to the new address space by calling switch_mm().
- *
- * Disabling access to the fph partition and the debug-register
- * context switch MUST be done before calling ia64_switch_to() since a
- * newly created thread returns directly to
- * ia64_ret_from_syscall_clear_r8.
- */
-extern struct task_struct *ia64_switch_to (void *next_task);
-
-struct task_struct;
-
-extern void ia64_save_extra (struct task_struct *task);
-extern void ia64_load_extra (struct task_struct *task);
-
-#ifdef CONFIG_PERFMON
-  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
-# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
-#else
-# define PERFMON_IS_SYSWIDE() (0)
-#endif
-
-#ifndef XEN
-#define IA64_HAS_EXTRA_STATE(t)                                                        \
-       ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)               \
-        || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
-
-#define __switch_to(prev,next,last) do {                                               \
-       if (IA64_HAS_EXTRA_STATE(prev))                                                 \
-               ia64_save_extra(prev);                                                  \
-       if (IA64_HAS_EXTRA_STATE(next))                                                 \
-               ia64_load_extra(next);                                                  \
-       ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);           \
-       (last) = ia64_switch_to((next));                                                \
-} while (0)
-#endif
-
-#ifdef CONFIG_SMP
-/*
- * In the SMP case, we save the fph state when context-switching away from a thread that
- * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
- * pick up the state from task->thread.fph, avoiding the complication of having to fetch
- * the latest fph state from another CPU.  In other words: eager save, lazy restore.
- */
-# define switch_to(prev,next,last) do {                                                \
-       if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {     \
-               ia64_psr(ia64_task_regs(prev))->mfh = 0;                                \
-               (prev)->thread.flags |= IA64_THREAD_FPH_VALID;                          \
-               __ia64_save_fpu((prev)->thread.fph);                                    \
-       }                                                                               \
-       __switch_to(prev, next, last);                                                  \
-} while (0)
-#else
-# define switch_to(prev,next,last)     __switch_to(prev, next, last)
-#endif
-
-/*
- * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
- * because that could cause a deadlock.  Here is an example by Erich Focht:
- *
- * Example:
- * CPU#0:
- * schedule()
- *    -> spin_lock_irq(&rq->lock)
- *    -> context_switch()
- *       -> wrap_mmu_context()
- *          -> read_lock(&tasklist_lock)
- *
- * CPU#1:
- * sys_wait4() or release_task() or forget_original_parent()
- *    -> write_lock(&tasklist_lock)
- *    -> do_notify_parent()
- *       -> wake_up_parent()
- *          -> try_to_wake_up()
- *             -> spin_lock_irq(&parent_rq->lock)
- *
- * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
- * of that CPU which will not be released, because there we wait for the
- * tasklist_lock to become available.
- */
-#define prepare_arch_switch(rq, next)          \
-do {                                           \
-       spin_lock(&(next)->switch_lock);        \
-       spin_unlock(&(rq)->lock);               \
-} while (0)
-#define finish_arch_switch(rq, prev)   spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p)            ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-
-#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
-
-void cpu_idle_wait(void);
-#endif /* __KERNEL__ */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IA64_SYSTEM_H */
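
The deadlock avoidance in prepare_arch_switch()/finish_arch_switch() above is
hand-over-hand locking: the incoming task's switch_lock is taken before the
runqueue lock is dropped, so the low-level switch never runs under rq->lock.
A sketch of the same ordering, with pthread mutexes standing in for kernel
spinlocks and all names illustrative:

#include <pthread.h>

struct rq   { pthread_mutex_t lock; };
struct task { pthread_mutex_t switch_lock; };

static void prepare_switch(struct rq *rq, struct task *next)
{
        pthread_mutex_lock(&next->switch_lock);         /* pin next first */
        pthread_mutex_unlock(&rq->lock);                /* then drop the runqueue */
}

static void finish_switch(struct task *prev)
{
        pthread_mutex_unlock(&prev->switch_lock);       /* prev is switched out */
}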
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/types.h
--- a/xen/include/asm-ia64/types.h      Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,104 +0,0 @@
-#ifndef _ASM_IA64_TYPES_H
-#define _ASM_IA64_TYPES_H
-#ifdef XEN
-#ifndef __ASSEMBLY__
-typedef unsigned long ssize_t;
-typedef unsigned long size_t;
-typedef long long loff_t;
-#endif
-#endif
-
-/*
- * This file is never included by application software unless explicitly requested (e.g.,
- * via linux/types.h) in which case the application is Linux specific so (user-) name
- * space pollution is not a major issue.  However, for interoperability, libraries still
- * need to be careful to avoid name clashes.
- *
- * Based on <asm-alpha/types.h>.
- *
- * Modified 1998-2000, 2002
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
- */
-
-#ifdef __ASSEMBLY__
-# define __IA64_UL(x)          (x)
-# define __IA64_UL_CONST(x)    x
-
-# ifdef __KERNEL__
-#  define BITS_PER_LONG 64
-# endif
-
-#else
-# define __IA64_UL(x)          ((unsigned long)(x))
-# define __IA64_UL_CONST(x)    x##UL
-
-typedef unsigned int umode_t;
-
-/*
- * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
- * header files exported to user space
- */
-
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
-
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-typedef __signed__ int __s32;
-typedef unsigned int __u32;
-
-typedef __signed__ long __s64;
-typedef unsigned long __u64;
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-# ifdef __KERNEL__
-
-typedef __s8 s8;
-typedef __u8 u8;
-
-typedef __s16 s16;
-typedef __u16 u16;
-
-typedef __s32 s32;
-typedef __u32 u32;
-
-typedef __s64 s64;
-typedef __u64 u64;
-
-#ifdef XEN
-/*
- * Below are truly Linux-specific types that should never collide with
- * any application/library that wants linux/types.h.
- */
-
-#ifdef __CHECKER__
-#define __bitwise __attribute__((bitwise))
-#else
-#define __bitwise
-#endif
-
-typedef __u16 __bitwise __le16;
-typedef __u16 __bitwise __be16;
-typedef __u32 __bitwise __le32;
-typedef __u32 __bitwise __be32;
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-typedef __u64 __bitwise __le64;
-typedef __u64 __bitwise __be64;
-#endif
-#endif
-
-#define BITS_PER_LONG 64
-
-/* DMA addresses are 64-bits wide, in general.  */
-
-typedef u64 dma_addr_t;
-
-typedef unsigned short kmem_bufctl_t;
-
-# endif /* __KERNEL__ */
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_IA64_TYPES_H */
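
The typedefs above bake in an LP64 target: long and pointers are 64 bits wide,
which is also where BITS_PER_LONG 64 comes from.  Those assumptions are easy
to verify at build time; a C11 sketch (the typedef names merely mirror the
header):

#include <limits.h>

typedef unsigned long __u64;
typedef unsigned int  __u32;

/* Fail the build if the LP64 assumptions behind the header don't hold. */
_Static_assert(sizeof(__u64) * CHAR_BIT == 64, "need a 64-bit long (LP64)");
_Static_assert(sizeof(__u32) * CHAR_BIT == 32, "need a 32-bit int");
_Static_assert(sizeof(void *) == 8, "need 64-bit pointers");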
diff -r e2127f19861b -r f242de2e5a3c xen/include/asm-ia64/uaccess.h
--- a/xen/include/asm-ia64/uaccess.h    Tue Aug  2 23:59:09 2005
+++ /dev/null   Wed Aug  3 00:25:11 2005
@@ -1,381 +0,0 @@
-#ifndef _ASM_IA64_UACCESS_H
-#define _ASM_IA64_UACCESS_H
-
-/*
- * This file defines various macros to transfer memory areas across
- * the user/kernel boundary.  This needs to be done carefully because
- * this code is executed in kernel mode and uses user-specified
- * addresses.  Thus, we need to be careful not to let the user
- * trick us into accessing kernel memory that would normally be
- * inaccessible.  This code is also fairly performance sensitive,
- * so we want to spend as little time doing safety checks as
- * possible.
- *
- * To make matters a bit more interesting, these macros are sometimes
- * also called from within the kernel itself, in which case the address
- * validity check must be skipped.  The get_fs() macro tells us what
- * to do: if get_fs()==USER_DS, checking is performed, if
- * get_fs()==KERNEL_DS, checking is bypassed.
- *
- * Note that even if the memory area specified by the user is in a
- * valid address range, it is still possible that we'll get a page
- * fault while accessing it.  This is handled by filling out an
- * exception handler fixup entry for each instruction that has the
- * potential to fault.  When such a fault occurs, the page fault
- * handler checks to see whether the faulting instruction has a fixup
- * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
- * then resumes execution at the continuation point.
- *
- * Based on <asm-alpha/uaccess.h>.
- *
- * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- */
-
-#ifdef CONFIG_VTI
-#include <asm/vmx_uaccess.h>
-#else // CONFIG_VTI
-
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-
-#include <asm/intrinsics.h>
-#include <asm/pgtable.h>
-
-/*
- * For historical reasons, the following macros are grossly misnamed:
- */
-#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
-#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */
-
-#define VERIFY_READ    0
-#define VERIFY_WRITE   1
-
-#define get_ds()  (KERNEL_DS)
-#define get_fs()  (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b)       ((a).seg == (b).seg)
-
-/*
- * When accessing user memory, we need to make sure the entire area really is in
- * user-level space.  In order to do this efficiently, we make sure that the page at
- * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
- * point inside the virtually mapped linear page table.
- */
-#ifdef XEN
-/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
-#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
-#define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
-#else
-#define __access_ok(addr, size, segment)						\
-({											\
-	__chk_user_ptr(addr);								\
-	(likely((unsigned long) (addr) <= (segment).seg)				\
-	 && ((segment).seg == KERNEL_DS.seg						\
-	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
-})
-#endif
-#define access_ok(type, addr, size)    __access_ok((addr), (size), get_fs())
-
-static inline int
-verify_area (int type, const void __user *addr, unsigned long size)
-{
-       return access_ok(type, addr, size) ? 0 : -EFAULT;
-}
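
The XEN variant above reduces the whole check to one bit test. A standalone
illustration (compilable on its own; the addresses are chosen for the
example):

    #include <stdint.h>
    #include <stdio.h>

    /* Guest-visible addresses have bit 60 == bit 59, so XOR-ing the two
     * bits yields 1 exactly for VMM-reserved addresses. */
    #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)

    int main(void)
    {
            uint64_t guest = 0x1800000000000000ULL; /* bits 60 and 59 both set */
            uint64_t vmm   = 0x1000000000000000ULL; /* bit 60 set, bit 59 clear */

            printf("guest -> %d (accessible)\n", (int)IS_VMM_ADDRESS(guest)); /* 0 */
            printf("vmm   -> %d (rejected)\n",   (int)IS_VMM_ADDRESS(vmm));   /* 1 */
            return 0;
    }
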
-
-/*
- * These are the main single-value transfer routines.  They automatically
- * use the right size if we just have the right pointer type.
- *
- * Careful to not
- * (a) re-use the arguments for side effects (sizeof/typeof is ok)
- * (b) require any knowledge of processes at this stage
- */
-#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
-#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
-
-/*
- * The "__xxx" versions do not do address space checking, useful when
- * doing multiple accesses to the same area (the programmer has to do the
- * checks by hand with "access_ok()")
- */
-#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
-#define __get_user(x, ptr)     __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
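
A hedged kernel-context sketch (hypothetical structure and helper) of the
pattern the unchecked variants exist for: one access_ok() covers the whole
structure, after which each field is read without re-validation.

    struct demo_req { int op; long arg; };      /* hypothetical layout */

    static int read_demo_req(struct demo_req __user *up, int *op, long *arg)
    {
            if (!access_ok(VERIFY_READ, up, sizeof(*up)))
                    return -EFAULT;             /* one range check... */
            if (__get_user(*op, &up->op) ||     /* ...then unchecked reads */
                __get_user(*arg, &up->arg))
                    return -EFAULT;
            return 0;
    }
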
-
-extern long __put_user_unaligned_unknown (void);
-
-#define __put_user_unaligned(x, ptr)							\
-({											\
-	long __ret;									\
-	switch (sizeof(*(ptr))) {							\
-		case 1: __ret = __put_user((x), (ptr)); break;				\
-		case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))			\
-			| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;	\
-		case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))			\
-			| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;	\
-		case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))			\
-			| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;	\
-		default: __ret = __put_user_unaligned_unknown();			\
-	}										\
-	__ret;										\
-})
-
-extern long __get_user_unaligned_unknown (void);
-
-#define __get_user_unaligned(x, ptr)							\
-({											\
-	long __ret;									\
-	switch (sizeof(*(ptr))) {							\
-		case 1: __ret = __get_user((x), (ptr)); break;				\
-		case 2: __ret = (__get_user((x), (u8 __user *)(ptr)))			\
-			| (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;	\
-		case 4: __ret = (__get_user((x), (u16 __user *)(ptr)))			\
-			| (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;	\
-		case 8: __ret = (__get_user((x), (u32 __user *)(ptr)))			\
-			| (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;	\
-		default: __ret = __get_user_unaligned_unknown();			\
-	}										\
-	__ret;										\
-})
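
The switch above decomposes an unaligned access into naturally aligned
halves. A standalone model of the 4-byte case (assuming little-endian byte
order, as ia64 Linux uses):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirror of the case-4 branch: the low u16 goes at p, the high u16 two
     * bytes later, so neither store spans the misaligned boundary. */
    static void put32_split(void *p, uint32_t x)
    {
            uint16_t lo = (uint16_t)x, hi = (uint16_t)(x >> 16);

            memcpy(p, &lo, sizeof lo);
            memcpy((char *)p + 2, &hi, sizeof hi);
    }

    int main(void)
    {
            unsigned char buf[8] = { 0 };
            uint32_t back;

            put32_split(buf + 1, 0x11223344u);  /* deliberately misaligned */
            memcpy(&back, buf + 1, sizeof back);
            printf("0x%08x\n", back);           /* prints 0x11223344 */
            return 0;
    }
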
-
-#ifdef ASM_SUPPORTED
-  struct __large_struct { unsigned long buf[100]; };
-# define __m(x) (*(struct __large_struct __user *)(x))
-
-/* We need to declare the __ex_table section before we can use it in .xdata.  */
-asm (".section \"__ex_table\", \"a\"\n\t.previous");
-
-# define __get_user_size(val, addr, n, err)						\
-do {											\
-	register long __gu_r8 asm ("r8") = 0;						\
-	register long __gu_r9 asm ("r9");						\
-	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
-	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"					\
-	     "[1:]"									\
-	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));		\
-	(err) = __gu_r8;								\
-	(val) = __gu_r9;								\
-} while (0)
-
-/*
- * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it.  This
- * is because they do not write to any memory gcc knows about, so there are no aliasing
- * issues.
- */
-# define __put_user_size(val, addr, n, err)						\
-do {											\
-	register long __pu_r8 asm ("r8") = 0;						\
-	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
-		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"				\
-		      "[1:]"								\
-		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));	\
-	(err) = __pu_r8;								\
-} while (0)
-
-#else /* !ASM_SUPPORTED */
-# define RELOC_TYPE    2       /* ip-rel */
-# define __get_user_size(val, addr, n, err)                            \
-do {                                                                   \
-       __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);   \
-       (err) = ia64_getreg(_IA64_REG_R8);                              \
-       (val) = ia64_getreg(_IA64_REG_R9);                              \
-} while (0)
-# define __put_user_size(val, addr, n, err)						\
-do {											\
-	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val));	\
-	(err) = ia64_getreg(_IA64_REG_R8);						\
-} while (0)
-#endif /* !ASM_SUPPORTED */
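
Both variants hang off the same __ex_table mechanism: every potentially
faulting load/store contributes an entry pairing its address with a
continuation address, and the page-fault handler consults that table. A
conceptual sketch only (absolute addresses and a linear scan; the real
table stores location-relative offsets and is searched more efficiently):

    struct ex_entry_demo { unsigned long ip; unsigned long cont; };

    static const struct ex_entry_demo *
    find_fixup(const struct ex_entry_demo *tbl, int n, unsigned long fault_ip)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (tbl[i].ip == fault_ip)
                            return &tbl[i];     /* resume at tbl[i].cont */
            return 0;                           /* genuine kernel fault */
    }
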
-
-extern void __get_user_unknown (void);
-
-/*
- * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
- * could clobber r8 and r9 (among others).  Thus, be careful not to evaluate them while
- * using r8/r9.
- */
-#define __do_get_user(check, x, ptr, size, segment)					\
-({											\
-	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);				\
-	__typeof__ (size) __gu_size = (size);						\
-	long __gu_err = -EFAULT, __gu_val = 0;						\
-											\
-	if (!check || __access_ok(__gu_ptr, size, segment))				\
-		switch (__gu_size) {							\
-		      case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
-		      case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
-		      case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;	\
-		      case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;	\
-		      default: __get_user_unknown(); break;				\
-		}									\
-	(x) = (__typeof__(*(__gu_ptr))) __gu_val;					\
-	__gu_err;									\
-})
-
-#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
-#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)
-
-extern void __put_user_unknown (void);
-
-/*
- * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
- * could clobber r8 (among others).  Thus, be careful not to evaluate them while using r8.
- */
-#define __do_put_user(check, x, ptr, size, segment)					\
-({											\
-	__typeof__ (x) __pu_x = (x);							\
-	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);					\
-	__typeof__ (size) __pu_size = (size);						\
-	long __pu_err = -EFAULT;							\
-											\
-	if (!check || __access_ok(__pu_ptr, __pu_size, segment))			\
-		switch (__pu_size) {							\
-		      case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;	\
-		      case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;	\
-		      case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;	\
-		      case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;	\
-		      default: __put_user_unknown(); break;				\
-		}									\
-	__pu_err;									\
-})
-
-#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
-#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)
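
The __gu_/__pu_ locals above are also what make side-effecting arguments
safe: each argument is evaluated exactly once, before r8/r9 hold live
values. A hedged illustration (hypothetical caller):

    /* (*idx)++ happens once, when __gu_ptr is initialized, not again
     * inside the size switch. */
    static int read_next(int __user *uarr, int *idx, int *out)
    {
            return get_user(*out, &uarr[(*idx)++]) ? -EFAULT : 0;
    }
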
-
-/*
- * Complex access routines
- */
-extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
-					       unsigned long count);
-
-static inline unsigned long
-__copy_to_user (void __user *to, const void *from, unsigned long count)
-{
-       return __copy_user(to, (void __user *) from, count);
-}
-
-static inline unsigned long
-__copy_from_user (void *to, const void __user *from, unsigned long count)
-{
-       return __copy_user((void __user *) to, from, count);
-}
-
-#define __copy_to_user_inatomic                __copy_to_user
-#define __copy_from_user_inatomic      __copy_from_user
-#define copy_to_user(to, from, n)							\
-({											\
-	void __user *__cu_to = (to);							\
-	const void *__cu_from = (from);							\
-	long __cu_len = (n);								\
-											\
-	if (__access_ok(__cu_to, __cu_len, get_fs()))					\
-		__cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len);	\
-	__cu_len;									\
-})
-
-#define copy_from_user(to, from, n)							\
-({											\
-	void *__cu_to = (to);								\
-	const void __user *__cu_from = (from);						\
-	long __cu_len = (n);								\
-											\
-	__chk_user_ptr(__cu_from);							\
-	if (__access_ok(__cu_from, __cu_len, get_fs()))					\
-		__cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len);	\
-	__cu_len;									\
-})
-
-#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
-
-static inline unsigned long
-copy_in_user (void __user *to, const void __user *from, unsigned long n)
-{
-	if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
-               n = __copy_user(to, from, n);
-       return n;
-}
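
Note the return convention these wrappers inherit from __copy_user(): the
result is the number of bytes left uncopied, so zero means complete
success. A hedged sketch of the usual caller pattern (hypothetical helper):

    static int fetch_from_user(void *dst, const void __user *src,
                               unsigned long len)
    {
            unsigned long left = copy_from_user(dst, src, len);

            if (left)
                    return -EFAULT;     /* the trailing 'left' bytes faulted */
            return 0;
    }
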
-
-extern unsigned long __do_clear_user (void __user *, unsigned long);
-
-#define __clear_user(to, n)            __do_clear_user(to, n)
-
-#define clear_user(to, n)                                      \
-({                                                             \
-       unsigned long __cu_len = (n);                           \
-       if (__access_ok(to, __cu_len, get_fs()))                \
-               __cu_len = __do_clear_user(to, __cu_len);       \
-       __cu_len;                                               \
-})
-
-
-/*
- * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
- * strlen.
- */
-extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
-
-#define strncpy_from_user(to, from, n)                                 \
-({                                                                     \
-       const char __user * __sfu_from = (from);                        \
-       long __sfu_ret = -EFAULT;                                       \
-       if (__access_ok(__sfu_from, 0, get_fs()))                       \
-               __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
-       __sfu_ret;                                                      \
-})
-
-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-extern unsigned long __strlen_user (const char __user *);
-
-#define strlen_user(str)                               \
-({                                                     \
-       const char __user *__su_str = (str);            \
-       unsigned long __su_ret = 0;                     \
-       if (__access_ok(__su_str, 0, get_fs()))         \
-               __su_ret = __strlen_user(__su_str);     \
-       __su_ret;                                       \
-})
-
-/*
- * Returns: 0 if exception before NUL or reaching the supplied limit
- * (N), a value greater than N if the limit would be exceeded, else
- * strlen.
- */
-extern unsigned long __strnlen_user (const char __user *, long);
-
-#define strnlen_user(str, len)                                 \
-({                                                             \
-       const char __user *__su_str = (str);                    \
-       unsigned long __su_ret = 0;                             \
-       if (__access_ok(__su_str, 0, get_fs()))                 \
-               __su_ret = __strnlen_user(__su_str, len);       \
-       __su_ret;                                               \
-})
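
A hedged usage sketch tying the string helpers' return conventions together
(hypothetical caller; the error codes are illustrative):

    static int copy_user_name(char *dst, const char __user *src, long n)
    {
            long len = strncpy_from_user(dst, src, n);

            if (len < 0)
                    return -EFAULT;         /* faulted before the terminator */
            if (len >= n)
                    return -ENAMETOOLONG;   /* no NUL within n bytes */
            return 0;                       /* len == strlen(dst) */
    }
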
-
-#endif // CONFIG_VTI
-/* Generic code can't deal with the location-relative format that we use for compactness.  */
-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
-
-struct exception_table_entry {
-	int addr;	/* location-relative address of insn this fixup is for */
-	int cont;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
-};
-
-extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
-extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
-
-static inline int
-ia64_done_with_exception (struct pt_regs *regs)
-{
-       const struct exception_table_entry *e;
-       e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
-       if (e) {
-               ia64_handle_exception(regs, e);
-               return 1;
-       }
-       return 0;
-}
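
The location-relative format stores each address as a 32-bit offset from
the field's own position (compare the "1b-." operands in the .xdata4
directives earlier), keeping entries position-independent at half the size
of absolute 64-bit pointers. A standalone sketch of the decoding arithmetic
(the flag bit that requests r9-clearing is ignored here):

    #include <stdint.h>

    struct ex_demo { int addr; int cont; };     /* 32-bit self-relative fields */

    /* An absolute address is recovered by adding the stored offset to the
     * address of the field that holds it. */
    static uintptr_t ex_insn_addr(const struct ex_demo *e)
    {
            return (uintptr_t)&e->addr + e->addr;
    }

    static uintptr_t ex_cont_addr(const struct ex_demo *e)
    {
            return (uintptr_t)&e->cont + e->cont;
    }
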
-
-#endif /* _ASM_IA64_UACCESS_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

