
[Xen-changelog] More updating to linux 2.6.13 sources



# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID 4e4f1db8ea94e57bbf0f318153b44942ace8a1f5
# Parent  b7276814008c9c924fceecf6fd9f67ccddaadcb2
More updating to linux 2.6.13 sources

diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile    Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/Makefile    Wed Aug 31 22:55:04 2005
@@ -23,7 +23,8 @@
 ifeq ($(CONFIG_VTI),y)
 OBJS += vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\
        vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
-       vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o pal_emul.o
+       vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o \
+       pal_emul.o vmx_irq_ia64.o
 endif
 
 # files from xen/arch/ia64/linux/lib (linux/arch/ia64/lib)
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/linux-xen/efi.c
--- a/xen/arch/ia64/linux-xen/efi.c     Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/linux-xen/efi.c     Wed Aug 31 22:55:04 2005
@@ -418,6 +418,38 @@
                        (*callback)(start, end, arg);
        }
 }
+
+/*
+ * Walk the EFI memory map to pull out leftover pages in the lower
+ * memory regions which do not end up in the regular memory map and
+ * stick them into the uncached allocator
+ *
+ * The regular walk function is significantly more complex than the
+ * uncached walk which means it really doesn't make sense to try and
+ * merge the two.
+ */
+void __init
+efi_memmap_walk_uc (efi_freemem_callback_t callback)
+{
+       void *efi_map_start, *efi_map_end, *p;
+       efi_memory_desc_t *md;
+       u64 efi_desc_size, start, end;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               md = p;
+               if (md->attribute == EFI_MEMORY_UC) {
+                       start = PAGE_ALIGN(md->phys_addr);
+                       end = PAGE_ALIGN((md->phys_addr+(md->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK);
+                       if ((*callback)(start, end, NULL) < 0)
+                               return;
+               }
+       }
+}
+
 
 /*
  * Look for the PAL_CODE region reported by EFI and maps it using an
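
[For context, efi_memmap_walk_uc() above hands every EFI_MEMORY_UC range to an
efi_freemem_callback_t, and a non-negative return keeps the walk going. A
minimal consumer sketch follows -- hypothetical names, with the 2.6.13-style
int (*)(u64, u64, void *) callback signature assumed; note the walker passes
arg=NULL, so any state has to live elsewhere:

    /* illustrative only, not part of the changeset */
    static unsigned long uc_total_bytes;

    static int __init
    account_uc_range (u64 start, u64 end, void *arg)
    {
            uc_total_bytes += end - start;  /* tally uncached memory found */
            return 0;                       /* non-negative: keep walking */
    }

    /* ... efi_memmap_walk_uc(account_uc_range); ... */
]
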
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/linux-xen/entry.S
--- a/xen/arch/ia64/linux-xen/entry.S   Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/linux-xen/entry.S   Wed Aug 31 22:55:04 2005
@@ -175,7 +175,7 @@
        mov rp=loc0
        br.ret.sptk.many rp
 END(sys_clone)
-#endif /* !XEN */
+#endif
 
 /*
  * prev_task <- ia64_switch_to(struct task_struct *next)
@@ -191,12 +191,14 @@
 
        adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
        movl r25=init_task
+#ifdef XEN
        movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
        ld8 r27=[r27]
        adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
-#ifdef XEN
        dep r20=0,in0,60,4              // physical address of "next"
 #else
+       mov r27=IA64_KR(CURRENT_STACK)
+       adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
        dep r20=0,in0,61,3              // physical address of "next"
 #endif
        ;;
@@ -215,8 +217,12 @@
        ;;
 (p6)   srlz.d
        ld8 sp=[r21]                    // load kernel stack pointer of new task
+#ifdef XEN
        movl r8=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
        st8 [r8]=in0
+#else
+       mov IA64_KR(CURRENT)=in0        // update "current" application register
+#endif
        mov r8=r13                      // return pointer to previously running task
        mov r13=in0                     // set "current" pointer
        ;;
@@ -250,8 +256,14 @@
        mov cr.ifa=in0                  // VA of next task...
        ;;
        mov r25=IA64_TR_CURRENT_STACK
+#ifdef XEN
        movl r8=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
        st8 [r8]=r26
+       
+#else
+       mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
+#endif
+       ;;
        itr.d dtr[r25]=r23              // wire in new mapping...
        br.cond.sptk .done
 END(ia64_switch_to)
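
[The XEN branches in ia64_switch_to above all make the same substitution:
native Linux keeps "current" and the currently mapped stack page in the
ar.k6/ar.k7 kernel registers (IA64_KR(CURRENT), IA64_KR(CURRENT_STACK)),
while Xen reads and writes a per-CPU cpu_kr shadow area in memory instead.
A rough C model of the idea -- the shadow layout here is an assumption for
illustration, not quoted from the Xen sources:

    /* illustrative only */
    struct cpu_kr_shadow {
            unsigned long kr[8];    /* shadows ar.k0 .. ar.k7 */
    };
    /* native:  curr = (void *) ia64_getreg(_IA64_REG_AR_KR6);   */
    /* Xen:     curr = this_cpu_kr_shadow->kr[6];  (memory load) */
]
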
@@ -494,18 +506,6 @@
 END(load_switch_stack)
 
 #ifndef XEN
-GLOBAL_ENTRY(__ia64_syscall)
-       .regstk 6,0,0,0
-       mov r15=in5                             // put syscall number in place
-       break __BREAK_SYSCALL
-       movl r2=errno
-       cmp.eq p6,p7=-1,r10
-       ;;
-(p6)   st4 [r2]=r8
-(p6)   mov r8=-1
-       br.ret.sptk.many rp
-END(__ia64_syscall)
-
 GLOBAL_ENTRY(execve)
        mov r15=__NR_execve                     // put syscall number in place
        break __BREAK_SYSCALL
@@ -672,7 +672,7 @@
  *           r8-r11: restored (syscall return value(s))
  *              r12: restored (user-level stack pointer)
  *              r13: restored (user-level thread pointer)
- *              r14: cleared
+ *              r14: set to __kernel_syscall_via_epc
  *              r15: restored (syscall #)
  *          r16-r17: cleared
  *              r18: user-level b6
@@ -693,7 +693,7 @@
  *               pr: restored (user-level pr)
  *               b0: restored (user-level rp)
  *               b6: restored
- *               b7: cleared
+ *               b7: set to __kernel_syscall_via_epc
  *          ar.unat: restored (user-level ar.unat)
  *           ar.pfs: restored (user-level ar.pfs)
  *           ar.rsc: restored (user-level ar.rsc)
@@ -743,15 +743,15 @@
(p6)   ld4 r31=[r18]                           // load current_thread_info()->flags
 #endif
        ld8 r19=[r2],PT(B6)-PT(LOADRS)          // load ar.rsc value for "loadrs"
-       mov b7=r0               // clear b7
-       ;;
-       ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)    // load ar.bspstore (may be garbage)
+       nop.i 0
+       ;;
+       mov r16=ar.bsp                          // M2  get existing backing store pointer
        ld8 r18=[r2],PT(R9)-PT(B6)              // load b6
 #ifndef XEN
(p6)   and r15=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
 #endif
        ;;
-       mov r16=ar.bsp                          // M2  get existing backing store pointer
+       ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)    // load ar.bspstore (may be garbage)
 #ifndef XEN
 (p6)   cmp4.ne.unc p6,p0=r15, r0               // any special work pending?
 (p6)   br.cond.spnt .work_pending_syscall
@@ -760,63 +760,74 @@
        // start restoring the state saved on the kernel stack (struct pt_regs):
        ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
        ld8 r11=[r3],PT(CR_IIP)-PT(R11)
-       mov f6=f0               // clear f6
+(pNonSys) break 0              //      bug check: we shouldn't be here if pNonSys is TRUE!
        ;;
        invala                  // M0|1 invalidate ALAT
-       rsm psr.i | psr.ic      // M2 initiate turning off of interrupt and interruption collection
-       mov f9=f0               // clear f9
-
-       ld8 r29=[r2],16         // load cr.ipsr
-       ld8 r28=[r3],16                 // load cr.iip
-       mov f8=f0               // clear f8
+       rsm psr.i | psr.ic      // M2   turn off interrupts and interruption collection
+       cmp.eq p9,p0=r0,r0      // A    set p9 to indicate that we should restore cr.ifs
+
+       ld8 r29=[r2],16         // M0|1 load cr.ipsr
+       ld8 r28=[r3],16         // M0|1 load cr.iip
+       mov r22=r0              // A    clear r22
        ;;
        ld8 r30=[r2],16         // M0|1 load cr.ifs
-       mov.m ar.ssd=r0         // M2 clear ar.ssd
-       cmp.eq p9,p0=r0,r0      // set p9 to indicate that we should restore cr.ifs
-       ;;
        ld8 r25=[r3],16         // M0|1 load ar.unat
-       mov.m ar.csd=r0         // M2 clear ar.csd
-       mov r22=r0              // clear r22
+(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
        ;;
        ld8 r26=[r2],PT(B0)-PT(AR_PFS)  // M0|1 load ar.pfs
-(pKStk)        mov r22=psr             // M2 read PSR now that interrupts are disabled
-       mov f10=f0              // clear f10
-       ;;
-       ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
-       ld8 r27=[r3],PT(PR)-PT(AR_RSC)  // load ar.rsc
-       mov f11=f0              // clear f11
-       ;;
-       ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)    // load ar.rnat (may be garbage)
-       ld8 r31=[r3],PT(R1)-PT(PR)              // load predicates
-(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
-       ;;
-       ld8 r20=[r2],PT(R12)-PT(AR_FPSR)        // load ar.fpsr
-       ld8.fill r1=[r3],16     // load r1
-(pUStk) mov r17=1
-       ;;
-       srlz.d                  // M0  ensure interruption collection is off
-       ld8.fill r13=[r3],16
-       mov f7=f0               // clear f7
-       ;;
-       ld8.fill r12=[r2]       // restore r12 (sp)
-       ld8.fill r15=[r3]       // restore r15
+(pKStk)        mov r22=psr                     // M2   read PSR now that interrupts are disabled
+       nop 0
+       ;;
+       ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
+       ld8 r27=[r3],PT(PR)-PT(AR_RSC)  // M0|1 load ar.rsc
+       mov f6=f0                       // F    clear f6
+       ;;
+       ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)    // M0|1 load ar.rnat (may be garbage)
+       ld8 r31=[r3],PT(R1)-PT(PR)              // M0|1 load predicates
+       mov f7=f0                               // F    clear f7
+       ;;
+       ld8 r20=[r2],PT(R12)-PT(AR_FPSR)        // M0|1 load ar.fpsr
+       ld8.fill r1=[r3],16                     // M0|1 load r1
+(pUStk) mov r17=1                              // A
+       ;;
+(pUStk) st1 [r14]=r17                          // M2|3
+       ld8.fill r13=[r3],16                    // M0|1
+       mov f8=f0                               // F    clear f8
+       ;;
+       ld8.fill r12=[r2]                       // M0|1 restore r12 (sp)
+       ld8.fill r15=[r3]                       // M0|1 restore r15
+       mov b6=r18                              // I0   restore b6
+
 #ifdef XEN
-       movl r3=THIS_CPU(ia64_phys_stacked_size_p8)
+       movl r17=THIS_CPU(ia64_phys_stacked_size_p8)    // A
 #else
-       addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
-#endif
-       ;;
-(pUStk)        ld4 r3=[r3]             // r3 = cpu_data->phys_stacked_size_p8
-(pUStk) st1 [r14]=r17
-       mov b6=r18              // I0  restore b6
-       ;;
-       mov r14=r0              // clear r14
-       shr.u r18=r19,16        // I0|1 get byte size of existing "dirty" partition
-(pKStk) br.cond.dpnt.many skip_rbs_switch
-
-       mov.m ar.ccv=r0         // clear ar.ccv
-(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
-       br.cond.sptk.many rbs_switch
+       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
+#endif
+       mov f9=f0                                       // F    clear f9
+(pKStk) br.cond.dpnt.many skip_rbs_switch              // B
+
+       srlz.d                          // M0   ensure interruption collection is off (for cover)
+       shr.u r18=r19,16                // I0|1 get byte size of existing "dirty" partition
+       cover                           // B    add current frame into dirty partition & set cr.ifs
+       ;;
+(pUStk) ld4 r17=[r17]                  // M0|1 r17 = cpu_data->phys_stacked_size_p8
+       mov r19=ar.bsp                  // M2   get new backing store pointer
+       mov f10=f0                      // F    clear f10
+
+       nop.m 0
+#ifdef XEN
+       mov r14=r0
+#else
+       movl r14=__kernel_syscall_via_epc // X
+#endif
+       ;;
+       mov.m ar.csd=r0                 // M2   clear ar.csd
+       mov.m ar.ccv=r0                 // M2   clear ar.ccv
+       mov b7=r14                      // I0   clear b7 (hint with __kernel_syscall_via_epc)
+
+       mov.m ar.ssd=r0                 // M2   clear ar.ssd
+       mov f11=f0                      // F    clear f11
+       br.cond.sptk.many rbs_switch    // B
 END(ia64_leave_syscall)
 
 #ifdef CONFIG_IA32_SUPPORT
@@ -829,7 +840,7 @@
        st8.spill [r2]=r8       // store return value in slot for r8 and set unat bit
        .mem.offset 8,0
        st8.spill [r3]=r0       // clear error indication in slot for r10 and set unat bit
-END(ia64_ret_from_ia32_execve_syscall)
+END(ia64_ret_from_ia32_execve)
        // fall through
 #endif /* CONFIG_IA32_SUPPORT */
 GLOBAL_ENTRY(ia64_leave_kernel)
@@ -884,11 +895,15 @@
        ld8 r28=[r2],8          // load b6
        adds r29=PT(R24)+16,r12
 
+#ifdef XEN
        ld8.fill r16=[r3]
+       adds r3=PT(AR_CSD)-PT(R16),r3
+#else
+       ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
+#endif
        adds r30=PT(AR_CCV)+16,r12
(p6)   and r19=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
        ;;
-       adds r3=PT(AR_CSD)-PT(R16),r3
        ld8.fill r24=[r29]
        ld8 r15=[r30]           // load ar.ccv
 (p6)   cmp4.ne.unc p6,p0=r19, r0               // any special work pending?
@@ -944,14 +959,18 @@
        ldf.fill f7=[r2],PT(F11)-PT(F7)
        ldf.fill f8=[r3],32
        ;;
-       srlz.i                  // ensure interruption collection is off
+       srlz.d  // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
        mov ar.ccv=r15
        ;;
        ldf.fill f11=[r2]
        bsw.0                   // switch back to bank 0 (no stop bit required beforehand...)
        ;;
+#ifdef XEN
 (pUStk) movl r18=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
 (pUStk) ld8 r18=[r18]
+#else
+(pUStk)        mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
+#endif
        adds r16=PT(CR_IPSR)+16,r12
        adds r17=PT(CR_IIP)+16,r12
 
@@ -1009,11 +1028,10 @@
         * NOTE: alloc, loadrs, and cover can't be predicated.
         */
 (pNonSys) br.cond.dpnt dont_preserve_current_frame
-
+       cover                           // add current frame into dirty partition and set cr.ifs
+       ;;
+       mov r19=ar.bsp                  // get new backing store pointer
 rbs_switch:
-       cover                           // add current frame into dirty partition and set cr.ifs
-       ;;
-       mov r19=ar.bsp                  // get new backing store pointer
        sub r16=r16,r18                 // krbs = old bsp - size of dirty partition
        cmp.ne p9,p0=r0,r0              // clear p9 to skip restore of cr.ifs
        ;;
@@ -1088,14 +1106,14 @@
        mov loc5=0
        mov loc6=0
        mov loc7=0
-(pRecurse) br.call.sptk.few b0=rse_clear_invalid
+(pRecurse) br.call.dptk.few b0=rse_clear_invalid
        ;;
        mov loc8=0
        mov loc9=0
        cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
        mov loc10=0
        mov loc11=0
-(pReturn) br.ret.sptk.many b0
+(pReturn) br.ret.dptk.many b0
 #endif /* !CONFIG_ITANIUM */
 #      undef pRecurse
 #      undef pReturn
@@ -1249,7 +1267,7 @@
        ;;
 (pNonSys) mov out2=0                           // out2==0 => not a syscall
        .fframe 16
-       .spillpsp ar.unat, 16                   // (note that offset is relative to psp+0x10!)
+       .spillsp ar.unat, 16
        st8 [sp]=r9,-16                         // allocate space for ar.unat and save it
        st8 [out1]=loc1,-8                      // save ar.pfs, out1=&sigscratch
        .body
@@ -1275,7 +1293,7 @@
        adds out2=8,sp                          // out2=&sigscratch->ar_pfs
        ;;
        .fframe 16
-       .spillpsp ar.unat, 16                   // (note that offset is relative to psp+0x10!)
+       .spillsp ar.unat, 16
        st8 [sp]=r9,-16                         // allocate space for ar.unat and save it
        st8 [out2]=loc1,-8                      // save ar.pfs, out2=&sigscratch
        .body
@@ -1322,7 +1340,7 @@
        stf.spill [r17]=f11
        adds out0=16,sp                         // out0 = &sigscratch
        br.call.sptk.many rp=ia64_rt_sigreturn
-.ret19:        .restore sp 0
+.ret19:        .restore sp,0
        adds sp=16,sp
        ;;
        ld8 r9=[sp]                             // load new ar.unat
@@ -1486,7 +1504,7 @@
        data8 sys_msgrcv
        data8 sys_msgctl
        data8 sys_shmget
-       data8 ia64_shmat
+       data8 sys_shmat
        data8 sys_shmdt                         // 1115
        data8 sys_shmctl
        data8 sys_syslog
@@ -1646,12 +1664,12 @@
        data8 sys_add_key
        data8 sys_request_key
        data8 sys_keyctl
+       data8 sys_ioprio_set
+       data8 sys_ioprio_get                    // 1275
        data8 sys_ni_syscall
-       data8 sys_ni_syscall                    // 1275
-       data8 sys_ni_syscall
-       data8 sys_ni_syscall
-       data8 sys_ni_syscall
-       data8 sys_ni_syscall
+       data8 sys_inotify_init
+       data8 sys_inotify_add_watch
+       data8 sys_inotify_rm_watch
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/linux-xen/entry.h
--- a/xen/arch/ia64/linux-xen/entry.h   Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/linux-xen/entry.h   Wed Aug 31 22:55:04 2005
@@ -7,12 +7,6 @@
 #define PRED_LEAVE_SYSCALL     1 /* TRUE iff leave from syscall */
 #define PRED_KERNEL_STACK      2 /* returning to kernel-stacks? */
 #define PRED_USER_STACK                3 /* returning to user-stacks? */
-#ifdef CONFIG_VTI
-#define PRED_EMUL              2 /* Need to save r4-r7 for inst emulation */
-#define PRED_NON_EMUL          3 /* No need to save r4-r7 for normal path */
-#define PRED_BN0               6 /* Guest is in bank 0 */
-#define PRED_BN1               7 /* Guest is in bank 1 */
-#endif // CONFIG_VTI
 #define PRED_SYSCALL           4 /* inside a system call? */
 #define PRED_NON_SYSCALL       5 /* complement of PRED_SYSCALL */
 
@@ -23,21 +17,26 @@
 # define pLvSys                PASTE(p,PRED_LEAVE_SYSCALL)
 # define pKStk         PASTE(p,PRED_KERNEL_STACK)
 # define pUStk         PASTE(p,PRED_USER_STACK)
-#ifdef CONFIG_VTI
-# define pEml          PASTE(p,PRED_EMUL)
-# define pNonEml       PASTE(p,PRED_NON_EMUL)
-# define pBN0          PASTE(p,PRED_BN0)
-# define pBN1          PASTE(p,PRED_BN1)
-#endif // CONFIG_VTI
 # define pSys          PASTE(p,PRED_SYSCALL)
 # define pNonSys       PASTE(p,PRED_NON_SYSCALL)
 #endif
 
 #define PT(f)          (IA64_PT_REGS_##f##_OFFSET)
 #define SW(f)          (IA64_SWITCH_STACK_##f##_OFFSET)
+
+#ifdef XEN
 #ifdef CONFIG_VTI
+#define PRED_EMUL              2 /* Need to save r4-r7 for inst emulation */
+#define PRED_NON_EMUL          3 /* No need to save r4-r7 for normal path */
+#define PRED_BN0               6 /* Guest is in bank 0 */
+#define PRED_BN1               7 /* Guest is in bank 1 */
+# define pEml          PASTE(p,PRED_EMUL)
+# define pNonEml       PASTE(p,PRED_NON_EMUL)
+# define pBN0          PASTE(p,PRED_BN0)
+# define pBN1          PASTE(p,PRED_BN1)
 #define VPD(f)      (VPD_##f##_START_OFFSET)
 #endif // CONFIG_VTI
+#endif
 
 #define PT_REGS_SAVES(off)                     \
        .unwabi 3, 'i';                         \
@@ -75,7 +74,7 @@
        .spillsp @priunat,SW(AR_UNAT)+16+(off);                                 \
        .spillsp ar.rnat,SW(AR_RNAT)+16+(off);                                  \
        .spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off);                          \
-       .spillsp pr,SW(PR)+16+(off))
+       .spillsp pr,SW(PR)+16+(off)
 
 #define DO_SAVE_SWITCH_STACK                   \
        movl r28=1f;                            \
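
[Background on the pKStk/pUStk/pSys aliases shuffled in this file: PASTE()
token-pastes "p" onto the predicate number, so each alias names a real
assembler predicate register. A sketch of the mechanism -- the two-step
paste helper is assumed, since this hunk only shows its uses:

    /* illustrative only */
    #define PASTE2(x,y)     x##y
    #define PASTE(x,y)      PASTE2(x,y)     /* expand y first, then paste */
    #define PRED_KERNEL_STACK 2
    #define pKStk PASTE(p,PRED_KERNEL_STACK)        /* expands to: p2 */
    /* so "(pKStk) ld8 r3=[r3]" executes only when predicate p2 is set */
]
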
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/linux-xen/head.S
--- a/xen/arch/ia64/linux-xen/head.S    Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/linux-xen/head.S    Wed Aug 31 22:55:04 2005
@@ -15,6 +15,8 @@
  * Copyright (C) 1999 Don Dugger <Don.Dugger@xxxxxxxxx>
  * Copyright (C) 2002 Fenghua Yu <fenghua.yu@xxxxxxxxx>
  *   -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
+ * Copyright (C) 2004 Ashok Raj <ashok.raj@xxxxxxxxx>
+ *   Support for CPU Hotplug
  */
 
 #include <linux/config.h>
@@ -29,6 +31,146 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
+#include <asm/mca_asm.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define SAL_PSR_BITS_TO_SET                            \
+       (IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL)
+
+#define SAVE_FROM_REG(src, ptr, dest)  \
+       mov dest=src;;                                          \
+       st8 [ptr]=dest,0x08
+
+#define RESTORE_REG(reg, ptr, _tmp)            \
+       ld8 _tmp=[ptr],0x08;;                           \
+       mov reg=_tmp
+
+#define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\
+       mov ar.lc=IA64_NUM_DBG_REGS-1;;                         \
+       mov _idx=0;;                                                            \
+1:                                                                             \
+       SAVE_FROM_REG(_breg[_idx], ptr, _dest);;        \
+       add _idx=1,_idx;;                                                       \
+       br.cloop.sptk.many 1b
+
+#define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\
+       mov ar.lc=IA64_NUM_DBG_REGS-1;;                 \
+       mov _idx=0;;                                                    \
+_lbl:  RESTORE_REG(_breg[_idx], ptr, _tmp);;   \
+       add _idx=1, _idx;;                                              \
+       br.cloop.sptk.many _lbl
+
+#define SAVE_ONE_RR(num, _reg, _tmp) \
+       movl _tmp=(num<<61);;   \
+       mov _reg=rr[_tmp]
+
+#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
+       SAVE_ONE_RR(0,_r0, _tmp);; \
+       SAVE_ONE_RR(1,_r1, _tmp);; \
+       SAVE_ONE_RR(2,_r2, _tmp);; \
+       SAVE_ONE_RR(3,_r3, _tmp);; \
+       SAVE_ONE_RR(4,_r4, _tmp);; \
+       SAVE_ONE_RR(5,_r5, _tmp);; \
+       SAVE_ONE_RR(6,_r6, _tmp);; \
+       SAVE_ONE_RR(7,_r7, _tmp);;
+
+#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
+       st8 [ptr]=_r0, 8;; \
+       st8 [ptr]=_r1, 8;; \
+       st8 [ptr]=_r2, 8;; \
+       st8 [ptr]=_r3, 8;; \
+       st8 [ptr]=_r4, 8;; \
+       st8 [ptr]=_r5, 8;; \
+       st8 [ptr]=_r6, 8;; \
+       st8 [ptr]=_r7, 8;;
+
+#define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp) \
+       mov             ar.lc=0x08-1;;                                          \
+       movl    _idx1=0x00;;                                            \
+RestRR:                                                                        \
+       dep.z   _idx2=_idx1,61,3;;                                      \
+       ld8             _tmp=[ptr],8;;                                          \
+       mov             rr[_idx2]=_tmp;;                                        \
+       srlz.d;;                                                                \
+       add             _idx1=1,_idx1;;                                         \
+       br.cloop.sptk.few       RestRR
+
+#define SET_AREA_FOR_BOOTING_CPU(reg1, reg2) \
+       movl reg1=sal_state_for_booting_cpu;;   \
+       ld8 reg2=[reg1];;
+
+/*
+ * Adjust region registers saved before starting to save
+ * break regs and rest of the states that need to be preserved.
+ */
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred)  \
+       SAVE_FROM_REG(b0,_reg1,_reg2);;                                         \
+       SAVE_FROM_REG(b1,_reg1,_reg2);;                                         \
+       SAVE_FROM_REG(b2,_reg1,_reg2);;                                         \
+       SAVE_FROM_REG(b3,_reg1,_reg2);;                                         \
+       SAVE_FROM_REG(b4,_reg1,_reg2);;                                         \
+       SAVE_FROM_REG(b5,_reg1,_reg2);;                                         \
+       st8 [_reg1]=r1,0x08;;                                                   \
+       st8 [_reg1]=r12,0x08;;                                                  \
+       st8 [_reg1]=r13,0x08;;                                                  \
+       SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);;                            \
+       SAVE_FROM_REG(ar.pfs,_reg1,_reg2);;                                     \
+       SAVE_FROM_REG(ar.rnat,_reg1,_reg2);;                            \
+       SAVE_FROM_REG(ar.unat,_reg1,_reg2);;                            \
+       SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);;                        \
+       SAVE_FROM_REG(cr.dcr,_reg1,_reg2);;                                     \
+       SAVE_FROM_REG(cr.iva,_reg1,_reg2);;                                     \
+       SAVE_FROM_REG(cr.pta,_reg1,_reg2);;                                     \
+       SAVE_FROM_REG(cr.itv,_reg1,_reg2);;                                     \
+       SAVE_FROM_REG(cr.pmv,_reg1,_reg2);;                                     \
+       SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);;                            \
+       SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);;                            \
+       SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);;                            \
+       st8 [_reg1]=r4,0x08;;                                                   \
+       st8 [_reg1]=r5,0x08;;                                                   \
+       st8 [_reg1]=r6,0x08;;                                                   \
+       st8 [_reg1]=r7,0x08;;                                                   \
+       st8 [_reg1]=_pred,0x08;;                                                \
+       SAVE_FROM_REG(ar.lc, _reg1, _reg2);;                            \
+       stf.spill.nta [_reg1]=f2,16;;                                           \
+       stf.spill.nta [_reg1]=f3,16;;                                           \
+       stf.spill.nta [_reg1]=f4,16;;                                           \
+       stf.spill.nta [_reg1]=f5,16;;                                           \
+       stf.spill.nta [_reg1]=f16,16;;                                          \
+       stf.spill.nta [_reg1]=f17,16;;                                          \
+       stf.spill.nta [_reg1]=f18,16;;                                          \
+       stf.spill.nta [_reg1]=f19,16;;                                          \
+       stf.spill.nta [_reg1]=f20,16;;                                          \
+       stf.spill.nta [_reg1]=f21,16;;                                          \
+       stf.spill.nta [_reg1]=f22,16;;                                          \
+       stf.spill.nta [_reg1]=f23,16;;                                          \
+       stf.spill.nta [_reg1]=f24,16;;                                          \
+       stf.spill.nta [_reg1]=f25,16;;                                          \
+       stf.spill.nta [_reg1]=f26,16;;                                          \
+       stf.spill.nta [_reg1]=f27,16;;                                          \
+       stf.spill.nta [_reg1]=f28,16;;                                          \
+       stf.spill.nta [_reg1]=f29,16;;                                          \
+       stf.spill.nta [_reg1]=f30,16;;                                          \
+       stf.spill.nta [_reg1]=f31,16;;
+
+#else
+#define SET_AREA_FOR_BOOTING_CPU(a1, a2)
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2, a3)
+#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
+#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
+#endif
+
+#ifdef XEN
+#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
+       movl _tmp1=(num << 61);;        \
+       movl _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
+       mov rr[_tmp1]=_tmp2
+#else
+#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
+       movl _tmp1=(num << 61);;        \
+       mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
+       mov rr[_tmp1]=_tmp2
+#endif
 
        .section __special_page_section,"ax"
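
[SET_ONE_RR above packs a complete region-register value. The layout it
relies on is the architected ia64 RR format -- my summary, not text from the
patch: bit 0 enables the VHPT walker, bits 7:2 carry log2 of the preferred
page size, and the region ID from ia64_rid() starts at bit 8. The same
computation in C:

    /* illustrative only -- mirrors ((rid << 8) | (pgsize << 2) | vhpt) */
    static inline unsigned long rr_value(unsigned long rid,
                                         unsigned long page_shift,
                                         unsigned long vhpt_enable)
    {
            return (rid << 8) | (page_shift << 2) | vhpt_enable;
    }
]
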
 
@@ -63,6 +205,12 @@
        ;;
        srlz.i
        ;;
+       /*
+        * Save the region registers, predicate before they get clobbered
+        */
+       SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15);
+       mov r25=pr;;
+
        /*
         * Initialize kernel region registers:
         *      rr[0]: VHPT enabled, page size = PAGE_SHIFT
@@ -76,32 +224,14 @@
         * We initialize all of them to prevent inadvertently assuming
         * something about the state of address translation early in boot.
         */
-       movl r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r7=(0<<61)
-       movl r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r9=(1<<61)
-       movl r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r11=(2<<61)
-       movl r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r13=(3<<61)
-       movl r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r15=(4<<61)
-       movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-       movl r17=(5<<61)
-       movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
-       movl r19=(6<<61)
-       movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
-       movl r21=(7<<61)
-       ;;
-       mov rr[r7]=r6
-       mov rr[r9]=r8
-       mov rr[r11]=r10
-       mov rr[r13]=r12
-       mov rr[r15]=r14
-       mov rr[r17]=r16
-       mov rr[r19]=r18
-       mov rr[r21]=r20
-       ;;
+       SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);;
+       SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);;
+       SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);;
+       SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);;
+       SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);;
+       SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);;
+       SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);;
+       SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);;
        /*
         * Now pin mappings into the TLB for kernel text and data
         */
@@ -129,13 +259,13 @@
        /*
         * Switch into virtual mode:
         */
-#ifdef CONFIG_VTI
-       movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH \
+#if defined(XEN) && defined(CONFIG_VTI)
+       movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH\
                  |IA64_PSR_DI)
-#else // CONFIG_VTI
+#else
        movl 
r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
                  |IA64_PSR_DI)
-#endif // CONFIG_VTI
+#endif
        ;;
        mov cr.ipsr=r16
        movl r17=1f
@@ -147,12 +277,18 @@
        ;;
 1:     // now we are in virtual mode
 
+       SET_AREA_FOR_BOOTING_CPU(r2, r16);
+
+       STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15);
+       SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25)
+       ;;
+
        // set IVT entry point---can't access I/O ports without it
-#ifdef CONFIG_VTI
-    movl r3=vmx_ia64_ivt
-#else // CONFIG_VTI
+#if defined(XEN) && defined(CONFIG_VTI)
+       movl r3=vmx_ia64_ivt
+#else
        movl r3=ia64_ivt
-#endif // CONFIG_VTI
+#endif
        ;;
        mov cr.iva=r3
        movl r2=FPSR_DEFAULT
@@ -220,23 +356,24 @@
 
 .load_current:
        // load the "current" pointer (r13) and ar.k6 with the current task
-#ifdef CONFIG_VTI
-       mov r21=r2              // virtual address
+#if defined(XEN) && defined(CONFIG_VTI)
+       mov r21=r2
        ;;
        bsw.1
        ;;
-#else // CONFIG_VTI
-       mov IA64_KR(CURRENT)=r2
+#else
+       mov IA64_KR(CURRENT)=r2         // virtual address
        mov IA64_KR(CURRENT_STACK)=r16
-#endif // CONFIG_VTI
+#endif
        mov r13=r2
        /*
-        * Reserve space at the top of the stack for "struct pt_regs".  Kernel threads
-        * don't store interesting values in that structure, but the space still needs
-        * to be there because time-critical stuff such as the context switching can
-        * be implemented more efficiently (for example, __switch_to()
+        * Reserve space at the top of the stack for "struct pt_regs".  Kernel
+        * threads don't store interesting values in that structure, but the space
+        * still needs to be there because time-critical stuff such as the context
+        * switching can be implemented more efficiently (for example, __switch_to()
         * always sets the psr.dfh bit of the task it is switching to).
         */
+
        addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
        addl r2=IA64_RBS_OFFSET,r2      // initialize the RSE
        mov ar.rsc=0            // place RSE in enforced lazy mode
@@ -278,9 +415,13 @@
        br.call.sptk.many b0=console_print
 
 self:  hint @pause
+#ifdef XEN
        ;;
        br.sptk.many self               // endless loop
        ;;
+#else
+       br.sptk.many self               // endless loop
+#endif
 END(_start)
 
 GLOBAL_ENTRY(ia64_save_debug_regs)
@@ -1023,4 +1164,98 @@
 
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+GLOBAL_ENTRY(ia64_jump_to_sal)
+       alloc r16=ar.pfs,1,0,0,0;;
+       rsm psr.i  | psr.ic
+{
+       flushrs
+       srlz.i
+}
+       tpa r25=in0
+       movl r18=tlb_purge_done;;
+       DATA_VA_TO_PA(r18);;
+       mov b1=r18      // Return location
+       movl r18=ia64_do_tlb_purge;;
+       DATA_VA_TO_PA(r18);;
+       mov b2=r18      // doing tlb_flush work
+       mov ar.rsc=0  // Put RSE  in enforced lazy, LE mode
+       movl r17=1f;;
+       DATA_VA_TO_PA(r17);;
+       mov cr.iip=r17
+       movl r16=SAL_PSR_BITS_TO_SET;;
+       mov cr.ipsr=r16
+       mov cr.ifs=r0;;
+       rfi;;
+1:
+       /*
+        * Invalidate all TLB data/inst
+        */
+       br.sptk.many b2;; // jump to tlb purge code
+
+tlb_purge_done:
+       RESTORE_REGION_REGS(r25, r17,r18,r19);;
+       RESTORE_REG(b0, r25, r17);;
+       RESTORE_REG(b1, r25, r17);;
+       RESTORE_REG(b2, r25, r17);;
+       RESTORE_REG(b3, r25, r17);;
+       RESTORE_REG(b4, r25, r17);;
+       RESTORE_REG(b5, r25, r17);;
+       ld8 r1=[r25],0x08;;
+       ld8 r12=[r25],0x08;;
+       ld8 r13=[r25],0x08;;
+       RESTORE_REG(ar.fpsr, r25, r17);;
+       RESTORE_REG(ar.pfs, r25, r17);;
+       RESTORE_REG(ar.rnat, r25, r17);;
+       RESTORE_REG(ar.unat, r25, r17);;
+       RESTORE_REG(ar.bspstore, r25, r17);;
+       RESTORE_REG(cr.dcr, r25, r17);;
+       RESTORE_REG(cr.iva, r25, r17);;
+       RESTORE_REG(cr.pta, r25, r17);;
+       RESTORE_REG(cr.itv, r25, r17);;
+       RESTORE_REG(cr.pmv, r25, r17);;
+       RESTORE_REG(cr.cmcv, r25, r17);;
+       RESTORE_REG(cr.lrr0, r25, r17);;
+       RESTORE_REG(cr.lrr1, r25, r17);;
+       ld8 r4=[r25],0x08;;
+       ld8 r5=[r25],0x08;;
+       ld8 r6=[r25],0x08;;
+       ld8 r7=[r25],0x08;;
+       ld8 r17=[r25],0x08;;
+       mov pr=r17,-1;;
+       RESTORE_REG(ar.lc, r25, r17);;
+       /*
+        * Now Restore floating point regs
+        */
+       ldf.fill.nta f2=[r25],16;;
+       ldf.fill.nta f3=[r25],16;;
+       ldf.fill.nta f4=[r25],16;;
+       ldf.fill.nta f5=[r25],16;;
+       ldf.fill.nta f16=[r25],16;;
+       ldf.fill.nta f17=[r25],16;;
+       ldf.fill.nta f18=[r25],16;;
+       ldf.fill.nta f19=[r25],16;;
+       ldf.fill.nta f20=[r25],16;;
+       ldf.fill.nta f21=[r25],16;;
+       ldf.fill.nta f22=[r25],16;;
+       ldf.fill.nta f23=[r25],16;;
+       ldf.fill.nta f24=[r25],16;;
+       ldf.fill.nta f25=[r25],16;;
+       ldf.fill.nta f26=[r25],16;;
+       ldf.fill.nta f27=[r25],16;;
+       ldf.fill.nta f28=[r25],16;;
+       ldf.fill.nta f29=[r25],16;;
+       ldf.fill.nta f30=[r25],16;;
+       ldf.fill.nta f31=[r25],16;;
+
+       /*
+        * Now that we have done all the register restores
+        * we are now ready for the big DIVE to SAL Land
+        */
+       ssm psr.ic;;
+       srlz.d;;
+       br.ret.sptk.many b0;;
+END(ia64_jump_to_sal)
+#endif /* CONFIG_HOTPLUG_CPU */
+
 #endif /* CONFIG_SMP */
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/linux-xen/irq_ia64.c
--- a/xen/arch/ia64/linux-xen/irq_ia64.c        Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c        Wed Aug 31 22:55:04 2005
@@ -70,8 +70,7 @@
        pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
        vector = IA64_FIRST_DEVICE_VECTOR + pos;
        if (vector > IA64_LAST_DEVICE_VECTOR)
-               /* XXX could look for sharable vectors instead of panic'ing... */
-               panic("assign_irq_vector: out of interrupt vectors!");
+               return -ENOSPC;
        if (test_and_set_bit(pos, ia64_vector_mask))
                goto again;
        return vector;
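
[Behavioural change worth noting: assign_irq_vector() used to panic when the
device vectors ran out and now returns -ENOSPC, so callers must check for
failure. A hedged sketch of a call site -- hypothetical, with the 2.6-era
AUTO_ASSIGN convention assumed:

    /* illustrative only */
    int vector = assign_irq_vector(AUTO_ASSIGN);
    if (vector < 0)
            return vector;          /* propagate -ENOSPC, don't panic */
]
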
@@ -173,103 +172,6 @@
        irq_exit();
 }
 
-#ifdef  CONFIG_VTI
-#define vmx_irq_enter()                \
-       add_preempt_count(HARDIRQ_OFFSET);
-
-/* Now softirq will be checked when leaving hypervisor, or else
- * scheduler irq will be executed too early.
- */
-#define vmx_irq_exit(void)     \
-       sub_preempt_count(HARDIRQ_OFFSET);
-/*
- * That's where the IVT branches when we get an external
- * interrupt. This branches to the correct hardware IRQ handler via
- * function ptr.
- */
-void
-vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
-{
-       unsigned long saved_tpr;
-       int     wake_dom0 = 0;
-
-
-#if IRQ_DEBUG
-       {
-               unsigned long bsp, sp;
-
-               /*
-                * Note: if the interrupt happened while executing in
-                * the context switch routine (ia64_switch_to), we may
-                * get a spurious stack overflow here.  This is
-                * because the register and the memory stack are not
-                * switched atomically.
-                */
-               bsp = ia64_getreg(_IA64_REG_AR_BSP);
-               sp = ia64_getreg(_IA64_REG_AR_SP);
-
-               if ((sp - bsp) < 1024) {
-                       static unsigned char count;
-                       static long last_time;
-
-                       if (jiffies - last_time > 5*HZ)
-                               count = 0;
-                       if (++count < 5) {
-                               last_time = jiffies;
-                               printk("ia64_handle_irq: DANGER: less than "
-                                      "1KB of free stack space!!\n"
-                                      "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
-                       }
-               }
-       }
-#endif /* IRQ_DEBUG */
-
-       /*
-        * Always set TPR to limit maximum interrupt nesting depth to
-        * 16 (without this, it would be ~240, which could easily lead
-        * to kernel stack overflows).
-        */
-       vmx_irq_enter();
-       saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
-       ia64_srlz_d();
-       while (vector != IA64_SPURIOUS_INT_VECTOR) {
-           if (!IS_RESCHEDULE(vector)) {
-               ia64_setreg(_IA64_REG_CR_TPR, vector);
-               ia64_srlz_d();
-
-               if (vector != IA64_TIMER_VECTOR) {
-                       /* FIXME: Leave IRQ re-route later */
-                       vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
-                       wake_dom0 = 1;
-               }
-               else {  // FIXME: Handle Timer only now
-                       __do_IRQ(local_vector_to_irq(vector), regs);
-               }
-               
-               /*
-                * Disable interrupts and send EOI:
-                */
-               local_irq_disable();
-               ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
-           }
-           else {
-                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
-            }
-           ia64_eoi();
-           vector = ia64_get_ivr();
-       }
-       /*
-        * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
-        * handler needs to be able to wait for further keyboard interrupts, which can't
-        * come through until ia64_eoi() has been done.
-        */
-       vmx_irq_exit();
-       if ( wake_dom0 && current != dom0 ) 
-               vcpu_wake(dom0->vcpu[0]);
-}
-#endif
-
-
 #ifdef CONFIG_HOTPLUG_CPU
 /*
 * This function emulates an interrupt processing when a cpu is about to be
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/linux-xen/minstate.h
--- a/xen/arch/ia64/linux-xen/minstate.h        Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/linux-xen/minstate.h        Wed Aug 31 22:55:04 2005
@@ -26,7 +26,7 @@
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;                 /* if in kernel mode, use sp (r12) */   \
        ;;                                                                              \
(pUStk)        mov r18=ar.bsp;                                                         \
-(pUStk)        mov ar.rsc=0x3;         /* set eager mode, pl 0, little-endian, loadrs=0 */             \
+(pUStk)        mov ar.rsc=0x3;         /* set eager mode, pl 0, little-endian, loadrs=0 */
 
 #define MINSTATE_END_SAVE_MIN_VIRT                                             \
        bsw.1;                  /* switch back to bank 1 (must be last in insn group) */        \
@@ -41,7 +41,7 @@
(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;                                   \
(pKStk) ld8 r3 = [r3];;                                                        \
(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;                            \
-(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;                          \
+(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;                          \
(pUStk)        mov ar.rsc=0;           /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */     \
(pUStk)        addl r22=IA64_RBS_OFFSET,r1;            /* compute base of register backing store */    \
        ;;                                                                      \
@@ -50,7 +50,6 @@
(pUStk)        mov r23=ar.bspstore;                            /* save ar.bspstore */                  \
(pUStk)        dep r22=-1,r22,61,3;                    /* compute kernel virtual addr of RBS */        \
        ;;                                                                      \
-(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;         /* if in kernel mode, use sp (r12) */           \
(pUStk)        mov ar.bspstore=r22;                    /* switch to kernel RBS */                      \
        ;;                                                                      \
(pUStk)        mov r18=ar.bsp;                                                         \
@@ -61,9 +60,13 @@
        ;;
 
 #ifdef MINSTATE_VIRT
-# define MINSTATE_GET_CURRENT(reg)     \
-               movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;\
-               ld8 reg=[reg]
+#ifdef XEN
+# define MINSTATE_GET_CURRENT(reg)                                     \
+               movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;      \
+               ld8 reg=[reg]
+#else
+# define MINSTATE_GET_CURRENT(reg)     mov reg=IA64_KR(CURRENT)
+#endif
 # define MINSTATE_START_SAVE_MIN       MINSTATE_START_SAVE_MIN_VIRT
 # define MINSTATE_END_SAVE_MIN         MINSTATE_END_SAVE_MIN_VIRT
 #endif
@@ -172,8 +175,8 @@
        ;;                                                                      \
.mem.offset 0,0; st8.spill [r16]=r13,16;                                       \
.mem.offset 8,0; st8.spill [r17]=r21,16;       /* save ar.fpsr */              \
-       movl r13=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;                      \
-       ld8 r13=[r13];                  /* establish 'current' */               \
+       /* XEN mov r13=IA64_KR(CURRENT);        /* establish `current' */       \
+       MINSTATE_GET_CURRENT(r13);              /* XEN establish `current' */   \
        ;;                                                                      \
.mem.offset 0,0; st8.spill [r16]=r15,16;                                       \
.mem.offset 8,0; st8.spill [r17]=r14,16;                                       \
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/linux-xen/mm_contig.c
--- a/xen/arch/ia64/linux-xen/mm_contig.c       Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/linux-xen/mm_contig.c       Wed Aug 31 22:55:04 2005
@@ -62,7 +62,8 @@
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
-       printk("%ld pages in page table cache\n", pgtable_cache_size);
+       printk("%ld pages in page table cache\n",
+               pgtable_quicklist_total_size());
 }
 #endif
 
@@ -290,7 +291,7 @@
                vmem_map = (struct page *) vmalloc_end;
                efi_memmap_walk(create_mem_map_page_table, NULL);
 
-               mem_map = contig_page_data.node_mem_map = vmem_map;
+               NODE_DATA(0)->node_mem_map = vmem_map;
                free_area_init_node(0, &contig_page_data, zones_size,
                                    0, zholes_size);
 
@@ -307,4 +308,4 @@
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
-#endif /* !CONFIG_XEN */
+#endif
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/linux-xen/unaligned.c
--- a/xen/arch/ia64/linux-xen/unaligned.c       Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/linux-xen/unaligned.c       Wed Aug 31 22:55:04 2005
@@ -201,7 +201,7 @@
 
        RPT(r1), RPT(r2), RPT(r3),
 
-#ifdef  CONFIG_VTI
+#if defined(XEN) && defined(CONFIG_VTI)
        RPT(r4), RPT(r5), RPT(r6), RPT(r7),
 #else   //CONFIG_VTI
        RSW(r4), RSW(r5), RSW(r6), RSW(r7),
@@ -295,7 +295,7 @@
        return reg;
 }
 
-#ifdef CONFIG_VTI
+#if defined(XEN) && defined(CONFIG_VTI)
 static void
 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
 {
@@ -359,56 +359,6 @@
     }
     ia64_set_rsc(old_rsc);
 }
-
-
-static void
-get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
-{
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-       unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
-       unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
-       unsigned long rnats, nat_mask;
-       unsigned long on_kbs;
-    unsigned long old_rsc, new_rsc;
-       long sof = (regs->cr_ifs) & 0x7f;
-       long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
-       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
-       long ridx = r1 - 32;
-
-       if (ridx >= sof) {
-               /* read of out-of-frame register returns an undefined value; 0 in our case.  */
-               DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
-               panic("wrong stack register number");
-       }
-
-       if (ridx < sor)
-               ridx = rotate_reg(sor, rrb_gr, ridx);
-
-    old_rsc=ia64_get_rsc();
-    new_rsc=old_rsc&(~(0x3));
-    ia64_set_rsc(new_rsc);
-
-    bspstore = ia64_get_bspstore();
-    bsp =kbs + (regs->loadrs >> 19); //16+3;
-
-       addr = ia64_rse_skip_regs(bsp, -sof + ridx);
-    nat_mask = 1UL << ia64_rse_slot_num(addr);
-       rnat_addr = ia64_rse_rnat_addr(addr);
-
-    if(addr >= bspstore){
-
-        ia64_flushrs ();
-        ia64_mf ();
-        bspstore = ia64_get_bspstore();
-    }
-       *val=*addr;
-    if(bspstore < rnat_addr){
-        *nat=!!(ia64_get_rnat()&nat_mask);
-    }else{
-        *nat = !!((*rnat_addr)&nat_mask);
-    }
-    ia64_set_rsc(old_rsc);
-}
 #else // CONFIG_VTI
 static void
 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
@@ -590,7 +540,7 @@
                unat = &sw->ar_unat;
        } else {
                addr = (unsigned long)regs;
-#ifdef CONFIG_VTI
+#if defined(XEN) && defined(CONFIG_VTI)
                unat = &regs->eml_unat;
 #else //CONFIG_VTI
                unat = &sw->caller_unat;
@@ -780,7 +730,7 @@
                unat = &sw->ar_unat;
        } else {
                addr = (unsigned long)regs;
-#ifdef  CONFIG_VTI
+#if defined(XEN) && defined(CONFIG_VTI)
                unat = &regs->eml_unat;;
 #else   //CONFIG_VTI
                unat = &sw->caller_unat;
@@ -1527,6 +1477,10 @@
         *              - ldX.spill
         *              - stX.spill
         *      Reason: RNATs are based on addresses
+        *              - ld16
+        *              - st16
+        *      Reason: ld16 and st16 are supposed to occur in a single
+        *              memory op
         *
         *      synchronization:
         *              - cmpxchg
@@ -1548,6 +1502,10 @@
        switch (opcode) {
              case LDS_OP:
              case LDSA_OP:
+               if (u.insn.x)
+                       /* oops, really a semaphore op (cmpxchg, etc) */
+                       goto failure;
+               /* no break */
              case LDS_IMM_OP:
              case LDSA_IMM_OP:
              case LDFS_OP:
@@ -1572,6 +1530,10 @@
              case LDCCLR_OP:
              case LDCNC_OP:
              case LDCCLRACQ_OP:
+               if (u.insn.x)
+                       /* oops, really a semaphore op (cmpxchg, etc) */
+                       goto failure;
+               /* no break */
              case LD_IMM_OP:
              case LDA_IMM_OP:
              case LDBIAS_IMM_OP:
@@ -1584,6 +1546,10 @@
 
              case ST_OP:
              case STREL_OP:
+               if (u.insn.x)
+                       /* oops, really a semaphore op (cmpxchg, etc) */
+                       goto failure;
+               /* no break */
              case ST_IMM_OP:
              case STREL_IMM_OP:
                ret = emulate_store_int(ifa, u.insn, regs);
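
[The three guards added above share one idea: for these major opcodes the
instruction's x bit selects the semaphore and 16-byte forms (cmpxchg, xchg,
ld16, st16), which must not be emulated as ordinary loads and stores, so the
decoder now bails out to the existing failure path:

    /* shape of each added guard, as in the hunks above */
    if (u.insn.x)
            /* really a semaphore op (cmpxchg, etc.) -- refuse to emulate */
            goto failure;
]
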
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/linux/README.origin
--- a/xen/arch/ia64/linux/README.origin Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/linux/README.origin Wed Aug 31 22:55:04 2005
@@ -1,4 +1,8 @@
 Source files in this directory are identical copies of linux-2.6.13 files:
+
+NOTE: DO NOT commit changes to these files!   If a file
+needs to be changed, move it to ../linux-xen and follow
+the instructions in the README there.
 
 cmdline.c              -> linux/lib/cmdline.c
 efi_stub.S             -> linux/arch/ia64/efi_stub.S
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/linux-xen/README.origin
--- /dev/null   Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/linux-xen/README.origin     Wed Aug 31 22:55:04 2005
@@ -0,0 +1,21 @@
+Source files in this directory are near-identical copies of linux-2.6.13 files:
+
+NOTE: ALL changes to these files should be clearly marked (e.g. with
+#ifdef XEN or XEN in a comment) so that they can be easily updated
+to future versions of the corresponding Linux files.
+
+efi.c          -> linux/arch/ia64/kernel/efi.c
+entry.h                -> linux/arch/ia64/kernel/entry.h
+entry.S                -> linux/arch/ia64/kernel/entry.S
+irq_ia64.c     -> linux/arch/ia64/kernel/irq_ia64.c
+minstate.h     -> linux/arch/ia64/kernel/minstate.h
+mm_contig.c    -> linux/arch/ia64/mm/contig.c
+pal.S          -> linux/arch/ia64/kernel/pal.S
+sal.c          -> linux/arch/ia64/kernel/sal.c
+setup.c                -> linux/arch/ia64/kernel/setup.c
+smp.c          -> linux/arch/ia64/kernel/smp.c
+smpboot.c      -> linux/arch/ia64/kernel/smpboot.c
+sort.c         -> linux/lib/sort.c
+time.c         -> linux/arch/ia64/kernel/time.c
+tlb.c          -> linux/arch/ia64/mm/tlb.c
+unaligned.c    -> linux/arch/ia64/kernel/unaligned.c
diff -r b7276814008c -r 4e4f1db8ea94 xen/arch/ia64/vmx_irq_ia64.c
--- /dev/null   Wed Aug 31 20:32:27 2005
+++ b/xen/arch/ia64/vmx_irq_ia64.c      Wed Aug 31 22:55:04 2005
@@ -0,0 +1,127 @@
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/jiffies.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel_stat.h>
+#include <linux/slab.h>
+#include <linux/ptrace.h>
+#include <linux/random.h>      /* for rand_initialize_irq() */
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/threads.h>
+#include <linux/bitops.h>
+
+#include <asm/delay.h>
+#include <asm/intrinsics.h>
+#include <asm/io.h>
+#include <asm/hw_irq.h>
+#include <asm/machvec.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_PERFMON
+# include <asm/perfmon.h>
+#endif
+
+#define IRQ_DEBUG      0
+
+#ifdef  CONFIG_VTI
+#define vmx_irq_enter()                \
+       add_preempt_count(HARDIRQ_OFFSET);
+
+/* Now softirq will be checked when leaving hypervisor, or else
+ * scheduler irq will be executed too early.
+ */
+#define vmx_irq_exit(void)     \
+       sub_preempt_count(HARDIRQ_OFFSET);
+/*
+ * That's where the IVT branches when we get an external
+ * interrupt. This branches to the correct hardware IRQ handler via
+ * function ptr.
+ */
+void
+vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
+{
+       unsigned long saved_tpr;
+       int     wake_dom0 = 0;
+
+
+#if IRQ_DEBUG
+       {
+               unsigned long bsp, sp;
+
+               /*
+                * Note: if the interrupt happened while executing in
+                * the context switch routine (ia64_switch_to), we may
+                * get a spurious stack overflow here.  This is
+                * because the register and the memory stack are not
+                * switched atomically.
+                */
+               bsp = ia64_getreg(_IA64_REG_AR_BSP);
+               sp = ia64_getreg(_IA64_REG_AR_SP);
+
+               if ((sp - bsp) < 1024) {
+                       static unsigned char count;
+                       static long last_time;
+
+                       if (jiffies - last_time > 5*HZ)
+                               count = 0;
+                       if (++count < 5) {
+                               last_time = jiffies;
+                               printk("ia64_handle_irq: DANGER: less than "
+                                      "1KB of free stack space!!\n"
+                                      "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
+                       }
+               }
+       }
+#endif /* IRQ_DEBUG */
+
+       /*
+        * Always set TPR to limit maximum interrupt nesting depth to
+        * 16 (without this, it would be ~240, which could easily lead
+        * to kernel stack overflows).
+        */
+       vmx_irq_enter();
+       saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
+       ia64_srlz_d();
+       while (vector != IA64_SPURIOUS_INT_VECTOR) {
+           if (!IS_RESCHEDULE(vector)) {
+               ia64_setreg(_IA64_REG_CR_TPR, vector);
+               ia64_srlz_d();
+
+               if (vector != IA64_TIMER_VECTOR) {
+                       /* FIXME: Leave IRQ re-route later */
+                       vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
+                       wake_dom0 = 1;
+               }
+               else {  // FIXME: Handle Timer only now
+                       __do_IRQ(local_vector_to_irq(vector), regs);
+               }
+               
+               /*
+                * Disable interrupts and send EOI:
+                */
+               local_irq_disable();
+               ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
+           }
+           else {
+                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
+            }
+           ia64_eoi();
+           vector = ia64_get_ivr();
+       }
+       /*
+        * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
+        * handler needs to be able to wait for further keyboard interrupts, which can't
+        * come through until ia64_eoi() has been done.
+        */
+       vmx_irq_exit();
+       if ( wake_dom0 && current != dom0 ) 
+               vcpu_wake(dom0->vcpu[0]);
+}
+#endif

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

