[Xen-changelog] [xen-unstable] remove ia64
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1333449335 -7200
# Node ID 8aa1697d57e480e00b5aaef587c940b52055c064
# Parent 2386288b1bf12e550e267c53976dba51eed8e74d
remove ia64
IA64-specific bits are retained in code imported from elsewhere (e.g.
ACPI, EFI) as well as in the public headers.
The tools, mini-os, and unmodified_drivers sub-trees are likewise left
untouched.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---
diff -r 2386288b1bf1 -r 8aa1697d57e4 MAINTAINERS
--- a/MAINTAINERS Mon Apr 02 18:14:31 2012 +0100
+++ b/MAINTAINERS Tue Apr 03 12:35:35 2012 +0200
@@ -138,14 +138,6 @@ M: Tim Deegan <tim@xxxxxxx>
S: Supported
F: tools/debugger/kdd/
-IA64 ARCHITECTURE
-M: KUWAMURA Shin'ya <kuwa@xxxxxxxxxxxxxx>
-S: Supported
-L: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
-F: xen/arch/ia64/*
-F: xen/include/asm-ia64/*
-F: tools/libxc/ia64/*
-
INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
M: Joseph Cihula <joseph.cihula@xxxxxxxxx>
M: Gang Wei <gang.wei@xxxxxxxxx>
diff -r 2386288b1bf1 -r 8aa1697d57e4 config/ia64.mk
--- a/config/ia64.mk Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,6 +0,0 @@
-CONFIG_IA64 := y
-CONFIG_IA64_$(XEN_OS) := y
-
-CONFIG_IOEMU := y
-CONFIG_XCUTILS := y
-CONFIG_XENCOMM := y
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,71 +0,0 @@
-subdir-y += xen
-subdir-y += vmx
-subdir-y += linux
-subdir-y += linux-xen
-
-ALL_OBJS := linux-xen/head.o $(ALL_OBJS)
-
-$(TARGET)-syms: $(ALL_OBJS) xen.lds.s
- $(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/common/symbols-dummy.o
- $(LD) $(LDFLAGS) -T xen.lds.s -N -Map $(@D)/.$(@F).0.map $(ALL_OBJS) \
- $(BASEDIR)/common/symbols-dummy.o -o $(@D)/.$(@F).0
- $(NM) -n $(@D)/.$(@F).0 | $(BASEDIR)/tools/symbols >$(@D)/.$(@F).0.S
- $(MAKE) -f $(BASEDIR)/Rules.mk $(@D)/.$(@F).0.o
- $(LD) $(LDFLAGS) -T xen.lds.s -N -Map $(@D)/.$(@F).1.map $(ALL_OBJS) \
- $(@D)/.$(@F).0.o -o $(@D)/.$(@F).1
- $(NM) -n $(@D)/.$(@F).1 | $(BASEDIR)/tools/symbols >$(@D)/.$(@F).1.S
- $(MAKE) -f $(BASEDIR)/Rules.mk $(@D)/.$(@F).1.o
- $(LD) $(LDFLAGS) -T xen.lds.s -N -Map $@.map $(ALL_OBJS) \
- $(@D)/.$(@F).1.o -o $@
- rm -f $(@D)/.$(@F).[0-9]*
-
-$(TARGET): $(TARGET)-syms
- $(NM) -n $< | grep -v ' [aUw] ' > $(@D)/System.map
- $(OBJCOPY) -R .note -R .comment -S $< $@
-
-# Headers do not depend on auto-generated header, but object files do.
-$(ALL_OBJS): $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h
-
-asm-offsets.s: asm-offsets.c \
- $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
- $(CC) $(CFLAGS) -DGENERATE_ASM_OFFSETS -DIA64_TASK_SIZE=0 -S -o $@ $<
-
-asm-xsi-offsets.s: asm-xsi-offsets.c
- $(CC) $(CFLAGS) -S -o $@ $<
-
-$(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h: asm-xsi-offsets.s
- @(set -e; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was auto-generated from $<"; \
- echo " *"; \
- echo " */"; \
- echo ""; \
- echo "#ifndef __ASM_XSI_OFFSETS_H__"; \
- echo "#define __ASM_XSI_OFFSETS_H__"; \
- echo ""; \
- sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \
- echo ""; \
- echo "#endif") <$< >$@
-
-$(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
-# Need such symbol link to make linux headers available
- [ -e $(BASEDIR)/include/linux ] \
- || ln -sf $(BASEDIR)/include/xen $(BASEDIR)/include/linux
- [ -e $(BASEDIR)/include/asm-ia64/xen ] \
- || ln -sf $(BASEDIR)/include/asm-ia64/linux $(BASEDIR)/include/asm-ia64/xen
- touch $@
-
-# I'm sure a Makefile wizard would know a better way to do this
-xen.lds.s: xen/xen.lds.S
- $(CC) -E $(CPPFLAGS) -P -DXEN $(AFLAGS) \
- -o xen.lds.s xen/xen.lds.S
-
-.PHONY: clean
-clean::
- rm -f *.o *~ core xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp asm-offsets.s map.out
- rm -f asm-xsi-offsets.s $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h
- rm -f $(BASEDIR)/System.map
- rm -f $(BASEDIR)/include/linux
- rm -f $(BASEDIR)/include/asm-ia64/xen
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/Rules.mk
--- a/xen/arch/ia64/Rules.mk Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-########################################
-# ia64-specific definitions
-
-ia64 := y
-HAS_ACPI := y
-HAS_VGA := y
-HAS_CPUFREQ := y
-HAS_PCI := y
-HAS_PASSTHROUGH := y
-HAS_NS16550 := y
-HAS_KEXEC := y
-xenoprof := y
-no_warns ?= n
-vti_debug ?= n
-vmx_panic ?= n
-vhpt_disable ?= n
-xen_ia64_expose_p2m ?= y
-xen_ia64_pervcpu_vhpt ?= y
-xen_ia64_tlb_track ?= y
-xen_ia64_tlb_track_cnt ?= n
-xen_ia64_tlbflush_clock ?= y
-xen_ia64_disable_optvfault ?= n
-
-# If they are enabled,
-# shrink struct page_info assuming all mfn can be addressed by 32 bits.
-# However, with 50bit ia64 architected physical address and 16KB page size,
-# mfn isn't always assessed by 32bit. So they are disabled by default.
-xen_ia64_shrink_page_list ?= n
-xen_ia64_pickle_domain ?= n
-
-# Used only by linux/Makefile.
-AFLAGS_KERNEL += -mconstant-gp -nostdinc $(CPPFLAGS)
-
-CFLAGS += -nostdinc -fno-builtin -fno-common
-CFLAGS += -mconstant-gp
-#CFLAGS += -O3 # -O3 over-inlines making debugging tough!
-CFLAGS += -O2 # but no optimization causes compile errors!
-CFLAGS += -fomit-frame-pointer -D__KERNEL__
-CFLAGS += -iwithprefix include
-CPPFLAGS+= -I$(BASEDIR)/include \
- -I$(BASEDIR)/include/asm-ia64 \
- -I$(BASEDIR)/include/asm-ia64/linux \
- -I$(BASEDIR)/include/asm-ia64/linux-xen \
- -I$(BASEDIR)/include/asm-ia64/linux-null \
- -I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64/linux-xen
-CFLAGS += $(CPPFLAGS)
-#CFLAGS += -Wno-pointer-arith -Wredundant-decls
-CFLAGS += -DIA64 -DXEN -DLINUX_2_6
-CFLAGS += -ffixed-r13 -mfixed-range=f2-f5,f12-f127,b2-b5
-CFLAGS += -g
-ifeq ($(vti_debug),y)
-CFLAGS += -DVTI_DEBUG
-endif
-ifeq ($(vmx_panic),y)
-CFLAGS += -DCONFIG_VMX_PANIC
-endif
-ifeq ($(xen_ia64_expose_p2m),y)
-CFLAGS += -DCONFIG_XEN_IA64_EXPOSE_P2M
-endif
-ifeq ($(xen_ia64_pervcpu_vhpt),y)
-CFLAGS += -DCONFIG_XEN_IA64_PERVCPU_VHPT
-ifeq ($(vhpt_disable),y)
-$(error "both xen_ia64_pervcpu_vhpt=y and vhpt_disable=y are enabled. they
can't be enabled simultaneously. disable one of them.")
-endif
-endif
-ifeq ($(xen_ia64_tlb_track),y)
-CFLAGS += -DCONFIG_XEN_IA64_TLB_TRACK
-endif
-ifeq ($(xen_ia64_tlb_track_cnt),y)
-CFLAGS += -DCONFIG_TLB_TRACK_CNT
-endif
-ifeq ($(xen_ia64_tlbflush_clock),y)
-CFLAGS += -DCONFIG_XEN_IA64_TLBFLUSH_CLOCK
-endif
-ifeq ($(no_warns),y)
-CFLAGS += -Wa,--fatal-warnings -Werror -Wno-uninitialized
-endif
-ifneq ($(vhpt_disable),y)
-CFLAGS += -DVHPT_ENABLED=1
-else
-CFLAGS += -DVHPT_ENABLED=0
-endif
-ifeq ($(xen_ia64_disable_optvfault),y)
-CFLAGS += -DCONFIG_XEN_IA64_DISABLE_OPTVFAULT
-endif
-ifeq ($(xen_ia64_shrink_page_list),y)
-CFLAGS += -DCONFIG_IA64_SHRINK_PAGE_LIST
-endif
-ifeq ($(xen_ia64_pickle_domain),y)
-CFLAGS += -DCONFIG_IA64_PICKLE_DOMAIN
-endif
-
-LDFLAGS = -g
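
The comment on xen_ia64_shrink_page_list above is worth unpacking: with
IA64's 50-bit architected physical address space and 16KB (2^14) pages, a
machine frame number can need 50 - 14 = 36 bits, so it cannot safely be
packed into 32 bits. A standalone sketch of that arithmetic (macro names
invented for illustration; this is not Xen code):

    #include <stdio.h>

    #define IA64_PADDR_BITS 50  /* architected physical address width */
    #define IA64_PAGE_SHIFT 14  /* 16KB pages */
    #define MFN_BITS (IA64_PADDR_BITS - IA64_PAGE_SHIFT)

    /* Compile-time check that an mfn can indeed exceed 32 bits here. */
    _Static_assert(MFN_BITS > 32, "mfn can exceed 32 bits");

    int main(void)
    {
        printf("an ia64 mfn can need %d bits\n", MFN_BITS); /* 36 */
        return 0;
    }

This is why both options default to n.
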
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,273 +0,0 @@
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- */
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/mca.h>
-#include <public/xen.h>
-#include <asm/tlb.h>
-#include <asm/regs.h>
-#include <asm/xenmca.h>
-
-#define task_struct vcpu
-
-#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(_sym, _str, _mem) \
- DEFINE(_sym, offsetof(_str, _mem));
-
-void foo(void)
-{
- DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct));
- DEFINE(IA64_THREAD_INFO_SIZE, sizeof (struct thread_info));
- DEFINE(IA64_PT_REGS_SIZE, sizeof (struct pt_regs));
- DEFINE(IA64_SWITCH_STACK_SIZE, sizeof (struct switch_stack));
- DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
- DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
- DEFINE(MAPPED_REGS_T_SIZE, sizeof (mapped_regs_t));
-
- BLANK();
- DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, offsetof (struct ia64_mca_cpu, init_stack));
-
- BLANK();
- DEFINE(VCPU_VTM_OFFSET_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.vtm_offset));
- DEFINE(VCPU_VTM_LAST_ITC_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.last_itc));
- DEFINE(VCPU_VRR0_OFS, offsetof(struct vcpu, arch.arch_vmx.vrr[0]));
- DEFINE(VCPU_ITR0_OFS, offsetof(struct vcpu, arch.itrs[0]));
- DEFINE(VCPU_CALLBACK_OFS, offsetof(struct vcpu, arch.event_callback_ip));
-#ifdef VTI_DEBUG
- DEFINE(IVT_CUR_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_current));
- DEFINE(IVT_DBG_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_debug));
- DEFINE(IVT_DEBUG_SIZE, sizeof(struct ivt_debug));
-#endif
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
-
- BLANK();
-
- DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct vcpu, arch._thread.ksp));
- DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
-
- DEFINE(IA64_VCPU_HYPERCALL_CONTINUATION_OFS, offsetof (struct vcpu, arch.hypercall_continuation));
- DEFINE(IA64_VCPU_FP_PSR_OFFSET, offsetof (struct vcpu, arch.fp_psr));
- DEFINE(IA64_VCPU_META_RID_DT_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_dt));
- DEFINE(IA64_VCPU_META_RID_D_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_d));
- DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
- DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
- DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
- DEFINE(IA64_VCPU_EVENT_CALLBACK_IP_OFFSET, offsetof (struct vcpu, arch.event_callback_ip));
- DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
- DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
- DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
- DEFINE(IA64_VCPU_STARTING_RID_OFFSET, offsetof (struct vcpu, arch.starting_rid));
- DEFINE(IA64_VCPU_ENDING_RID_OFFSET, offsetof (struct vcpu, arch.ending_rid));
- DEFINE(IA64_VCPU_RID_BITS_OFFSET, offsetof (struct vcpu, arch.rid_bits));
- DEFINE(IA64_VCPU_DOMAIN_ITM_OFFSET, offsetof (struct vcpu, arch.domain_itm));
- DEFINE(IA64_VCPU_DOMAIN_ITM_LAST_OFFSET, offsetof (struct vcpu, arch.domain_itm_last));
- DEFINE(IA64_VCPU_ITLB_OFFSET, offsetof (struct vcpu, arch.itlb));
- DEFINE(IA64_VCPU_DTLB_OFFSET, offsetof (struct vcpu, arch.dtlb));
- DEFINE(IA64_VCPU_VHPT_PG_SHIFT_OFFSET, offsetof (struct vcpu, arch.vhpt_pg_shift));
-
- BLANK();
-
- DEFINE(IA64_VCPU_SHADOW_BITMAP_OFFSET, offsetof (struct vcpu, arch.shadow_bitmap));
-
- BLANK();
-
- DEFINE(IA64_CPUINFO_ITM_NEXT_OFFSET, offsetof (struct cpuinfo_ia64, itm_next));
- DEFINE(IA64_CPUINFO_KSOFTIRQD_OFFSET, offsetof (struct cpuinfo_ia64, ksoftirqd));
-
-
- BLANK();
-
- DEFINE(IA64_PT_REGS_B6_OFFSET, offsetof (struct pt_regs, b6));
- DEFINE(IA64_PT_REGS_B7_OFFSET, offsetof (struct pt_regs, b7));
- DEFINE(IA64_PT_REGS_AR_CSD_OFFSET, offsetof (struct pt_regs, ar_csd));
- DEFINE(IA64_PT_REGS_AR_SSD_OFFSET, offsetof (struct pt_regs, ar_ssd));
- DEFINE(IA64_PT_REGS_R8_OFFSET, offsetof (struct pt_regs, r8));
- DEFINE(IA64_PT_REGS_R9_OFFSET, offsetof (struct pt_regs, r9));
- DEFINE(IA64_PT_REGS_R10_OFFSET, offsetof (struct pt_regs, r10));
- DEFINE(IA64_PT_REGS_R11_OFFSET, offsetof (struct pt_regs, r11));
- DEFINE(IA64_PT_REGS_CR_IPSR_OFFSET, offsetof (struct pt_regs, cr_ipsr));
- DEFINE(IA64_PT_REGS_CR_IIP_OFFSET, offsetof (struct pt_regs, cr_iip));
- DEFINE(IA64_PT_REGS_CR_IFS_OFFSET, offsetof (struct pt_regs, cr_ifs));
- DEFINE(IA64_PT_REGS_AR_UNAT_OFFSET, offsetof (struct pt_regs, ar_unat));
- DEFINE(IA64_PT_REGS_AR_PFS_OFFSET, offsetof (struct pt_regs, ar_pfs));
- DEFINE(IA64_PT_REGS_AR_RSC_OFFSET, offsetof (struct pt_regs, ar_rsc));
- DEFINE(IA64_PT_REGS_AR_RNAT_OFFSET, offsetof (struct pt_regs, ar_rnat));
-
- DEFINE(IA64_PT_REGS_AR_BSPSTORE_OFFSET, offsetof (struct pt_regs, ar_bspstore));
- DEFINE(IA64_PT_REGS_PR_OFFSET, offsetof (struct pt_regs, pr));
- DEFINE(IA64_PT_REGS_B0_OFFSET, offsetof (struct pt_regs, b0));
- DEFINE(IA64_PT_REGS_LOADRS_OFFSET, offsetof (struct pt_regs, loadrs));
- DEFINE(IA64_PT_REGS_R1_OFFSET, offsetof (struct pt_regs, r1));
- DEFINE(IA64_PT_REGS_R12_OFFSET, offsetof (struct pt_regs, r12));
- DEFINE(IA64_PT_REGS_R13_OFFSET, offsetof (struct pt_regs, r13));
- DEFINE(IA64_PT_REGS_AR_FPSR_OFFSET, offsetof (struct pt_regs, ar_fpsr));
- DEFINE(IA64_PT_REGS_R15_OFFSET, offsetof (struct pt_regs, r15));
- DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
- DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
- DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
- DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
- DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
- DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
- DEFINE(IA64_PT_REGS_R19_OFFSET, offsetof (struct pt_regs, r19));
- DEFINE(IA64_PT_REGS_R20_OFFSET, offsetof (struct pt_regs, r20));
- DEFINE(IA64_PT_REGS_R21_OFFSET, offsetof (struct pt_regs, r21));
- DEFINE(IA64_PT_REGS_R22_OFFSET, offsetof (struct pt_regs, r22));
- DEFINE(IA64_PT_REGS_R23_OFFSET, offsetof (struct pt_regs, r23));
- DEFINE(IA64_PT_REGS_R24_OFFSET, offsetof (struct pt_regs, r24));
- DEFINE(IA64_PT_REGS_R25_OFFSET, offsetof (struct pt_regs, r25));
- DEFINE(IA64_PT_REGS_R26_OFFSET, offsetof (struct pt_regs, r26));
- DEFINE(IA64_PT_REGS_R27_OFFSET, offsetof (struct pt_regs, r27));
- DEFINE(IA64_PT_REGS_R28_OFFSET, offsetof (struct pt_regs, r28));
- DEFINE(IA64_PT_REGS_R29_OFFSET, offsetof (struct pt_regs, r29));
- DEFINE(IA64_PT_REGS_R30_OFFSET, offsetof (struct pt_regs, r30));
- DEFINE(IA64_PT_REGS_R31_OFFSET, offsetof (struct pt_regs, r31));
- DEFINE(IA64_PT_REGS_AR_CCV_OFFSET, offsetof (struct pt_regs, ar_ccv));
- DEFINE(IA64_PT_REGS_F6_OFFSET, offsetof (struct pt_regs, f6));
- DEFINE(IA64_PT_REGS_F7_OFFSET, offsetof (struct pt_regs, f7));
- DEFINE(IA64_PT_REGS_F8_OFFSET, offsetof (struct pt_regs, f8));
- DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
- DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
- DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));
- DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct pt_regs, r4));
- DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct pt_regs, r5));
- DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
- DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
- DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
- DEFINE(IA64_VCPU_IIPA_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_iipa));
- DEFINE(IA64_VCPU_ISR_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_isr));
- DEFINE(IA64_VCPU_CAUSE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cause));
- DEFINE(IA64_VCPU_OPCODE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.opcode));
- DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
- DEFINE(IA64_PT_REGS_R16_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
- DEFINE(IA64_PT_REGS_R2_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
- DEFINE(IA64_PT_REGS_R8_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
- DEFINE(IA64_VCPU_FLAGS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.flags));
- DEFINE(IA64_VCPU_MMU_MODE_OFFSET,offsetof(struct vcpu, arch.arch_vmx.mmu_mode));
-
- BLANK();
-
- DEFINE(IA64_SWITCH_STACK_CALLER_UNAT_OFFSET, offsetof (struct switch_stack, caller_unat));
- DEFINE(IA64_SWITCH_STACK_AR_FPSR_OFFSET, offsetof (struct switch_stack, ar_fpsr));
- DEFINE(IA64_SWITCH_STACK_F2_OFFSET, offsetof (struct switch_stack, f2));
- DEFINE(IA64_SWITCH_STACK_F3_OFFSET, offsetof (struct switch_stack, f3));
- DEFINE(IA64_SWITCH_STACK_F4_OFFSET, offsetof (struct switch_stack, f4));
- DEFINE(IA64_SWITCH_STACK_F5_OFFSET, offsetof (struct switch_stack, f5));
- DEFINE(IA64_SWITCH_STACK_F12_OFFSET, offsetof (struct switch_stack, f12));
- DEFINE(IA64_SWITCH_STACK_F13_OFFSET, offsetof (struct switch_stack, f13));
- DEFINE(IA64_SWITCH_STACK_F14_OFFSET, offsetof (struct switch_stack, f14));
- DEFINE(IA64_SWITCH_STACK_F15_OFFSET, offsetof (struct switch_stack, f15));
- DEFINE(IA64_SWITCH_STACK_F16_OFFSET, offsetof (struct switch_stack, f16));
- DEFINE(IA64_SWITCH_STACK_F17_OFFSET, offsetof (struct switch_stack, f17));
- DEFINE(IA64_SWITCH_STACK_F18_OFFSET, offsetof (struct switch_stack, f18));
- DEFINE(IA64_SWITCH_STACK_F19_OFFSET, offsetof (struct switch_stack, f19));
- DEFINE(IA64_SWITCH_STACK_F20_OFFSET, offsetof (struct switch_stack, f20));
- DEFINE(IA64_SWITCH_STACK_F21_OFFSET, offsetof (struct switch_stack, f21));
- DEFINE(IA64_SWITCH_STACK_F22_OFFSET, offsetof (struct switch_stack, f22));
- DEFINE(IA64_SWITCH_STACK_F23_OFFSET, offsetof (struct switch_stack, f23));
- DEFINE(IA64_SWITCH_STACK_F24_OFFSET, offsetof (struct switch_stack, f24));
- DEFINE(IA64_SWITCH_STACK_F25_OFFSET, offsetof (struct switch_stack, f25));
- DEFINE(IA64_SWITCH_STACK_F26_OFFSET, offsetof (struct switch_stack, f26));
- DEFINE(IA64_SWITCH_STACK_F27_OFFSET, offsetof (struct switch_stack, f27));
- DEFINE(IA64_SWITCH_STACK_F28_OFFSET, offsetof (struct switch_stack, f28));
- DEFINE(IA64_SWITCH_STACK_F29_OFFSET, offsetof (struct switch_stack, f29));
- DEFINE(IA64_SWITCH_STACK_F30_OFFSET, offsetof (struct switch_stack, f30));
- DEFINE(IA64_SWITCH_STACK_F31_OFFSET, offsetof (struct switch_stack, f31));
- DEFINE(IA64_SWITCH_STACK_R4_OFFSET, offsetof (struct switch_stack, r4));
- DEFINE(IA64_SWITCH_STACK_R5_OFFSET, offsetof (struct switch_stack, r5));
- DEFINE(IA64_SWITCH_STACK_R6_OFFSET, offsetof (struct switch_stack, r6));
- DEFINE(IA64_SWITCH_STACK_R7_OFFSET, offsetof (struct switch_stack, r7));
- DEFINE(IA64_SWITCH_STACK_B0_OFFSET, offsetof (struct switch_stack, b0));
- DEFINE(IA64_SWITCH_STACK_B1_OFFSET, offsetof (struct switch_stack, b1));
- DEFINE(IA64_SWITCH_STACK_B2_OFFSET, offsetof (struct switch_stack, b2));
- DEFINE(IA64_SWITCH_STACK_B3_OFFSET, offsetof (struct switch_stack, b3));
- DEFINE(IA64_SWITCH_STACK_B4_OFFSET, offsetof (struct switch_stack, b4));
- DEFINE(IA64_SWITCH_STACK_B5_OFFSET, offsetof (struct switch_stack, b5));
- DEFINE(IA64_SWITCH_STACK_AR_PFS_OFFSET, offsetof (struct switch_stack, ar_pfs));
- DEFINE(IA64_SWITCH_STACK_AR_LC_OFFSET, offsetof (struct switch_stack, ar_lc));
- DEFINE(IA64_SWITCH_STACK_AR_UNAT_OFFSET, offsetof (struct switch_stack, ar_unat));
- DEFINE(IA64_SWITCH_STACK_AR_RNAT_OFFSET, offsetof (struct switch_stack, ar_rnat));
- DEFINE(IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET, offsetof (struct switch_stack, ar_bspstore));
- DEFINE(IA64_SWITCH_STACK_PR_OFFSET, offsetof (struct switch_stack, pr));
-
- BLANK();
-
- DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs));
- DEFINE(IA64_VPD_VIFS_OFFSET, offsetof (mapped_regs_t, ifs));
- DEFINE(IA64_VPD_VHPI_OFFSET, offsetof (mapped_regs_t, vhpi));
- DEFINE(IA64_VPD_VB1REG_OFFSET, offsetof (mapped_regs_t, bank1_regs[0]));
- DEFINE(IA64_VPD_VB0REG_OFFSET, offsetof (mapped_regs_t, bank0_regs[0]));
- DEFINE(IA64_VPD_VB1NAT_OFFSET, offsetof (mapped_regs_t, vnat));
- DEFINE(IA64_VPD_VB0NAT_OFFSET, offsetof (mapped_regs_t, vbnat));
- DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
- DEFINE(IA64_VPD_VPTA_OFFSET, offsetof (struct mapped_regs, pta));
- DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
-
- BLANK();
- DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
- DEFINE(IA64_CPUINFO_PTCE_BASE_OFFSET, offsetof (struct cpuinfo_ia64, ptce_base));
- DEFINE(IA64_CPUINFO_PTCE_COUNT_OFFSET, offsetof (struct cpuinfo_ia64, ptce_count));
- DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
-
-
- DEFINE(CLONE_IDLETASK_BIT, 12);
- DEFINE(CLONE_SETTLS_BIT, 19);
- DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
-
- BLANK();
- DEFINE(IA64_KR_CURRENT_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_CURRENT]));
- DEFINE(IA64_KR_PT_BASE_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_PT_BASE]));
- DEFINE(IA64_KR_IO_BASE_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_IO_BASE]));
- DEFINE(IA64_KR_PERCPU_DATA_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_PER_CPU_DATA]));
- DEFINE(IA64_KR_IO_BASE_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_IO_BASE]));
- DEFINE(IA64_KR_CURRENT_STACK_OFFSET, offsetof (cpu_kr_ia64_t, _kr[IA64_KR_CURRENT_STACK]));
-
-#ifdef PERF_COUNTERS
- BLANK();
- DEFINE(IA64_PERFC_recover_to_page_fault, PERFC_recover_to_page_fault);
- DEFINE(IA64_PERFC_recover_to_break_fault, PERFC_recover_to_break_fault);
- DEFINE(IA64_PERFC_fast_vhpt_translate, PERFC_fast_vhpt_translate);
- DEFINE(IA64_PERFC_fast_hyperprivop, PERFC_fast_hyperprivop);
- DEFINE(IA64_PERFC_fast_reflect, PERFC_fast_reflect);
-#endif
-
- BLANK();
- DEFINE(IA64_CPUINFO_PTCE_BASE_OFFSET,
- offsetof(struct cpuinfo_ia64, ptce_base));
- DEFINE(IA64_CPUINFO_PTCE_COUNT_OFFSET,
- offsetof(struct cpuinfo_ia64, ptce_count));
- DEFINE(IA64_CPUINFO_PTCE_STRIDE_OFFSET,
- offsetof(struct cpuinfo_ia64, ptce_stride));
-
- BLANK();
- DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
- offsetof(struct ia64_mca_cpu, proc_state_dump));
- DEFINE(IA64_MCA_CPU_STACK_OFFSET,
- offsetof(struct ia64_mca_cpu, stack));
- DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
- offsetof(struct ia64_mca_cpu, stackframe));
- DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
- offsetof(struct ia64_mca_cpu, rbstore));
-
-#if VHPT_ENABLED
- DEFINE(IA64_VCPU_VHPT_PAGE_OFFSET,
- offsetof(struct vcpu, arch.vhpt_page));
- DEFINE(IA64_VCPU_VHPT_MADDR_OFFSET,
- offsetof(struct vcpu, arch.vhpt_maddr));
-#endif
-
- BLANK();
- DEFINE(IA64_MCA_TLB_INFO_SIZE, sizeof(struct ia64_mca_tlb_info));
- DEFINE(IA64_MCA_PERCPU_OFFSET,
- offsetof(struct ia64_mca_tlb_info, percpu_paddr));
-}
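
asm-offsets.c above is an instance of the standard offsets-extraction
trick (the same one Linux uses): DEFINE() plants a recognizable
"->SYM (value)" marker in the compiler's assembly output, and the sed
command in the ia64 Makefile hunk earlier rewrites those markers into
#define lines for an auto-generated header. A minimal, self-contained
sketch of the mechanism -- the struct and symbol names are invented
here, and the exact post-processing varies:

    #include <stddef.h>

    struct vcpu_demo {
        long id;
        long regs[8];
    };

    /* Emit "->SYM (val) expr" into the .s file; the "i" constraint
     * forces val to appear as a compile-time immediate constant. */
    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " (%0) " #val : : "i" (val))

    void foo(void)
    {
        DEFINE(VCPU_DEMO_ID_OFFSET,   offsetof(struct vcpu_demo, id));
        DEFINE(VCPU_DEMO_REGS_OFFSET, offsetof(struct vcpu_demo, regs));
    }

Compiling this with `cc -S` (never to an object file) and filtering the
"->" markers out of the .s output yields the offsets without running any
target code, which is what makes the scheme cross-compile-safe. The
[\$$#]* in the Makefile's sed pattern strips the target-specific
immediate prefix ($ or #) from the emitted value.
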
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/asm-xsi-offsets.c
--- a/xen/arch/ia64/asm-xsi-offsets.c Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,76 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
-/*
- * asm-xsi-offsets.c_
- * Copyright (c) 2005, Intel Corporation.
- * Kun Tian (Kevin Tian) <kevin.tian@xxxxxxxxx>
- * Eddie Dong <eddie.dong@xxxxxxxxx>
- * Fred Yang <fred.yang@xxxxxxxxx>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- */
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <public/xen.h>
-#include <asm/tlb.h>
-#include <asm/regs.h>
-
-#define task_struct vcpu
-
-#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define DEFINE_MAPPED_REG_OFS(sym, field) \
- DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(mapped_regs_t, field)))
-
-void foo(void)
-{
- DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
- DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
- DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
- DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
- DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
- DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
- DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
- DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
- DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
- DEFINE_MAPPED_REG_OFS(XSI_TPR_OFS, tpr);
- DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
- DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
- DEFINE_MAPPED_REG_OFS(XSI_ITV_OFS, itv);
- DEFINE_MAPPED_REG_OFS(XSI_PTA_OFS, pta);
- DEFINE_MAPPED_REG_OFS(XSI_VPSR_DFH_OFS, vpsr_dfh);
- DEFINE_MAPPED_REG_OFS(XSI_HPSR_DFH_OFS, hpsr_dfh);
- DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
- DEFINE_MAPPED_REG_OFS(XSI_VPSR_PP_OFS, vpsr_pp);
- DEFINE_MAPPED_REG_OFS(XSI_METAPHYS_OFS, metaphysical_mode);
- DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
- DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
- DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
- DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
- DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
- DEFINE_MAPPED_REG_OFS(XSI_RR0_OFS, rrs[0]);
- DEFINE_MAPPED_REG_OFS(XSI_KR0_OFS, krs[0]);
-}
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/Makefile
--- a/xen/arch/ia64/linux-xen/Makefile Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,24 +0,0 @@
-subdir-y += sn
-
-obj-y += cmdline.o
-obj-y += efi.o
-obj-y += entry.o
-obj-y += irq_ia64.o
-obj-y += mca.o
-obj-y += mca_asm.o
-obj-y += mm_contig.o
-obj-y += process-linux-xen.o
-obj-y += sal.o
-obj-y += setup.o
-obj-y += smpboot.o
-obj-y += smp.o
-obj-y += time.o
-obj-y += tlb.o
-obj-y += unaligned.o
-obj-y += unwind.o
-obj-y += iosapic.o
-obj-y += numa.o
-obj-y += perfmon.o
-obj-y += perfmon_default_smpl.o
-obj-y += acpi.o
-obj-y += acpi_numa.o
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/README.origin
--- a/xen/arch/ia64/linux-xen/README.origin Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-# Source files in this directory are near-identical copies of linux-2.6.13
-# files:
-
-# NOTE: ALL changes to these files should be clearly marked
-# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
-# easily updated to future versions of the corresponding Linux files.
-
-cmdline.c -> linux/lib/cmdline.c
-entry.h -> linux/arch/ia64/kernel/entry.h
-entry.S -> linux/arch/ia64/kernel/entry.S
-head.S -> linux/arch/ia64/kernel/head.S
-hpsim_ssc.h -> linux/arch/ia64/hp/sim/hpsim_ssc.h
-irq_ia64.c -> linux/arch/ia64/kernel/irq_ia64.c
-mca.c -> linux/arch/ia64/kernel/mca.c
-mca_asm.S -> linux/arch/ia64/kernel/mca_asm.S
-minstate.h -> linux/arch/ia64/kernel/minstate.h
-mm_contig.c -> linux/arch/ia64/mm/contig.c
-numa.c -> linux/arch/ia64/kernel/numa.c
-process-linux-xen.c -> linux/arch/ia64/kernel/process.c
-sal.c -> linux/arch/ia64/kernel/sal.c
-setup.c -> linux/arch/ia64/kernel/setup.c
-smp.c -> linux/arch/ia64/kernel/smp.c
-smpboot.c -> linux/arch/ia64/kernel/smpboot.c
-time.c -> linux/arch/ia64/kernel/time.c
-tlb.c -> linux/arch/ia64/mm/tlb.c
-unaligned.c -> linux/arch/ia64/kernel/unaligned.c
-unwind.c -> linux/arch/ia64/kernel/unwind.c
-unwind_decoder.c -> linux/arch/ia64/kernel/unwind_decoder.c
-unwind_i.h -> linux/arch/ia64/kernel/unwind_i.h
-
-# The files below are from Linux-2.6.16
-iosapic.c -> linux/arch/ia64/kernel/iosapic.c
-
-# The files below are from Linux-2.6.16.33
-perfmon.c -> linux/arch/kernel/perfmon.c
-perfmon_default_smpl.c -> linux/arch/kernel/perfmon_default_smpl.c
-perfmon_generic.h -> linux/arch/kernel/perfmon_generic.h
-perfmon_itanium.h -> linux/arch/kernel/perfmon_itanium.h
-perfmon_mckinley.h -> linux/arch/kernel/perfmon_mckinley.h
-perfmon_montecito.h -> linux/arch/kernel/perfmon_montecito.h
-
-# The files below are from Linux-2.6.21
-efi.c -> linux/arch/ia64/kernel/efi.c
-
-# The files below are from Linux-2.6.26-rc5
-acpi.c -> linux/arch/ia64/kernel/acpi.c
-acpi_numa.c -> linux/drivers/acpi/numa.c
\ No newline at end of file
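
README.origin's marking rule is what kept these near-verbatim Linux
imports re-syncable: every local divergence sits under #ifdef XEN (or a
XEN comment), so a diff against the upstream file shows only deliberate
changes. A toy illustration of the convention (not from the tree --
acpi.c below shows the real usage):

    #include <stdio.h>

    static int demo_setup(void)
    {
    #ifdef XEN
        return 1;   /* Xen-specific behaviour, clearly fenced off */
    #else
        return 0;   /* unmodified Linux behaviour */
    #endif
    }

    int main(void)
    {
        printf("variant: %d\n", demo_setup());
        return 0;
    }
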
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/acpi.c
--- a/xen/arch/ia64/linux-xen/acpi.c Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1098 +0,0 @@
-/*
- * acpi.c - Architecture-Specific Low-Level ACPI Support
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999,2000 Walt Drummond <drummond@xxxxxxxxxxx>
- * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 2000 Intel Corp.
- * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@xxxxxxxxx>
- * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@xxxxxxxxx>
- * Copyright (C) 2001 Jenna Hall <jenna.s.hall@xxxxxxxxx>
- * Copyright (C) 2001 Takayoshi Kochi <t-kochi@xxxxxxxxxxxxx>
- * Copyright (C) 2002 Erich Focht <efocht@xxxxxxxxxx>
- * Copyright (C) 2004 Ashok Raj <ashok.raj@xxxxxxxxx>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/irq.h>
-#include <linux/acpi.h>
-#include <linux/efi.h>
-#include <linux/mmzone.h>
-#include <linux/nodemask.h>
-#include <asm/io.h>
-#include <asm/iosapic.h>
-#include <asm/machvec.h>
-#include <asm/page.h>
-#include <asm/system.h>
-#include <asm/numa.h>
-#include <asm/sal.h>
-#include <asm/cyclone.h>
-#include <asm/xen/hypervisor.h>
-#ifdef XEN
-#include <asm/hw_irq.h>
-#include <asm/numa.h>
-extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
-#endif
-
-
-#define BAD_MADT_ENTRY(entry, end) ( \
- (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
- ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-
-#define PREFIX "ACPI: "
-
-void (*pm_idle) (void) __read_mostly;
-EXPORT_SYMBOL(pm_idle);
-void (*pm_power_off) (void) __read_mostly;
-EXPORT_SYMBOL(pm_power_off);
-
-unsigned int acpi_cpei_override;
-unsigned int acpi_cpei_phys_cpuid;
-
-unsigned long acpi_wakeup_address = 0;
-
-#ifdef CONFIG_IA64_GENERIC
-static unsigned long __init acpi_find_rsdp(void)
-{
- unsigned long rsdp_phys = 0;
-
- if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
- rsdp_phys = efi.acpi20;
- else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
- printk(KERN_WARNING PREFIX
- "v1.0/r0.71 tables no longer supported\n");
- return rsdp_phys;
-}
-#endif
-
-const char __init *
-acpi_get_sysname(void)
-{
-#ifdef CONFIG_IA64_GENERIC
- unsigned long rsdp_phys;
- struct acpi_table_rsdp *rsdp;
- struct acpi_table_xsdt *xsdt;
- struct acpi_table_header *hdr;
-
- rsdp_phys = acpi_find_rsdp();
- if (!rsdp_phys) {
- printk(KERN_ERR
- "ACPI 2.0 RSDP not found, default to \"dig\"\n");
- return "dig";
- }
-
- rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
- if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
- printk(KERN_ERR
- "ACPI 2.0 RSDP signature incorrect, default to
\"dig\"\n");
- return "dig";
- }
-
- xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
- hdr = &xsdt->header;
- if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
- printk(KERN_ERR
- "ACPI 2.0 XSDT signature incorrect, default to
\"dig\"\n");
- return "dig";
- }
-
- if (!strcmp(hdr->oem_id, "HP")) {
- return "hpzx1";
- } else if (!strcmp(hdr->oem_id, "SGI")) {
- if (!strcmp(hdr->oem_table_id + 4, "UV"))
- return "uv";
- else
- return "sn2";
-#ifndef XEN
- } else if (is_running_on_xen() && !strcmp(hdr->oem_id, "XEN")) {
- return "xen";
-#endif
- }
-
- return "dig";
-#else
-# if defined (CONFIG_IA64_HP_SIM)
- return "hpsim";
-# elif defined (CONFIG_IA64_HP_ZX1)
- return "hpzx1";
-# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
- return "hpzx1_swiotlb";
-# elif defined (CONFIG_IA64_SGI_SN2)
- return "sn2";
-# elif defined (CONFIG_IA64_SGI_UV)
- return "uv";
-# elif defined (CONFIG_IA64_DIG)
- return "dig";
-# elif defined (CONFIG_IA64_XEN)
- return "xen";
-# else
-# error Unknown platform. Fix acpi.c.
-# endif
-#endif
-}
-
-#ifdef CONFIG_ACPI
-
-#define ACPI_MAX_PLATFORM_INTERRUPTS 256
-
-/* Array to record platform interrupt vectors for generic interrupt routing. */
-int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
- [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
-};
-
-/*
- * Interrupt routing API for device drivers. Provides interrupt vector for
- * a generic platform event. Currently only CPEI is implemented.
- */
-int acpi_request_vector(u32 int_type)
-{
- int vector = -1;
-
- if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
- /* corrected platform error interrupt */
- vector = platform_intr_list[int_type];
- } else
- printk(KERN_ERR
- "acpi_request_vector(): invalid interrupt type\n");
- return vector;
-}
-
-char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
-{
- return __va(phys_addr);
-}
-
-/* --------------------------------------------------------------------------
- Boot-time Table Parsing
- -------------------------------------------------------------------------- */
-
-static int total_cpus __initdata;
-static int available_cpus __initdata;
-struct acpi_table_madt *acpi_madt __initdata;
-static u8 has_8259;
-
-static int __init
-acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_madt_local_apic_override *lapic;
-
- lapic = (struct acpi_madt_local_apic_override *)header;
-
- if (BAD_MADT_ENTRY(lapic, end))
- return -EINVAL;
-
- if (lapic->address) {
- iounmap(ipi_base_addr);
- ipi_base_addr = ioremap(lapic->address, 0);
- }
- return 0;
-}
-
-#ifdef XEN
-
-#define MAX_LOCAL_SAPIC 255
-static u16 ia64_acpiid_to_sapicid[ MAX_LOCAL_SAPIC ] =
- {[0 ... MAX_LOCAL_SAPIC - 1] = 0xffff };
-
-/* acpi id to cpu id */
-int get_cpu_id(u32 acpi_id)
-{
- int i;
- u16 apic_id;
-
- if ( acpi_id >= MAX_LOCAL_SAPIC )
- return -EINVAL;
-
- apic_id = ia64_acpiid_to_sapicid[acpi_id];
- if ( apic_id == 0xffff )
- return -EINVAL;
-
- for ( i = 0; i < NR_CPUS; i++ )
- {
- if ( apic_id == ia64_cpu_to_sapicid[i] )
- return i;
- }
-
- return -1;
-}
-
-int arch_acpi_set_pdc_bits(u32 acpi_id, u32 *pdc, u32 mask)
-{
- pdc[2] |= ACPI_PDC_EST_CAPABILITY_SMP & mask;
- return 0;
-}
-
-#endif
-
-static int __init
-acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
-{
- struct acpi_madt_local_sapic *lsapic;
-
- lsapic = (struct acpi_madt_local_sapic *)header;
-
- /*Skip BAD_MADT_ENTRY check, as lsapic size could vary */
-
- if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
-#ifdef CONFIG_SMP
- smp_boot_data.cpu_phys_id[available_cpus] =
- (lsapic->id << 8) | lsapic->eid;
-#endif
-#ifdef XEN
- ia64_acpiid_to_sapicid[lsapic->processor_id] =
- (lsapic->id << 8) | lsapic->eid;
-#endif
- ++available_cpus;
- }
-
- total_cpus++;
- return 0;
-}
-
-static int __init
-acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
-{
- struct acpi_madt_local_apic_nmi *lacpi_nmi;
-
- lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
-
- if (BAD_MADT_ENTRY(lacpi_nmi, end))
- return -EINVAL;
-
- /* TBD: Support lapic_nmi entries */
- return 0;
-}
-
-static int __init
-acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
-{
- struct acpi_madt_io_sapic *iosapic;
-
- iosapic = (struct acpi_madt_io_sapic *)header;
-
- if (BAD_MADT_ENTRY(iosapic, end))
- return -EINVAL;
-
-#ifndef XEN
- return iosapic_init(iosapic->address, iosapic->global_irq_base);
-#else
- return iosapic_init(iosapic->address, iosapic->global_irq_base,
- iosapic->id);
-#endif
-}
-
-static unsigned int __initdata acpi_madt_rev;
-
-static int __init
-acpi_parse_plat_int_src(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_madt_interrupt_source *plintsrc;
- int vector;
-
- plintsrc = (struct acpi_madt_interrupt_source *)header;
-
- if (BAD_MADT_ENTRY(plintsrc, end))
- return -EINVAL;
-
- /*
- * Get vector assignment for this interrupt, set attributes,
- * and program the IOSAPIC routing table.
- */
- vector = iosapic_register_platform_intr(plintsrc->type,
- plintsrc->global_irq,
- plintsrc->io_sapic_vector,
- plintsrc->eid,
- plintsrc->id,
- ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
- ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
- IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
- ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
- ACPI_MADT_TRIGGER_EDGE) ?
- IOSAPIC_EDGE : IOSAPIC_LEVEL);
-
- platform_intr_list[plintsrc->type] = vector;
- if (acpi_madt_rev > 1) {
- acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
- }
-
- /*
- * Save the physical id, so we can check when its being removed
- */
- acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;
-
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-#ifdef XEN
-unsigned int force_cpei_retarget = 0;
-#endif
-
-unsigned int can_cpei_retarget(void)
-{
- extern int cpe_vector;
- extern unsigned int force_cpei_retarget;
-
- /*
- * Only if CPEI is supported and the override flag
- * is present, otherwise return that its re-targettable
- * if we are in polling mode.
- */
- if (cpe_vector > 0) {
- if (acpi_cpei_override || force_cpei_retarget)
- return 1;
- else
- return 0;
- }
- return 1;
-}
-
-unsigned int is_cpu_cpei_target(unsigned int cpu)
-{
- unsigned int logical_id;
-
- logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);
-
- if (logical_id == cpu)
- return 1;
- else
- return 0;
-}
-
-void set_cpei_target_cpu(unsigned int cpu)
-{
- acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
-}
-#endif
-
-unsigned int get_cpei_target_cpu(void)
-{
- return acpi_cpei_phys_cpuid;
-}
-
-static int __init
-acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_madt_interrupt_override *p;
-
- p = (struct acpi_madt_interrupt_override *)header;
-
- if (BAD_MADT_ENTRY(p, end))
- return -EINVAL;
-
- iosapic_override_isa_irq(p->source_irq, p->global_irq,
- ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
- ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
- IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
- ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
- ACPI_MADT_TRIGGER_EDGE) ?
- IOSAPIC_EDGE : IOSAPIC_LEVEL);
- return 0;
-}
-
-static int __init
-acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
-{
- struct acpi_madt_nmi_source *nmi_src;
-
- nmi_src = (struct acpi_madt_nmi_source *)header;
-
- if (BAD_MADT_ENTRY(nmi_src, end))
- return -EINVAL;
-
- /* TBD: Support nimsrc entries */
- return 0;
-}
-
-static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW",
6))) {
-
- /*
- * Unfortunately ITC_DRIFT is not yet part of the
- * official SAL spec, so the ITC_DRIFT bit is not
- * set by the BIOS on this hardware.
- */
- sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
-
-#ifndef XEN
- cyclone_setup();
-#endif
- }
-}
-
-static int __init acpi_parse_madt(struct acpi_table_header *table)
-{
- if (!table)
- return -EINVAL;
-
- acpi_madt = (struct acpi_table_madt *)table;
-
- acpi_madt_rev = acpi_madt->header.revision;
-
- /* remember the value for reference after free_initmem() */
-#ifdef CONFIG_ITANIUM
- has_8259 = 1; /* Firmware on old Itanium systems is broken */
-#else
- has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
-#endif
- iosapic_system_init(has_8259);
-
- /* Get base address of IPI Message Block */
-
- if (acpi_madt->address)
- ipi_base_addr = ioremap(acpi_madt->address, 0);
-
- printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
-
- acpi_madt_oem_check(acpi_madt->header.oem_id,
- acpi_madt->header.oem_table_id);
-
- return 0;
-}
-
-#ifdef CONFIG_ACPI_NUMA
-
-#undef SLIT_DEBUG
-
-#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
-
-static int __initdata srat_num_cpus; /* number of cpus */
-static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
-#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
-#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
-static struct acpi_table_slit __initdata *slit_table;
-cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
-
-static int __init
-get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
-{
- int pxm;
-
- pxm = pa->proximity_domain_lo;
- if (srat_rev >= 2) {
- pxm += pa->proximity_domain_hi[0] << 8;
- pxm += pa->proximity_domain_hi[1] << 16;
- pxm += pa->proximity_domain_hi[2] << 24;
- } else if (ia64_platform_is("sn2"))
- pxm += pa->proximity_domain_hi[0] << 8;
- return pxm;
-}
-
-static int __init
-get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
-{
- int pxm;
-
- pxm = ma->proximity_domain;
- if (!ia64_platform_is("sn2") && srat_rev < 2)
- pxm &= 0xff;
-
- return pxm;
-}
-
-/*
- * ACPI 2.0 SLIT (System Locality Information Table)
- * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
- */
-void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
-{
- u32 len;
-
- len = sizeof(struct acpi_table_header) + 8
- + slit->locality_count * slit->locality_count;
- if (slit->header.length != len) {
- printk(KERN_ERR
- "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
- len, slit->header.length);
- memset(numa_slit, 10, sizeof(numa_slit));
- return;
- }
- slit_table = slit;
-}
-
-void __init
-acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
-{
- int pxm;
-
- if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
- return;
-
- pxm = get_processor_proximity_domain(pa);
-
- /* record this node in proximity bitmap */
- pxm_bit_set(pxm);
-
- node_cpuid[srat_num_cpus].phys_id =
- (pa->apic_id << 8) | (pa->local_sapic_eid);
- /* nid should be overridden as logical node id later */
- node_cpuid[srat_num_cpus].nid = pxm;
- cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
- srat_num_cpus++;
-}
-
-void __init
-acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
-{
- unsigned long paddr, size;
- int pxm;
- struct node_memblk_s *p, *q, *pend;
-
- pxm = get_memory_proximity_domain(ma);
-
- /* fill node memory chunk structure */
- paddr = ma->base_address;
- size = ma->length;
-
- /* Ignore disabled entries */
- if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
- return;
-
- /* record this node in proximity bitmap */
- pxm_bit_set(pxm);
-
- /* Insertion sort based on base address */
- pend = &node_memblk[num_node_memblks];
- for (p = &node_memblk[0]; p < pend; p++) {
- if (paddr < p->start_paddr)
- break;
- }
- if (p < pend) {
- for (q = pend - 1; q >= p; q--)
- *(q + 1) = *q;
- }
- p->start_paddr = paddr;
- p->size = size;
- p->nid = pxm;
- num_node_memblks++;
-}
-
-void __init acpi_numa_arch_fixup(void)
-{
- int i, j, node_from, node_to;
-
- /* If there's no SRAT, fix the phys_id and mark node 0 online */
- if (srat_num_cpus == 0) {
- node_set_online(0);
- node_cpuid[0].phys_id = hard_smp_processor_id();
- return;
- }
-
- /*
- * MCD - This can probably be dropped now. No need for pxm ID to node ID
- * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
- */
- nodes_clear(node_online_map);
- for (i = 0; i < MAX_PXM_DOMAINS; i++) {
- if (pxm_bit_test(i)) {
- int nid = acpi_map_pxm_to_node(i);
- node_set_online(nid);
- }
- }
-
- /* set logical node id in memory chunk structure */
- for (i = 0; i < num_node_memblks; i++)
- node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
-
- /* assign memory bank numbers for each chunk on each node */
- for_each_online_node(i) {
- int bank;
-
- bank = 0;
- for (j = 0; j < num_node_memblks; j++)
- if (node_memblk[j].nid == i)
- node_memblk[j].bank = bank++;
- }
-
- /* set logical node id in cpu structure */
- for_each_possible_early_cpu(i)
- node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
-
- printk(KERN_INFO "Number of logical nodes in system = %d\n",
- num_online_nodes());
- printk(KERN_INFO "Number of memory chunks in system = %d\n",
- num_node_memblks);
-
- if (!slit_table)
- return;
- memset(numa_slit, -1, sizeof(numa_slit));
- for (i = 0; i < slit_table->locality_count; i++) {
- if (!pxm_bit_test(i))
- continue;
- node_from = pxm_to_node(i);
- for (j = 0; j < slit_table->locality_count; j++) {
- if (!pxm_bit_test(j))
- continue;
- node_to = pxm_to_node(j);
- node_distance(node_from, node_to) =
- slit_table->entry[i * slit_table->locality_count + j];
- }
- }
-
-#ifdef SLIT_DEBUG
- printk("ACPI 2.0 SLIT locality table:\n");
- for_each_online_node(i) {
- for_each_online_node(j)
- printk("%03d ", node_distance(i, j));
- printk("\n");
- }
-#endif
-}
-#endif /* CONFIG_ACPI_NUMA */
-
-#ifndef XEN
-/*
- * success: return IRQ number (>=0)
- * failure: return < 0
- */
-int acpi_register_gsi(u32 gsi, int triggering, int polarity)
-{
- if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
- return gsi;
-
- if (has_8259 && gsi < 16)
- return isa_irq_to_vector(gsi);
-
- return iosapic_register_intr(gsi,
- (polarity ==
- ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
- IOSAPIC_POL_LOW,
- (triggering ==
- ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
- IOSAPIC_LEVEL);
-}
-
-void acpi_unregister_gsi(u32 gsi)
-{
- if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
- return;
-
- if (has_8259 && gsi < 16)
- return;
-
- iosapic_unregister_intr(gsi);
-}
-#endif
-
-static int __init acpi_parse_fadt(struct acpi_table_header *table)
-{
- struct acpi_table_header *fadt_header;
- struct acpi_table_fadt *fadt;
-
- if (!table)
- return -EINVAL;
-
- fadt_header = (struct acpi_table_header *)table;
- if (fadt_header->revision != 3)
- return -ENODEV; /* Only deal with ACPI 2.0 FADT */
-
- fadt = (struct acpi_table_fadt *)fadt_header;
-
-#ifndef XEN
- acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
-#endif
- return 0;
-}
-
-int __init acpi_boot_init(void)
-{
-
- /*
- * MADT
- * ----
- * Parse the Multiple APIC Description Table (MADT), if exists.
- * Note that this table provides platform SMP configuration
- * information -- the successor to MPS tables.
- */
-
- if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
- printk(KERN_ERR PREFIX "Can't find MADT\n");
- goto skip_madt;
- }
-
- /* Local APIC */
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC address override entry\n");
-
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
- < 1)
- printk(KERN_ERR PREFIX
- "Error parsing MADT - no LAPIC entries\n");
-
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
- < 0)
- printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-
- /* I/O APIC */
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) {
- if (!ia64_platform_is("sn2"))
- printk(KERN_ERR PREFIX
- "Error parsing MADT - no IOSAPIC entries\n");
- }
-
- /* System-Level Interrupt Routing */
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
- ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
- printk(KERN_ERR PREFIX
- "Error parsing platform interrupt source entry\n");
-
- if (acpi_table_parse_madt
- (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
- printk(KERN_ERR PREFIX
- "Error parsing interrupt source overrides entry\n");
-
- if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
- printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
- skip_madt:
-
- /*
- * FADT says whether a legacy keyboard controller is present.
- * The FADT also contains an SCI_INT line, by which the system
- * gets interrupts such as power and sleep buttons. If it's not
- * on a Legacy interrupt, it needs to be setup.
- */
- if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
- printk(KERN_ERR PREFIX "Can't find FADT\n");
-
-#ifdef XEN
- acpi_dmar_init();
-#endif
-
-#ifdef CONFIG_SMP
- if (available_cpus == 0) {
- printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
- printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
- smp_boot_data.cpu_phys_id[available_cpus] =
- hard_smp_processor_id();
- available_cpus = 1; /* We've got at least one of these, no? */
- }
- smp_boot_data.cpu_count = available_cpus;
-
- smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
- if (srat_num_cpus == 0) {
- int cpu, i = 1;
- for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
- if (smp_boot_data.cpu_phys_id[cpu] !=
- hard_smp_processor_id())
- node_cpuid[i++].phys_id =
- smp_boot_data.cpu_phys_id[cpu];
- }
-# endif
-#endif
-#ifdef CONFIG_ACPI_NUMA
- build_cpu_to_node_map();
-#endif
- /* Make boot-up look pretty */
- printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
- total_cpus);
- return 0;
-}
-
-int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
-{
- int tmp;
-
- if (has_8259 && gsi < 16)
- *irq = isa_irq_to_vector(gsi);
- else {
- tmp = gsi_to_irq(gsi);
- if (tmp == -1)
- return -1;
- *irq = tmp;
- }
- return 0;
-}
-
-/*
- * ACPI based hotplug CPU support
- */
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static
-int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
-{
-#ifdef CONFIG_ACPI_NUMA
- int pxm_id;
- int nid;
-
- pxm_id = acpi_get_pxm(handle);
- /*
- * We don't have cpu-only-node hotadd. But if the system equips
- * SRAT table, pxm is already found and node is ready.
- * So, just pxm_to_nid(pxm) is OK.
- * This code here is for the system which doesn't have full SRAT
- * table for possible cpus.
- */
- nid = acpi_map_pxm_to_node(pxm_id);
- node_cpuid[cpu].phys_id = physid;
- node_cpuid[cpu].nid = nid;
-#endif
- return (0);
-}
-
-int additional_cpus __initdata = -1;
-
-static __init int setup_additional_cpus(char *s)
-{
- if (s)
- additional_cpus = simple_strtol(s, NULL, 0);
-
- return 0;
-}
-
-early_param("additional_cpus", setup_additional_cpus);
-
-/*
- * cpu_possible_map should be static, it cannot change as CPUs
- * are onlined, or offlined. The reason is per-cpu data-structures
- * are allocated by some modules at init time, and dont expect to
- * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
- * In case when cpu_hotplug is not compiled, then we resort to current
- * behaviour, which is cpu_possible == cpu_present.
- * - Ashok Raj
- *
- * Three ways to find out the number of additional hotplug CPUs:
- * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
- * - The user can overwrite it with additional_cpus=NUM
- * - Otherwise don't reserve additional CPUs.
- */
-__init void prefill_possible_map(void)
-{
- int i;
- int possible, disabled_cpus;
-
- disabled_cpus = total_cpus - available_cpus;
-
- if (additional_cpus == -1) {
- if (disabled_cpus > 0)
- additional_cpus = disabled_cpus;
- else
- additional_cpus = 0;
- }
-
- possible = available_cpus + additional_cpus;
-
- if (possible > NR_CPUS)
- possible = NR_CPUS;
-
- printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
- possible, max((possible - available_cpus), 0));
-
- for (i = 0; i < possible; i++)
- cpumask_set_cpu(i, &cpu_possible_map);
-}
-
-#ifndef XEN
-int acpi_map_lsapic(acpi_handle handle, int *pcpu)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- struct acpi_madt_local_sapic *lsapic;
- cpumask_t tmp_map;
- long physid;
- int cpu;
-
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
- return -EINVAL;
-
- if (!buffer.length || !buffer.pointer)
- return -EINVAL;
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER)
- {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
-
- if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
- (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- physid = ((lsapic->id << 8) | (lsapic->eid));
-
- kfree(buffer.pointer);
- buffer.length = ACPI_ALLOCATE_BUFFER;
- buffer.pointer = NULL;
-
- cpumask_complement(&tmp_map, &cpu_present_map);
- cpu = cpumask_first(&tmp_map);
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
-
- acpi_map_cpu2node(handle, cpu, physid);
-
- cpumask_set_cpu(cpu, &cpu_present_map);
- ia64_cpu_to_sapicid[cpu] = physid;
-
- *pcpu = cpu;
- return (0);
-}
-
-EXPORT_SYMBOL(acpi_map_lsapic);
-
-int acpi_unmap_lsapic(int cpu)
-{
- ia64_cpu_to_sapicid[cpu] = -1;
- cpumask_clear_cpu(cpu, &cpu_present_map);
-
-#ifdef CONFIG_ACPI_NUMA
- /* NUMA specific cleanup's */
-#endif
-
- return (0);
-}
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
-#endif /* XEN */
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
-#ifndef XEN
-#ifdef CONFIG_ACPI_NUMA
-static acpi_status __devinit
-acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- struct acpi_madt_io_sapic *iosapic;
- unsigned int gsi_base;
- int pxm, node;
-
- /* Only care about objects w/ a method that returns the MADT */
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
- return AE_OK;
-
- if (!buffer.length || !buffer.pointer)
- return AE_OK;
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER ||
- obj->buffer.length < sizeof(*iosapic)) {
- kfree(buffer.pointer);
- return AE_OK;
- }
-
- iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
-
- if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
- kfree(buffer.pointer);
- return AE_OK;
- }
-
- gsi_base = iosapic->global_irq_base;
-
- kfree(buffer.pointer);
-
- /*
- * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
- * us which node to associate this with.
- */
- pxm = acpi_get_pxm(handle);
- if (pxm < 0)
- return AE_OK;
-
- node = pxm_to_node(pxm);
-
- if (node >= MAX_NUMNODES || !node_online(node) ||
- cpus_empty(node_to_cpumask(node)))
- return AE_OK;
-
- /* We know a gsi to node mapping! */
- map_iosapic_to_node(gsi_base, node);
- return AE_OK;
-}
-
-static int __init
-acpi_map_iosapics (void)
-{
- acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
- return 0;
-}
-
-fs_initcall(acpi_map_iosapics);
-#endif /* CONFIG_ACPI_NUMA */
-
-int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
-{
- int err;
-
- if ((err = iosapic_init(phys_addr, gsi_base)))
- return err;
-
-#ifdef CONFIG_ACPI_NUMA
- acpi_map_iosapic(handle, 0, NULL, NULL);
-#endif /* CONFIG_ACPI_NUMA */
-
- return 0;
-}
-
-EXPORT_SYMBOL(acpi_register_ioapic);
-
-int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
-{
- return iosapic_remove(gsi_base);
-}
-
-EXPORT_SYMBOL(acpi_unregister_ioapic);
-#endif /* XEN */
-
-/*
- * acpi_save_state_mem() - save kernel state
- *
- * TBD when when IA64 starts to support suspend...
- */
-int acpi_save_state_mem(void) { return 0; }
-
-/*
- * acpi_restore_state()
- */
-void acpi_restore_state_mem(void) {}
-
-/*
- * do_suspend_lowlevel()
- */
-void do_suspend_lowlevel(void) {}
-
-#endif /* CONFIG_ACPI */
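
A recurring pattern in the acpi.c hunk above: MADT subtables are walked
by per-type handlers (acpi_parse_lsapic, acpi_parse_iosapic, ...), each
of which casts the generic subtable header to its specific type only
after a BAD_MADT_ENTRY bounds check -- the entry must fit before `end`
and must declare a sane length. A generic, self-contained sketch of that
validate-then-advance loop (the struct layout and names are simplified
stand-ins, not the ACPI CA types):

    #include <stdint.h>
    #include <stdio.h>

    struct subtable_header {
        uint8_t type;
        uint8_t length;
    };

    /* The checks BAD_MADT_ENTRY performs: no overrun past the table
     * end, and no length shorter than the entry header itself. */
    static int bad_entry(const struct subtable_header *h, const uint8_t *end)
    {
        return (const uint8_t *)h + sizeof(*h) > end ||
               h->length < sizeof(*h) ||
               (const uint8_t *)h + h->length > end;
    }

    int main(void)
    {
        /* Two fake subtables: type 0 (length 4), type 7 (length 2). */
        uint8_t table[] = { 0, 4, 0xaa, 0xbb, 7, 2 };
        const uint8_t *end = table + sizeof(table);
        const uint8_t *p = table;

        while (p < end) {
            const struct subtable_header *h = (const void *)p;
            if (bad_entry(h, end))
                break;          /* malformed table: stop parsing */
            printf("subtable type %u length %u\n", h->type, h->length);
            p += h->length;     /* advance by the declared length */
        }
        return 0;
    }

(acpi_parse_lsapic deliberately skips the check, since LSAPIC entries
are variable-sized -- the removed code notes this inline.)
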
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/acpi_numa.c
--- a/xen/arch/ia64/linux-xen/acpi_numa.c Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,276 +0,0 @@
-/*
- * acpi_numa.c - ACPI NUMA support
- *
- * Copyright (C) 2002 Takayoshi Kochi <t-kochi@xxxxxxxxxxxxx>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <acpi/acmacros.h>
-
-#ifndef XEN
-#define ACPI_NUMA 0x80000000
-#define _COMPONENT ACPI_NUMA
-ACPI_MODULE_NAME("numa");
-#else
-#define NID_INVAL -1
-#define PXM_INVAL -1
-#endif
-
-#ifndef XEN
-static nodemask_t nodes_found_map = NODE_MASK_NONE;
-#else
-/* the above causes an error: initializer element is not constant.
- * Anyway, NODE_MASK_NONE is a zero-filled array.
- */
-static nodemask_t nodes_found_map;
-#endif
-
-/* maps to convert between proximity domain and logical node ID */
-static int pxm_to_node_map[MAX_PXM_DOMAINS]
- = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
-static int node_to_pxm_map[MAX_NUMNODES]
- = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
-
-int pxm_to_node(int pxm)
-{
- if (pxm < 0)
- return NID_INVAL;
- return pxm_to_node_map[pxm];
-}
-
-int node_to_pxm(int node)
-{
- if (node < 0)
- return PXM_INVAL;
- return node_to_pxm_map[node];
-}
-
-void __acpi_map_pxm_to_node(int pxm, int node)
-{
- pxm_to_node_map[pxm] = node;
- node_to_pxm_map[node] = pxm;
-}
-
-int acpi_map_pxm_to_node(int pxm)
-{
- int node = pxm_to_node_map[pxm];
-
- if (node < 0){
- if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
- return NID_INVAL;
- node = first_unset_node(nodes_found_map);
- __acpi_map_pxm_to_node(pxm, node);
- node_set(node, nodes_found_map);
- }
-
- return node;
-}
-
-#ifndef XEN
-#if 0
-void __cpuinit acpi_unmap_pxm_to_node(int node)
-{
- int pxm = node_to_pxm_map[node];
- pxm_to_node_map[pxm] = NID_INVAL;
- node_to_pxm_map[node] = PXM_INVAL;
- node_clear(node, nodes_found_map);
-}
-#endif /* 0 */
-
-static void __init
-acpi_table_print_srat_entry(struct acpi_subtable_header *header)
-{
-
- ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
-
- if (!header)
- return;
-
- switch (header->type) {
-
- case ACPI_SRAT_TYPE_CPU_AFFINITY:
-#ifdef ACPI_DEBUG_OUTPUT
- {
- struct acpi_srat_cpu_affinity *p =
- (struct acpi_srat_cpu_affinity *)header;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Processor (id[0x%02x]
eid[0x%02x]) in proximity domain %d %s\n",
- p->apic_id, p->local_sapic_eid,
- p->proximity_domain_lo,
- (p->flags & ACPI_SRAT_CPU_ENABLED)?
- "enabled" : "disabled"));
- }
-#endif /* ACPI_DEBUG_OUTPUT */
- break;
-
- case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
-#ifdef ACPI_DEBUG_OUTPUT
- {
- struct acpi_srat_mem_affinity *p =
- (struct acpi_srat_mem_affinity *)header;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Memory (0x%lx length 0x%lx type
0x%x) in proximity domain %d %s%s\n",
- (unsigned long)p->base_address,
- (unsigned long)p->length,
- p->memory_type, p->proximity_domain,
- (p->flags & ACPI_SRAT_MEM_ENABLED)?
- "enabled" : "disabled",
-				  (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
- " hot-pluggable" : ""));
- }
-#endif /* ACPI_DEBUG_OUTPUT */
- break;
-
- default:
- printk(KERN_WARNING PREFIX
- "Found unsupported SRAT entry (type = 0x%x)\n",
- header->type);
- break;
- }
-}
-
-static int __init acpi_parse_slit(struct acpi_table_header *table)
-{
- struct acpi_table_slit *slit;
- u32 localities;
-
- if (!table)
- return -EINVAL;
-
- slit = (struct acpi_table_slit *)table;
-
- /* downcast just for %llu vs %lu for i386/ia64 */
- localities = (u32) slit->locality_count;
-
- acpi_numa_slit_init(slit);
-
- return 0;
-}
-
-static int __init
-acpi_parse_processor_affinity(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_srat_cpu_affinity *processor_affinity;
-
- processor_affinity = (struct acpi_srat_cpu_affinity *)header;
- if (!processor_affinity)
- return -EINVAL;
-
- acpi_table_print_srat_entry(header);
-
-	/* let the architecture-dependent part do it */
- acpi_numa_processor_affinity_init(processor_affinity);
-
- return 0;
-}
-
-static int __init
-acpi_parse_memory_affinity(struct acpi_subtable_header * header,
- const unsigned long end)
-{
- struct acpi_srat_mem_affinity *memory_affinity;
-
- memory_affinity = (struct acpi_srat_mem_affinity *)header;
- if (!memory_affinity)
- return -EINVAL;
-
- acpi_table_print_srat_entry(header);
-
-	/* let the architecture-dependent part do it */
- acpi_numa_memory_affinity_init(memory_affinity);
-
- return 0;
-}
-
-static int __init acpi_parse_srat(struct acpi_table_header *table)
-{
- struct acpi_table_srat *srat;
-
- if (!table)
- return -EINVAL;
-
- srat = (struct acpi_table_srat *)table;
-
- return 0;
-}
-
-static int __init
-acpi_table_parse_srat(enum acpi_srat_type id,
-		      acpi_table_entry_handler handler, unsigned int max_entries)
-{
- return acpi_table_parse_entries(ACPI_SIG_SRAT,
- sizeof(struct acpi_table_srat), id,
- handler, max_entries);
-}
-
-int __init acpi_numa_init(void)
-{
- /* SRAT: Static Resource Affinity Table */
- if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, NR_CPUS);
- acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
- }
-
- /* SLIT: System Locality Information Table */
- acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
-
- acpi_numa_arch_fixup();
- return 0;
-}
-
-int acpi_get_pxm(acpi_handle h)
-{
- unsigned long pxm;
- acpi_status status;
- acpi_handle handle;
- acpi_handle phandle = h;
-
- do {
- handle = phandle;
- status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
- if (ACPI_SUCCESS(status))
- return pxm;
- status = acpi_get_parent(handle, &phandle);
- } while (ACPI_SUCCESS(status));
- return -1;
-}
-
-int acpi_get_node(acpi_handle *handle)
-{
- int pxm, node = -1;
-
- pxm = acpi_get_pxm(handle);
- if (pxm >= 0)
- node = acpi_map_pxm_to_node(pxm);
-
- return node;
-}
-EXPORT_SYMBOL(acpi_get_node);
-#endif /* XEN */
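
For reference, the heart of the file removed above is the pxm<->node table
pair: firmware proximity domains can be sparse, so logical node IDs are
handed out densely the first time each PXM is seen, and repeat lookups stay
stable. A minimal standalone sketch of that scheme in plain C, with
NPXM/NNODE as hypothetical stand-ins for MAX_PXM_DOMAINS/MAX_NUMNODES and
the nodes_found_map bitmap reduced to a counter:

#include <stdio.h>

#define NPXM      256	/* hypothetical MAX_PXM_DOMAINS */
#define NNODE      64	/* hypothetical MAX_NUMNODES */
#define NID_INVAL  -1
#define PXM_INVAL  -1

static int pxm_to_node_map[NPXM];
static int node_to_pxm_map[NNODE];
static int nodes_found;

static void maps_init(void)
{
	int i;

	for (i = 0; i < NPXM; i++)
		pxm_to_node_map[i] = NID_INVAL;
	for (i = 0; i < NNODE; i++)
		node_to_pxm_map[i] = PXM_INVAL;
}

/* Same idea as the removed acpi_map_pxm_to_node(): allocate a logical
 * node ID on first sight of a proximity domain, reuse it afterwards. */
static int map_pxm_to_node(int pxm)
{
	if (pxm < 0 || pxm >= NPXM)
		return NID_INVAL;
	if (pxm_to_node_map[pxm] == NID_INVAL) {
		if (nodes_found >= NNODE)
			return NID_INVAL;
		pxm_to_node_map[pxm] = nodes_found;
		node_to_pxm_map[nodes_found] = pxm;
		nodes_found++;
	}
	return pxm_to_node_map[pxm];
}

int main(void)
{
	maps_init();
	printf("pxm 5  -> node %d\n", map_pxm_to_node(5));   /* node 0 */
	printf("pxm 17 -> node %d\n", map_pxm_to_node(17));  /* node 1 */
	printf("pxm 5  -> node %d\n", map_pxm_to_node(5));   /* node 0 again */
	return 0;
}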
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/cmdline.c
--- a/xen/arch/ia64/linux-xen/cmdline.c Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,131 +0,0 @@
-/*
- * linux/lib/cmdline.c
- * Helper functions generally used for parsing kernel command line
- * and module options.
- *
- * Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
- *
- * GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#ifdef XEN
-#include <xen/lib.h>
-#endif
-
-
-/**
- * get_option - Parse integer from an option string
- * @str: option string
- * @pint: (output) integer value parsed from @str
- *
- * Read an int from an option string; if available accept a subsequent
- * comma as well.
- *
- * Return values:
- * 0 : no int in string
- * 1 : int found, no subsequent comma
- * 2 : int found including a subsequent comma
- */
-
-int get_option (char **str, int *pint)
-{
- char *cur = *str;
-
- if (!cur || !(*cur))
- return 0;
-#ifndef XEN
- *pint = simple_strtol (cur, str, 0);
-#else
- *pint = simple_strtol (cur, (const char**)str, 0);
-#endif
- if (cur == *str)
- return 0;
- if (**str == ',') {
- (*str)++;
- return 2;
- }
-
- return 1;
-}
-
-/**
- * get_options - Parse a string into a list of integers
- * @str: String to be parsed
- * @nints: size of integer array
- * @ints: integer array
- *
- * This function parses a string containing a comma-separated
- * list of integers. The parse halts when the array is
- * full, or when no more numbers can be retrieved from the
- * string.
- *
- * Return value is the character in the string which caused
- * the parse to end (typically a null terminator, if @str is
- * completely parseable).
- */
-
-char *get_options(const char *str, int nints, int *ints)
-{
- int res, i = 1;
-
- while (i < nints) {
- res = get_option ((char **)&str, ints + i);
- if (res == 0)
- break;
- i++;
- if (res == 1)
- break;
- }
- ints[0] = i - 1;
- return (char *)str;
-}
-
-/**
- * memparse - parse a string with mem suffixes into a number
- * @ptr: Where parse begins
- * @retptr: (output) Pointer to next char after parse completes
- *
- * Parses a string into a number. The number stored at @ptr is
- * potentially suffixed with %K (for kilobytes, or 1024 bytes),
- * %M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
- * 1073741824). If the number is suffixed with K, M, or G, then
- * the return value is the number multiplied by one kilobyte, one
- * megabyte, or one gigabyte, respectively.
- */
-
-unsigned long long memparse (char *ptr, char **retptr)
-{
-#ifndef XEN
- unsigned long long ret = simple_strtoull (ptr, retptr, 0);
-#else
- unsigned long long ret = simple_strtoull (ptr, (const char**)retptr, 0);
-#endif
-
- switch (**retptr) {
- case 'G':
- case 'g':
- ret <<= 10;
- case 'M':
- case 'm':
- ret <<= 10;
- case 'K':
- case 'k':
- ret <<= 10;
- (*retptr)++;
- default:
- break;
- }
- return ret;
-}
-
-
-EXPORT_SYMBOL(memparse);
-EXPORT_SYMBOL(get_option);
-EXPORT_SYMBOL(get_options);
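
The memparse() removed above relies on deliberate switch fall-through: a 'G'
suffix shifts left by 10 three times, 'M' twice, 'K' once. A self-contained
sketch of the same idea in standard C, using strtoull() in place of the
kernel's simple_strtoull():

#include <stdio.h>
#include <stdlib.h>

/* Parse a number with an optional K/M/G (or k/m/g) suffix. */
static unsigned long long my_memparse(const char *ptr, char **retptr)
{
	char *end;
	unsigned long long ret = strtoull(ptr, &end, 0);

	switch (*end) {
	case 'G': case 'g':
		ret <<= 10;
		/* fall through */
	case 'M': case 'm':
		ret <<= 10;
		/* fall through */
	case 'K': case 'k':
		ret <<= 10;
		end++;
	default:
		break;
	}
	if (retptr)
		*retptr = end;
	return ret;
}

int main(void)
{
	char *rest;

	printf("%llu\n", my_memparse("512K", &rest)); /* 524288 */
	printf("%llu\n", my_memparse("2G", &rest));   /* 2147483648 */
	printf("%llu\n", my_memparse("100", &rest));  /* 100, no suffix */
	return 0;
}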
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/efi.c
--- a/xen/arch/ia64/linux-xen/efi.c Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1334 +0,0 @@
-/*
- * Extensible Firmware Interface
- *
- * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- * Copyright (C) 1999-2003 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Stephane Eranian <eranian@xxxxxxxxxx>
- * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
- * Bjorn Helgaas <bjorn.helgaas@xxxxxx>
- *
- * All EFI Runtime Services are not implemented yet as EFI only
- * supports physical mode addressing on SoftSDV. This is to be fixed
- * in a future version. --drummond 1999-07-20
- *
- * Implemented EFI runtime services and virtual mode calls. --davidm
- *
- * Goutham Rao: <goutham.rao@xxxxxxxxx>
- * Skip non-WB memory and ignore empty memory ranges.
- */
-#include <linux/module.h>
-#include <linux/bootmem.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/efi.h>
-#include <linux/kexec.h>
-
-#include <asm/io.h>
-#include <asm/kregs.h>
-#include <asm/meminit.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/mca.h>
-
-#define EFI_DEBUG 0
-
-extern efi_status_t efi_call_phys (void *, ...);
-#ifdef XEN
-/* this should be defined in linux/kernel.h */
-extern unsigned long long memparse (char *ptr, char **retptr);
-/* this should be defined in linux/efi.h */
-//#define EFI_INVALID_TABLE_ADDR (void *)(~0UL)
-#endif
-
-struct efi efi;
-EXPORT_SYMBOL(efi);
-static efi_runtime_services_t *runtime;
-#if defined(XEN) && !defined(CONFIG_VIRTUAL_FRAME_TABLE)
-// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
-static unsigned long mem_limit = ~0UL, max_addr = 0x100000000UL, min_addr = 0UL;
-#else
-static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
-#endif
-
-#define efi_call_virt(f, args...) (*(f))(args)
-
-#define STUB_GET_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
-{ \
-	struct ia64_fpreg fr[6]; \
-	efi_time_cap_t *atc = NULL; \
-	efi_status_t ret; \
-	XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
-	if (tc) \
-		atc = adjust_arg(tc); \
-	ia64_save_scratch_fpregs(fr); \
-	XEN_EFI_RR_ENTER(rr6, rr7); \
-	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
-	XEN_EFI_RR_LEAVE(rr6, rr7); \
-	ia64_load_scratch_fpregs(fr); \
-	return ret; \
-}
-
-#define STUB_SET_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_time (efi_time_t *tm) \
-{ \
-	struct ia64_fpreg fr[6]; \
-	efi_status_t ret; \
-	XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
-	ia64_save_scratch_fpregs(fr); \
-	XEN_EFI_RR_ENTER(rr6, rr7); \
-	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
-	XEN_EFI_RR_LEAVE(rr6, rr7); \
-	ia64_load_scratch_fpregs(fr); \
-	return ret; \
-}
-
-#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
-{ \
-	struct ia64_fpreg fr[6]; \
-	efi_status_t ret; \
-	XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
-	ia64_save_scratch_fpregs(fr); \
-	XEN_EFI_RR_ENTER(rr6, rr7); \
-	ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
-				adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
-	XEN_EFI_RR_LEAVE(rr6, rr7); \
-	ia64_load_scratch_fpregs(fr); \
-	return ret; \
-}
-
-#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
-{ \
-	struct ia64_fpreg fr[6]; \
-	efi_time_t *atm = NULL; \
-	efi_status_t ret; \
-	XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
-	if (tm) \
-		atm = adjust_arg(tm); \
-	ia64_save_scratch_fpregs(fr); \
-	XEN_EFI_RR_ENTER(rr6, rr7); \
-	ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
-				enabled, atm); \
-	XEN_EFI_RR_LEAVE(rr6, rr7); \
-	ia64_load_scratch_fpregs(fr); \
-	return ret; \
-}
-
-#define STUB_GET_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
-		       unsigned long *data_size, void *data) \
-{ \
-	struct ia64_fpreg fr[6]; \
-	u32 *aattr = NULL; \
-	efi_status_t ret; \
-	XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
-	if (attr) \
-		aattr = adjust_arg(attr); \
-	ia64_save_scratch_fpregs(fr); \
-	XEN_EFI_RR_ENTER(rr6, rr7); \
-	ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
-				adjust_arg(name), adjust_arg(vendor), aattr, \
-				adjust_arg(data_size), adjust_arg(data)); \
-	XEN_EFI_RR_LEAVE(rr6, rr7); \
-	ia64_load_scratch_fpregs(fr); \
-	return ret; \
-}
-
-#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
-{ \
-	struct ia64_fpreg fr[6]; \
-	efi_status_t ret; \
-	XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
-	ia64_save_scratch_fpregs(fr); \
-	XEN_EFI_RR_ENTER(rr6, rr7); \
-	ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
-				adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
-	XEN_EFI_RR_LEAVE(rr6, rr7); \
-	ia64_load_scratch_fpregs(fr); \
-	return ret; \
-}
-
-#define STUB_SET_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
-		       unsigned long data_size, void *data) \
-{ \
-	struct ia64_fpreg fr[6]; \
-	efi_status_t ret; \
-	XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
-	ia64_save_scratch_fpregs(fr); \
-	XEN_EFI_RR_ENTER(rr6, rr7); \
-	ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
-				adjust_arg(name), adjust_arg(vendor), attr, data_size, \
-				adjust_arg(data)); \
-	XEN_EFI_RR_LEAVE(rr6, rr7); \
-	ia64_load_scratch_fpregs(fr); \
-	return ret; \
-}
-
-#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_next_high_mono_count (u32 *count) \
-{ \
-	struct ia64_fpreg fr[6]; \
-	efi_status_t ret; \
-	XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
-	ia64_save_scratch_fpregs(fr); \
-	XEN_EFI_RR_ENTER(rr6, rr7); \
-	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
-				__va(runtime->get_next_high_mono_count), adjust_arg(count)); \
-	XEN_EFI_RR_LEAVE(rr6, rr7); \
-	ia64_load_scratch_fpregs(fr); \
-	return ret; \
-}
-
-#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
-static void \
-prefix##_reset_system (int reset_type, efi_status_t status, \
-		       unsigned long data_size, efi_char16_t *data) \
-{ \
-	struct ia64_fpreg fr[6]; \
-	efi_char16_t *adata = NULL; \
-	XEN_EFI_RR_DECLARE(rr6, rr7); \
- \
-	if (data) \
-		adata = adjust_arg(data); \
- \
-	ia64_save_scratch_fpregs(fr); \
-	XEN_EFI_RR_ENTER(rr6, rr7); \
-	efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
-			  reset_type, status, data_size, adata); \
-	/* should not return, but just in case... */ \
-	XEN_EFI_RR_LEAVE(rr6, rr7); \
-	ia64_load_scratch_fpregs(fr); \
-}
-
-#define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg))
-
-STUB_GET_TIME(phys, phys_ptr)
-STUB_SET_TIME(phys, phys_ptr)
-STUB_GET_WAKEUP_TIME(phys, phys_ptr)
-STUB_SET_WAKEUP_TIME(phys, phys_ptr)
-STUB_GET_VARIABLE(phys, phys_ptr)
-STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
-STUB_SET_VARIABLE(phys, phys_ptr)
-STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
-STUB_RESET_SYSTEM(phys, phys_ptr)
-
-#define id(arg) arg
-
-STUB_GET_TIME(virt, id)
-STUB_SET_TIME(virt, id)
-STUB_GET_WAKEUP_TIME(virt, id)
-STUB_SET_WAKEUP_TIME(virt, id)
-STUB_GET_VARIABLE(virt, id)
-STUB_GET_NEXT_VARIABLE(virt, id)
-STUB_SET_VARIABLE(virt, id)
-STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
-STUB_RESET_SYSTEM(virt, id)
-
-#ifndef XEN
-void
-efi_gettimeofday (struct timespec *ts)
-{
- efi_time_t tm;
-
-	memset(ts, 0, sizeof(*ts));
- if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
- return;
-
- ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute,
tm.second);
- ts->tv_nsec = tm.nanosecond;
-}
-#endif
-
-static int
-is_memory_available (efi_memory_desc_t *md)
-{
- if (!(md->attribute & EFI_MEMORY_WB))
- return 0;
-
- switch (md->type) {
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_BOOT_SERVICES_CODE:
- case EFI_BOOT_SERVICES_DATA:
- case EFI_CONVENTIONAL_MEMORY:
- return 1;
- }
- return 0;
-}
-
-typedef struct kern_memdesc {
- u64 attribute;
- u64 start;
- u64 num_pages;
-} kern_memdesc_t;
-
-static kern_memdesc_t *kern_memmap;
-
-#define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
-
-static inline u64
-kmd_end(kern_memdesc_t *kmd)
-{
- return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
-}
-
-static inline u64
-efi_md_end(efi_memory_desc_t *md)
-{
- return (md->phys_addr + efi_md_size(md));
-}
-
-static inline int
-efi_wb(efi_memory_desc_t *md)
-{
- return (md->attribute & EFI_MEMORY_WB);
-}
-
-static inline int
-efi_uc(efi_memory_desc_t *md)
-{
- return (md->attribute & EFI_MEMORY_UC);
-}
-
-static void
-walk (efi_freemem_callback_t callback, void *arg, u64 attr)
-{
- kern_memdesc_t *k;
- u64 start, end, voff;
-
- voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
- for (k = kern_memmap; k->start != ~0UL; k++) {
- if (k->attribute != attr)
- continue;
- start = PAGE_ALIGN(k->start);
- end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
- if (start < end)
- if ((*callback)(start + voff, end + voff, arg) < 0)
- return;
- }
-}
-
-/*
- * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
- * has memory that is available for OS use.
- */
-void
-efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
-{
- walk(callback, arg, EFI_MEMORY_WB);
-}
-
-/*
- * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
- * has memory that is available for uncached allocator.
- */
-void
-efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
-{
- walk(callback, arg, EFI_MEMORY_UC);
-}
-
-/*
- * Look for the PAL_CODE region reported by EFI and map it using an
- * ITR to enable safe PAL calls in virtual mode. See IA-64 Processor
- * Abstraction Layer chapter 11 in ADAG
- */
-
-#ifdef XEN
-static void *
-__efi_get_pal_addr (void)
-#else
-void *
-efi_get_pal_addr (void)
-#endif
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- int pal_code_count = 0;
- u64 vaddr, mask;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (md->type != EFI_PAL_CODE)
- continue;
-
- if (++pal_code_count > 1) {
- printk(KERN_ERR "Too many EFI Pal Code memory ranges,
dropped @ %lx\n",
- md->phys_addr);
- continue;
- }
- /*
-		 * The only ITLB entry in region 7 that is used is the one installed by
- * __start(). That entry covers a 64MB range.
- */
- mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
- vaddr = PAGE_OFFSET + md->phys_addr;
-
- /*
-		 * We must check that the PAL mapping won't overlap with the kernel
-		 * mapping.
-		 *
-		 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
-		 * 256KB and that only one ITR is needed to map it. This implies that the
-		 * PAL code is always aligned on its size, i.e., the closest matching page
-		 * size supported by the TLB. Therefore PAL code is guaranteed never to
-		 * cross a 64MB unless it is bigger than 64MB (very unlikely!). So for
-		 * now the following test is enough to determine whether or not we need a
-		 * dedicated ITR for the PAL code.
- */
- if ((vaddr & mask) == (KERNEL_START & mask)) {
- printk(KERN_INFO "%s: no need to install ITR for PAL
code\n",
- __FUNCTION__);
- continue;
- }
-
- if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
- panic("Woah! PAL code size bigger than a granule!");
-
-#if EFI_DEBUG
- mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
-
- printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into
[0x%lx-0x%lx)\n",
- smp_processor_id(), md->phys_addr,
- md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
- vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
-#endif
- return __va_efi(md->phys_addr);
- }
- printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
- __FUNCTION__);
- return NULL;
-}
-
-#ifdef XEN
-static void *pal_vaddr = 0;
-
-void *
-efi_get_pal_addr(void)
-{
- if (!pal_vaddr)
- pal_vaddr = __efi_get_pal_addr();
- return pal_vaddr;
-}
-#endif
-
-#ifdef XEN
-static void
-__efi_unmap_pal_code (void *pal_vaddr)
-{
- ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long)pal_vaddr),
- IA64_GRANULE_SHIFT);
-}
-
-void
-efi_unmap_pal_code (void)
-{
- void *pal_vaddr = efi_get_pal_addr ();
- u64 psr;
-
- if (!pal_vaddr)
- return;
-
- /*
- * Cannot write to CRx with PSR.ic=1
- */
- psr = ia64_clear_ic();
- __efi_unmap_pal_code(pal_vaddr);
- ia64_set_psr(psr); /* restore psr */
- ia64_srlz_i();
-}
-#endif
-
-void
-efi_map_pal_code (void)
-{
- void *pal_vaddr = efi_get_pal_addr ();
- u64 psr;
-
- if (!pal_vaddr)
- return;
-
- /*
- * Cannot write to CRx with PSR.ic=1
- */
- psr = ia64_clear_ic();
-#ifdef XEN
- /* pal_vaddr must be unpinned before pinning
- * This is needed in the case of a nested EFI, PAL or SAL call */
- __efi_unmap_pal_code(pal_vaddr);
-#endif
-	ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
- pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
- IA64_GRANULE_SHIFT);
- ia64_set_psr(psr); /* restore psr */
- ia64_srlz_i();
-}
-
-void __init
-efi_init (void)
-{
- void *efi_map_start, *efi_map_end;
- efi_config_table_t *config_tables;
- efi_char16_t *c16;
- u64 efi_desc_size;
- char *cp, vendor[100] = "unknown";
- int i;
-
-	/* it's too early to be able to use the standard kernel command line support... */
-#ifdef XEN
- extern char saved_command_line[];
- for (cp = saved_command_line; *cp; ) {
-#else
- for (cp = boot_command_line; *cp; ) {
-#endif
- if (memcmp(cp, "mem=", 4) == 0) {
- mem_limit = memparse(cp + 4, &cp);
- } else if (memcmp(cp, "max_addr=", 9) == 0) {
- max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
- } else if (memcmp(cp, "min_addr=", 9) == 0) {
- min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
- } else {
- while (*cp != ' ' && *cp)
- ++cp;
- while (*cp == ' ')
- ++cp;
- }
- }
- if (min_addr != 0UL)
- printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >>
20);
- if (max_addr != ~0UL)
- printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >>
20);
-
- efi.systab = __va(ia64_boot_param->efi_systab);
-
- /*
- * Verify the EFI Table
- */
- if (efi.systab == NULL)
- panic("Woah! Can't find EFI system table.\n");
- if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
- panic("Woah! EFI system table signature incorrect\n");
- if ((efi.systab->hdr.revision >> 16) == 0)
- printk(KERN_WARNING "Warning: EFI system table version "
- "%d.%02d, expected 1.00 or greater\n",
- efi.systab->hdr.revision >> 16,
- efi.systab->hdr.revision & 0xffff);
-
- config_tables = __va(efi.systab->tables);
-
- /* Show what we know for posterity */
- c16 = __va(efi.systab->fw_vendor);
- if (c16) {
- for (i = 0;i < (int) sizeof(vendor) - 1 && *c16; ++i)
- vendor[i] = *c16++;
- vendor[i] = '\0';
- }
-
- printk(KERN_INFO "EFI v%u.%.02u by %s:",
-	       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
-
- efi.mps = EFI_INVALID_TABLE_ADDR;
- efi.acpi = EFI_INVALID_TABLE_ADDR;
- efi.acpi20 = EFI_INVALID_TABLE_ADDR;
- efi.smbios = EFI_INVALID_TABLE_ADDR;
- efi.sal_systab = EFI_INVALID_TABLE_ADDR;
- efi.boot_info = EFI_INVALID_TABLE_ADDR;
- efi.hcdp = EFI_INVALID_TABLE_ADDR;
- efi.uga = EFI_INVALID_TABLE_ADDR;
-
- for (i = 0; i < (int) efi.systab->nr_tables; i++) {
- if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
- efi.mps = config_tables[i].table;
- printk(" MPS=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
-			efi.acpi20 = config_tables[i].table;
-			printk(" ACPI 2.0=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
-			efi.acpi = config_tables[i].table;
-			printk(" ACPI=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
-			efi.smbios = config_tables[i].table;
-			printk(" SMBIOS=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
-			efi.sal_systab = config_tables[i].table;
-			printk(" SALsystab=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
- efi.hcdp = config_tables[i].table;
- printk(" HCDP=0x%lx", config_tables[i].table);
- }
- }
- printk("\n");
-
- runtime = __va(efi.systab->runtime);
- efi.get_time = phys_get_time;
- efi.set_time = phys_set_time;
- efi.get_wakeup_time = phys_get_wakeup_time;
- efi.set_wakeup_time = phys_set_wakeup_time;
- efi.get_variable = phys_get_variable;
- efi.get_next_variable = phys_get_next_variable;
- efi.set_variable = phys_set_variable;
- efi.get_next_high_mono_count = phys_get_next_high_mono_count;
- efi.reset_system = phys_reset_system;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-#if EFI_DEBUG
- /* print EFI memory map: */
- {
- efi_memory_desc_t *md;
- void *p;
-
-		for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
-			md = p;
-			printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
-			       i, md->type, md->attribute, md->phys_addr,
-			       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
- md->num_pages >> (20 - EFI_PAGE_SHIFT));
- }
- }
-#endif
-
-#ifndef XEN
- efi_map_pal_code();
-#endif
- efi_enter_virtual_mode();
-}
-
-void
-efi_enter_virtual_mode (void)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- efi_status_t status;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-#ifdef XEN
- if (md->attribute & EFI_MEMORY_RUNTIME) {
- if (md->attribute & EFI_MEMORY_WB)
- md->virt_addr = __IA64_EFI_CACHED_OFFSET|
- md->phys_addr;
- else if (md->attribute & (EFI_MEMORY_UC|EFI_MEMORY_WC|
- EFI_MEMORY_WT))
- md->virt_addr = __IA64_EFI_UNCACHED_OFFSET|
- md->phys_addr;
- }
-#else
- if (md->attribute & EFI_MEMORY_RUNTIME) {
- /*
-			 * Some descriptors have multiple bits set, so the order of
-			 * the tests is relevant.
- */
- if (md->attribute & EFI_MEMORY_WB) {
- md->virt_addr = (u64) __va(md->phys_addr);
- } else if (md->attribute & EFI_MEMORY_UC) {
- md->virt_addr = (u64) ioremap(md->phys_addr, 0);
- } else if (md->attribute & EFI_MEMORY_WC) {
-#if 0
-				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
-									   | _PAGE_D
-									   | _PAGE_MA_WC
-									   | _PAGE_PL_0
-									   | _PAGE_AR_RW));
-#else
- printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
- md->virt_addr = (u64) ioremap(md->phys_addr, 0);
-#endif
- } else if (md->attribute & EFI_MEMORY_WT) {
-#if 0
-				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
-									   | _PAGE_D | _PAGE_MA_WT
-									   | _PAGE_PL_0
-									   | _PAGE_AR_RW));
-#else
- printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
- md->virt_addr = (u64) ioremap(md->phys_addr, 0);
-#endif
- }
- }
-#endif
- }
-
- status = efi_call_phys(__va(runtime->set_virtual_address_map),
- ia64_boot_param->efi_memmap_size,
-			       efi_desc_size, ia64_boot_param->efi_memdesc_version,
- ia64_boot_param->efi_memmap);
- if (status != EFI_SUCCESS) {
- printk(KERN_WARNING "warning: unable to switch EFI into virtual
mode "
- "(status=%lu)\n", status);
- return;
- }
-
- /*
-	 * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
- */
- efi.get_time = virt_get_time;
- efi.set_time = virt_set_time;
- efi.get_wakeup_time = virt_get_wakeup_time;
- efi.set_wakeup_time = virt_set_wakeup_time;
- efi.get_variable = virt_get_variable;
- efi.get_next_variable = virt_get_next_variable;
- efi.set_variable = virt_set_variable;
- efi.get_next_high_mono_count = virt_get_next_high_mono_count;
- efi.reset_system = virt_reset_system;
-}
-
-/*
- * Walk the EFI memory map looking for the I/O port range. There can only be one entry of
- * this type, other I/O port ranges should be described via ACPI.
- */
-u64
-efi_get_iobase (void)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
- if (md->attribute & EFI_MEMORY_UC)
- return md->phys_addr;
- }
- }
- return 0;
-}
-
-static struct kern_memdesc *
-kern_memory_descriptor (unsigned long phys_addr)
-{
- struct kern_memdesc *md;
-
- for (md = kern_memmap; md->start != ~0UL; md++) {
- if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
- return md;
- }
- return NULL;
-}
-
-static efi_memory_desc_t *
-efi_memory_descriptor (unsigned long phys_addr)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-
-		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
- return md;
- }
- return NULL;
-}
-
-u32
-efi_mem_type (unsigned long phys_addr)
-{
- efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
-
- if (md)
- return md->type;
- return 0;
-}
-
-u64
-efi_mem_attributes (unsigned long phys_addr)
-{
- efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
-
- if (md)
- return md->attribute;
- return 0;
-}
-EXPORT_SYMBOL(efi_mem_attributes);
-
-u64
-efi_mem_attribute (unsigned long phys_addr, unsigned long size)
-{
- unsigned long end = phys_addr + size;
- efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
- u64 attr;
-
- if (!md)
- return 0;
-
- /*
- * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
- * the kernel that firmware needs this region mapped.
- */
- attr = md->attribute & ~EFI_MEMORY_RUNTIME;
- do {
- unsigned long md_end = efi_md_end(md);
-
- if (end <= md_end)
- return attr;
-
- md = efi_memory_descriptor(md_end);
- if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
- return 0;
- } while (md);
- return 0;
-}
-
-u64
-kern_mem_attribute (unsigned long phys_addr, unsigned long size)
-{
- unsigned long end = phys_addr + size;
- struct kern_memdesc *md;
- u64 attr;
-
- /*
- * This is a hack for ioremap calls before we set up kern_memmap.
- * Maybe we should do efi_memmap_init() earlier instead.
- */
- if (!kern_memmap) {
- attr = efi_mem_attribute(phys_addr, size);
- if (attr & EFI_MEMORY_WB)
- return EFI_MEMORY_WB;
- return 0;
- }
-
- md = kern_memory_descriptor(phys_addr);
- if (!md)
- return 0;
-
- attr = md->attribute;
- do {
- unsigned long md_end = kmd_end(md);
-
- if (end <= md_end)
- return attr;
-
- md = kern_memory_descriptor(md_end);
- if (!md || md->attribute != attr)
- return 0;
- } while (md);
- return 0;
-}
-EXPORT_SYMBOL(kern_mem_attribute);
-
-#ifndef XEN
-int
-valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
-{
- u64 attr;
-
- /*
- * /dev/mem reads and writes use copy_to_user(), which implicitly
- * uses a granule-sized kernel identity mapping. It's really
- * only safe to do this for regions in kern_memmap. For more
- * details, see Documentation/ia64/aliasing.txt.
- */
- attr = kern_mem_attribute(phys_addr, size);
- if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
- return 1;
- return 0;
-}
-
-int
-valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
-{
- /*
- * MMIO regions are often missing from the EFI memory map.
- * We must allow mmap of them for programs like X, so we
- * currently can't do any useful validation.
- */
- return 1;
-}
-
-pgprot_t
-phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
- pgprot_t vma_prot)
-{
- unsigned long phys_addr = pfn << PAGE_SHIFT;
- u64 attr;
-
- /*
- * For /dev/mem mmap, we use user mappings, but if the region is
- * in kern_memmap (and hence may be covered by a kernel mapping),
- * we must use the same attribute as the kernel mapping.
- */
- attr = kern_mem_attribute(phys_addr, size);
- if (attr & EFI_MEMORY_WB)
- return pgprot_cacheable(vma_prot);
- else if (attr & EFI_MEMORY_UC)
- return pgprot_noncached(vma_prot);
-
- /*
- * Some chipsets don't support UC access to memory. If
- * WB is supported, we prefer that.
- */
- if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
- return pgprot_cacheable(vma_prot);
-
- return pgprot_noncached(vma_prot);
-}
-#endif
-
-int __init
-efi_uart_console_only(void)
-{
- efi_status_t status;
- char *s, name[] = "ConOut";
- efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
- efi_char16_t *utf16, name_utf16[32];
- unsigned char data[1024];
- unsigned long size = sizeof(data);
- struct efi_generic_dev_path *hdr, *end_addr;
- int uart = 0;
-
- /* Convert to UTF-16 */
- utf16 = name_utf16;
- s = name;
- while (*s)
- *utf16++ = *s++ & 0x7f;
- *utf16 = 0;
-
- status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
- if (status != EFI_SUCCESS) {
- printk(KERN_ERR "No EFI %s variable?\n", name);
- return 0;
- }
-
- hdr = (struct efi_generic_dev_path *) data;
- end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
- while (hdr < end_addr) {
- if (hdr->type == EFI_DEV_MSG &&
- hdr->sub_type == EFI_DEV_MSG_UART)
- uart = 1;
- else if (hdr->type == EFI_DEV_END_PATH ||
- hdr->type == EFI_DEV_END_PATH2) {
- if (!uart)
- return 0;
- if (hdr->sub_type == EFI_DEV_END_ENTIRE)
- return 1;
- uart = 0;
- }
-		hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
- }
- printk(KERN_ERR "Malformed %s value\n", name);
- return 0;
-}
-
-/*
- * Look for the first granule-aligned memory descriptor memory
- * that is big enough to hold the EFI memory map. Make sure this
- * descriptor is at least granule sized so it does not get trimmed
- */
-struct kern_memdesc *
-find_memmap_space (void)
-{
- u64 contig_low=0, contig_high=0;
- u64 as = 0, ae;
- void *efi_map_start, *efi_map_end, *p, *q;
- efi_memory_desc_t *md, *pmd = NULL, *check_md;
- u64 space_needed, efi_desc_size;
- unsigned long total_mem = 0;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- /*
- * Worst case: we need 3 kernel descriptors for each efi descriptor
- * (if every entry has a WB part in the middle, and UC head and tail),
- * plus one for the end marker.
- */
- space_needed = sizeof(kern_memdesc_t) *
- (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);
-
- for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
- md = p;
- if (!efi_wb(md)) {
- continue;
- }
-		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
-			contig_low = GRANULEROUNDUP(md->phys_addr);
-			contig_high = efi_md_end(md);
-			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
- check_md = q;
- if (!efi_wb(check_md))
- break;
- if (contig_high != check_md->phys_addr)
- break;
- contig_high = efi_md_end(check_md);
- }
- contig_high = GRANULEROUNDDOWN(contig_high);
- }
- if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
- continue;
-
- /* Round ends inward to granule boundaries */
- as = max(contig_low, md->phys_addr);
- ae = min(contig_high, efi_md_end(md));
-
- /* keep within max_addr= and min_addr= command line arg */
- as = max(as, min_addr);
- ae = min(ae, max_addr);
- if (ae <= as)
- continue;
-
- /* avoid going over mem= command line arg */
- if (total_mem + (ae - as) > mem_limit)
- ae -= total_mem + (ae - as) - mem_limit;
-
- if (ae <= as)
- continue;
-
- if (ae - as > space_needed)
- break;
- }
- if (p >= efi_map_end)
- panic("Can't allocate space for kernel memory descriptors");
-
- return __va(as);
-}
-
-/*
- * Walk the EFI memory map and gather all memory available for kernel
- * to use. We can allocate partial granules only if the unavailable
- * parts exist, and are WB.
- */
-void
-efi_memmap_init(unsigned long *s, unsigned long *e)
-{
- struct kern_memdesc *k, *prev = NULL;
- u64 contig_low=0, contig_high=0;
- u64 as, ae, lim;
- void *efi_map_start, *efi_map_end, *p, *q;
- efi_memory_desc_t *md, *pmd = NULL, *check_md;
- u64 efi_desc_size;
- unsigned long total_mem = 0;
-
- k = kern_memmap = find_memmap_space();
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
- md = p;
- if (!efi_wb(md)) {
-			if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
-					   md->type == EFI_BOOT_SERVICES_DATA)) {
- k->attribute = EFI_MEMORY_UC;
- k->start = md->phys_addr;
- k->num_pages = md->num_pages;
- k++;
- }
- continue;
- }
-#ifdef XEN
- /* this works around a problem in the ski bootloader */
- if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
- continue;
-#endif
-		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
-			contig_low = GRANULEROUNDUP(md->phys_addr);
-			contig_high = efi_md_end(md);
-			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
- check_md = q;
- if (!efi_wb(check_md))
- break;
- if (contig_high != check_md->phys_addr)
- break;
- contig_high = efi_md_end(check_md);
- }
- contig_high = GRANULEROUNDDOWN(contig_high);
- }
- if (!is_memory_available(md))
- continue;
-
-#ifdef CONFIG_CRASH_DUMP
- /* saved_max_pfn should ignore max_addr= command line arg */
- if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
- saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
-#endif
- /*
- * Round ends inward to granule boundaries
- * Give trimmings to uncached allocator
- */
- if (md->phys_addr < contig_low) {
- lim = min(efi_md_end(md), contig_low);
- if (efi_uc(md)) {
-				if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
-				    kmd_end(k-1) == md->phys_addr) {
-					(k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
-				} else {
-					k->attribute = EFI_MEMORY_UC;
-					k->start = md->phys_addr;
-					k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
- k++;
- }
- }
- as = contig_low;
- } else
- as = md->phys_addr;
-
- if (efi_md_end(md) > contig_high) {
- lim = max(md->phys_addr, contig_high);
- if (efi_uc(md)) {
- if (lim == md->phys_addr && k > kern_memmap &&
- (k-1)->attribute == EFI_MEMORY_UC &&
- kmd_end(k-1) == md->phys_addr) {
- (k-1)->num_pages += md->num_pages;
- } else {
- k->attribute = EFI_MEMORY_UC;
- k->start = lim;
-					k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
- k++;
- }
- }
- ae = contig_high;
- } else
- ae = efi_md_end(md);
-
- /* keep within max_addr= and min_addr= command line arg */
- as = max(as, min_addr);
- ae = min(ae, max_addr);
- if (ae <= as)
- continue;
-
- /* avoid going over mem= command line arg */
- if (total_mem + (ae - as) > mem_limit)
- ae -= total_mem + (ae - as) - mem_limit;
-
- if (ae <= as)
- continue;
- if (prev && kmd_end(prev) == md->phys_addr) {
- prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
- total_mem += ae - as;
- continue;
- }
- k->attribute = EFI_MEMORY_WB;
- k->start = as;
- k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
- total_mem += ae - as;
- prev = k++;
- }
- k->start = ~0L; /* end-marker */
-
- /* reserve the memory we are using for kern_memmap */
- *s = (u64)kern_memmap;
- *e = (u64)++k;
-}
-
-#ifndef XEN
-void
-efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource)
-{
- struct resource *res;
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- char *name;
- unsigned long flags;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- res = NULL;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-
- if (md->num_pages == 0) /* should not happen */
- continue;
-
- flags = IORESOURCE_MEM;
- switch (md->type) {
-
- case EFI_MEMORY_MAPPED_IO:
- case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
- continue;
-
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_BOOT_SERVICES_DATA:
- case EFI_BOOT_SERVICES_CODE:
- case EFI_CONVENTIONAL_MEMORY:
- if (md->attribute & EFI_MEMORY_WP) {
- name = "System ROM";
- flags |= IORESOURCE_READONLY;
- } else {
- name = "System RAM";
- }
- break;
-
- case EFI_ACPI_MEMORY_NVS:
- name = "ACPI Non-volatile Storage";
- flags |= IORESOURCE_BUSY;
- break;
-
- case EFI_UNUSABLE_MEMORY:
- name = "reserved";
- flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
- break;
-
- case EFI_RESERVED_TYPE:
- case EFI_RUNTIME_SERVICES_CODE:
- case EFI_RUNTIME_SERVICES_DATA:
- case EFI_ACPI_RECLAIM_MEMORY:
- default:
- name = "reserved";
- flags |= IORESOURCE_BUSY;
- break;
- }
-
-		if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
-			printk(KERN_ERR "failed to allocate resource for iomem\n");
- return;
- }
-
- res->name = name;
- res->start = md->phys_addr;
-		res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
- res->flags = flags;
-
- if (insert_resource(&iomem_resource, res) < 0)
- kfree(res);
- else {
- /*
- * We don't know which region contains
- * kernel data so we try it repeatedly and
- * let the resource manager test it.
- */
- insert_resource(res, code_resource);
- insert_resource(res, data_resource);
-#ifdef CONFIG_KEXEC
- insert_resource(res, &efi_memmap_res);
- insert_resource(res, &boot_param_res);
- if (crashk_res.end > crashk_res.start)
- insert_resource(res, &crashk_res);
-#endif
- }
- }
-}
-#endif /* XEN */
-
-#if defined(CONFIG_KEXEC) || defined(XEN)
-/* Find a block of memory aligned to 64M, excluding reserved regions;
-   rsvd_regions are sorted.
- */
-unsigned long __init
-kdump_find_rsvd_region (unsigned long size,
- struct rsvd_region *r, int n)
-{
- int i;
- u64 start, end;
- u64 alignment = 1UL << _PAGE_SIZE_64M;
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (!efi_wb(md))
- continue;
- start = ALIGN(md->phys_addr, alignment);
- end = efi_md_end(md);
- for (i = 0; i < n; i++) {
- if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
- if (__pa(r[i].start) > start + size)
- return start;
- start = ALIGN(__pa(r[i].end), alignment);
- if (i < n-1 && __pa(r[i+1].start) < start + size)
- continue;
- else
- break;
- }
- }
- if (end > start + size)
- return start;
- }
-
- printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n",
- size);
- return ~0UL;
-}
-#endif
-
-#ifndef XEN
-#ifdef CONFIG_PROC_VMCORE
-/* locate the size of the descriptor at a certain address */
-unsigned long
-vmcore_find_descriptor_size (unsigned long address)
-{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
- unsigned long ret = 0;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (efi_wb(md) && md->type == EFI_LOADER_DATA
- && md->phys_addr == address) {
- ret = efi_md_size(md);
- break;
- }
- }
-
- if (ret == 0)
- printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");
-
- return ret;
-}
-#endif
-#endif /* XEN */
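
A pattern worth noting in the efi.c removed above is the memory-map walk:
the descriptor stride comes from the bootloader (efi_memdesc_size) and may
be larger than the descriptor struct itself, so every loop advances a byte
pointer by the stride rather than indexing an array. A minimal standalone
sketch, with a hypothetical memdesc struct; EFI_MEMORY_WB is defined
locally but matches the UEFI attribute bit:

#include <stdio.h>
#include <stdint.h>

#define EFI_MEMORY_WB (1ULL << 3)	/* write-back cacheable */

struct memdesc {
	uint32_t type;
	uint64_t phys_addr;
	uint64_t num_pages;
	uint64_t attribute;
};

/* Advance by desc_size, not sizeof(struct memdesc): firmware may append
 * fields, and the stride it reports is authoritative. */
static void walk(const void *map, unsigned long map_size,
		 unsigned long desc_size)
{
	const char *p = map;
	const char *end = p + map_size;

	for (; p < end; p += desc_size) {
		const struct memdesc *md = (const struct memdesc *)p;

		if (md->attribute & EFI_MEMORY_WB)
			printf("WB range at 0x%llx, %llu pages\n",
			       (unsigned long long)md->phys_addr,
			       (unsigned long long)md->num_pages);
	}
}

int main(void)
{
	struct memdesc map[2] = {
		{ 7, 0x100000,   256, EFI_MEMORY_WB },	/* conventional RAM */
		{ 0, 0xfee00000,   1, 0 },		/* uncached MMIO */
	};

	walk(map, sizeof(map), sizeof(map[0]));
	return 0;
}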
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/entry.S
--- a/xen/arch/ia64/linux-xen/entry.S Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1851 +0,0 @@
-/*
- * ia64/kernel/entry.S
- *
- * Kernel entry points.
- *
- * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 1999, 2002-2003
- * Asit Mallick <Asit.K.Mallick@xxxxxxxxx>
- * Don Dugger <Don.Dugger@xxxxxxxxx>
- * Suresh Siddha <suresh.b.siddha@xxxxxxxxx>
- * Fenghua Yu <fenghua.yu@xxxxxxxxx>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- */
-/*
- * ia64_switch_to now places correct virtual mapping in TR2 for
- * kernel stack. This allows us to handle interrupts without changing
- * to physical mode.
- *
- * Jonathan Nicklin <nicklin@xxxxxxxxxxxxxxxxxxxxxxxx>
- * Patrick O'Rourke <orourke@xxxxxxxxxxxxxxxxxxxxxxxx>
- * 11/07/2000
- */
-/*
- * Global (preserved) predicate usage on syscall entry/exit path:
- *
- * pKStk: See entry.h.
- * pUStk: See entry.h.
- * pSys: See entry.h.
- * pNonSys: !pSys
- */
-
-#include <linux/config.h>
-
-#include <asm/asmmacro.h>
-#include <asm/cache.h>
-#ifdef XEN
-#include <xen/errno.h>
-#else
-#include <asm/errno.h>
-#endif
-#include <asm/kregs.h>
-#include <asm/offsets.h>
-#include <asm/pgtable.h>
-#include <asm/percpu.h>
-#include <asm/processor.h>
-#include <asm/thread_info.h>
-#include <asm/unistd.h>
-
-#include "minstate.h"
-
-#ifndef XEN
- /*
- * execve() is special because in case of success, we need to
- * setup a null register window frame.
- */
-ENTRY(ia64_execve)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,4,0
- mov loc0=rp
- .body
- mov out0=in0 // filename
- ;; // stop bit between alloc and call
- mov out1=in1 // argv
- mov out2=in2 // envp
- add out3=16,sp // regs
- br.call.sptk.many rp=sys_execve
-.ret0:
-#ifdef CONFIG_IA32_SUPPORT
- /*
-	 * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
- * from pt_regs.
- */
- adds r16=PT(CR_IPSR)+16,sp
- ;;
- ld8 r16=[r16]
-#endif
- cmp4.ge p6,p7=r8,r0
- mov ar.pfs=loc1 // restore ar.pfs
- sxt4 r8=r8 // return 64-bit result
- ;;
- stf.spill [sp]=f0
-(p6)	cmp.ne pKStk,pUStk=r0,r0	// a successful execve() lands us in user-mode...
- mov rp=loc0
-(p6) mov ar.pfs=r0 // clear ar.pfs on success
-(p7) br.ret.sptk.many rp
-
- /*
-	 * In theory, we'd have to zap this state only to prevent leaking of
-	 * security sensitive state (e.g., if current->mm->dumpable is zero). However,
-	 * this executes in less than 20 cycles even on Itanium, so it's not worth
-	 * optimizing for...).
- */
- mov ar.unat=0; mov ar.lc=0
- mov r4=0; mov f2=f0; mov b1=r0
- mov r5=0; mov f3=f0; mov b2=r0
- mov r6=0; mov f4=f0; mov b3=r0
- mov r7=0; mov f5=f0; mov b4=r0
- ldf.fill f12=[sp]; mov f13=f0; mov b5=r0
- ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0
- ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0
- ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0
- ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
- ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
- ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
-#ifdef CONFIG_IA32_SUPPORT
- tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
- movl loc0=ia64_ret_from_ia32_execve
- ;;
-(p6) mov rp=loc0
-#endif
- br.ret.sptk.many rp
-END(ia64_execve)
-
-/*
- * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
- * u64 tls)
- */
-GLOBAL_ENTRY(sys_clone2)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc r16=ar.pfs,8,2,6,0
- DO_SAVE_SWITCH_STACK
- adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
- mov loc0=rp
- mov loc1=r16 // save ar.pfs across do_fork
- .body
- mov out1=in1
- mov out3=in2
- tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
- mov out4=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
- ;;
-(p6)	st8 [r2]=in5				// store TLS in r16 for copy_thread()
-	mov out5=in4	// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
-	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
- mov out0=in0 // out0 = clone_flags
- br.call.sptk.many rp=do_fork
-.ret1: .restore sp
- adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(sys_clone2)
-
-/*
- * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
- * Deprecated. Use sys_clone2() instead.
- */
-GLOBAL_ENTRY(sys_clone)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc r16=ar.pfs,8,2,6,0
- DO_SAVE_SWITCH_STACK
- adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
- mov loc0=rp
- mov loc1=r16 // save ar.pfs across do_fork
- .body
- mov out1=in1
-	mov out3=16				// stacksize (compensates for 16-byte scratch area)
- tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
- mov out4=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
- ;;
-(p6) st8 [r2]=in4 // store TLS in r13 (tp)
-	mov out5=in3	// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
-	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
- mov out0=in0 // out0 = clone_flags
- br.call.sptk.many rp=do_fork
-.ret2: .restore sp
- adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(sys_clone)
-#endif
-
-/*
- * prev_task <- ia64_switch_to(struct task_struct *next)
- *	With Ingo's new scheduler, interrupts are disabled when this routine gets
- * called. The code starting at .map relies on this. The rest of the code
- * doesn't care about the interrupt masking status.
- */
-GLOBAL_ENTRY(ia64_switch_to)
- .prologue
- alloc r16=ar.pfs,1,0,0,0
- DO_SAVE_SWITCH_STACK
- .body
-
- adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
-#ifdef XEN
- movl r24=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
- ld8 r27=[r24]
- adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
- dep r20=0,in0,60,4 // physical address of "next"
-#else
- movl r25=init_task
- mov r27=IA64_KR(CURRENT_STACK)
- adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
- dep r20=0,in0,61,3 // physical address of "next"
-#endif
- ;;
- st8 [r22]=sp // save kernel stack pointer of old task
- shr.u r26=r20,IA64_GRANULE_SHIFT
-#ifdef XEN
- ;;
- /*
- * If we've already mapped this task's page, we can skip doing it again.
- */
- cmp.eq p7,p6=r26,r27
-(p6) br.cond.dpnt .map
-#else
- cmp.eq p7,p6=r25,in0
- ;;
- /*
- * If we've already mapped this task's page, we can skip doing it again.
- */
-(p6) cmp.eq p7,p6=r26,r27
-(p6) br.cond.dpnt .map
-#endif
- ;;
-.done:
-(p6)	ssm psr.ic			// if we had to map, reenable the psr.ic bit FIRST!!!
- ;;
-(p6) srlz.d
- ld8 sp=[r21] // load kernel stack pointer of new task
-#ifdef XEN
- add r25=IA64_KR_CURRENT_OFFSET-IA64_KR_CURRENT_STACK_OFFSET,r24
- ;;
- st8 [r25]=in0 // update "current" application register
- ;;
- bsw.0
- ;;
-	mov r8=r13			// return pointer to previously running task
- mov r13=in0 // set "current" pointer
- mov r21=in0
- ;;
- bsw.1
- ;;
-#else
- mov IA64_KR(CURRENT)=in0 // update "current" application register
- mov r8=r13 // return pointer to previously running task
- mov r13=in0 // set "current" pointer
-#endif
- DO_LOAD_SWITCH_STACK
-
-#ifdef CONFIG_SMP
-	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
-#endif
- br.ret.sptk.many rp // boogie on out in new context
-
-.map:
-	rsm psr.ic			// interrupts (psr.i) are already disabled here
- movl r25=PAGE_KERNEL
-#ifdef XEN
- movl r27=IA64_GRANULE_SHIFT << 2
-#endif
- ;;
- srlz.d
- or r23=r25,r20 // construct PA | page properties
-#ifdef XEN
-	ptr.d in0,r27			// to purge dtr[IA64_TR_VHPT] and dtr[IA64_TR_VPD]
-#else
- movl r27=IA64_GRANULE_SHIFT << 2
-#endif
- ;;
- mov cr.itir=r27
- mov cr.ifa=in0 // VA of next task...
-#ifdef XEN
- srlz.d
-#endif
- ;;
- mov r25=IA64_TR_CURRENT_STACK
-#ifdef XEN
- st8 [r24]=r26 // remember last page we mapped...
-#else
- mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped...
-#endif
- ;;
- itr.d dtr[r25]=r23 // wire in new mapping...
- br.cond.sptk .done
-END(ia64_switch_to)
-
-/*
- * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
- * means that we may get an interrupt with "sp" pointing to the new kernel stack while
- * ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc,
- * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
- * problem. Also, we don't need to specify unwind information for preserved registers
- * that are not modified in save_switch_stack as the right unwind information is already
- * specified at the call-site of save_switch_stack.
- */
-
-/*
- * save_switch_stack:
- * - r16 holds ar.pfs
- * - b7 holds address to return to
- * - rp (b0) holds return address to save
- */
-GLOBAL_ENTRY(save_switch_stack)
- .prologue
- .altrp b7
-	flushrs			// flush dirty regs to backing store (must be first in insn group)
- .save @priunat,r17
- mov r17=ar.unat // preserve caller's
- .body
-#ifdef CONFIG_ITANIUM
- adds r2=16+128,sp
- adds r3=16+64,sp
- adds r14=SW(R4)+16,sp
- ;;
- st8.spill [r14]=r4,16 // spill r4
- lfetch.fault.excl.nt1 [r3],128
- ;;
- lfetch.fault.excl.nt1 [r2],128
- lfetch.fault.excl.nt1 [r3],128
- ;;
- lfetch.fault.excl [r2]
- lfetch.fault.excl [r3]
- adds r15=SW(R5)+16,sp
-#else
- add r2=16+3*128,sp
- add r3=16,sp
- add r14=SW(R4)+16,sp
- ;;
-	st8.spill [r14]=r4,SW(R6)-SW(R4)	// spill r4 and prefetch offset 0x1c0
- lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x010
- ;;
- lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x090
- lfetch.fault.excl.nt1 [r2],128 // prefetch offset 0x190
- ;;
- lfetch.fault.excl.nt1 [r3] // prefetch offset 0x110
- lfetch.fault.excl.nt1 [r2] // prefetch offset 0x210
- adds r15=SW(R5)+16,sp
-#endif
- ;;
- st8.spill [r15]=r5,SW(R7)-SW(R5) // spill r5
-	mov.m ar.rsc=0		// put RSE in mode: enforced lazy, little endian, pl 0
- add r2=SW(F2)+16,sp // r2 = &sw->f2
- ;;
- st8.spill [r14]=r6,SW(B0)-SW(R6) // spill r6
- mov.m r18=ar.fpsr // preserve fpsr
- add r3=SW(F3)+16,sp // r3 = &sw->f3
- ;;
- stf.spill [r2]=f2,32
- mov.m r19=ar.rnat
- mov r21=b0
-
- stf.spill [r3]=f3,32
- st8.spill [r15]=r7,SW(B2)-SW(R7) // spill r7
- mov r22=b1
- ;;
- // since we're done with the spills, read and save ar.unat:
- mov.m r29=ar.unat
- mov.m r20=ar.bspstore
- mov r23=b2
- stf.spill [r2]=f4,32
- stf.spill [r3]=f5,32
- mov r24=b3
- ;;
- st8 [r14]=r21,SW(B1)-SW(B0) // save b0
- st8 [r15]=r23,SW(B3)-SW(B2) // save b2
- mov r25=b4
- mov r26=b5
- ;;
- st8 [r14]=r22,SW(B4)-SW(B1) // save b1
- st8 [r15]=r24,SW(AR_PFS)-SW(B3) // save b3
- mov r21=ar.lc // I-unit
- stf.spill [r2]=f12,32
- stf.spill [r3]=f13,32
- ;;
- st8 [r14]=r25,SW(B5)-SW(B4) // save b4
- st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs
- stf.spill [r2]=f14,32
- stf.spill [r3]=f15,32
- ;;
- st8 [r14]=r26 // save b5
- st8 [r15]=r21 // save ar.lc
- stf.spill [r2]=f16,32
- stf.spill [r3]=f17,32
- ;;
- stf.spill [r2]=f18,32
- stf.spill [r3]=f19,32
- ;;
- stf.spill [r2]=f20,32
- stf.spill [r3]=f21,32
- ;;
- stf.spill [r2]=f22,32
- stf.spill [r3]=f23,32
- ;;
- stf.spill [r2]=f24,32
- stf.spill [r3]=f25,32
- ;;
- stf.spill [r2]=f26,32
- stf.spill [r3]=f27,32
- ;;
- stf.spill [r2]=f28,32
- stf.spill [r3]=f29,32
- ;;
- stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
- stf.spill [r3]=f31,SW(PR)-SW(F31)
- add r14=SW(CALLER_UNAT)+16,sp
- ;;
- st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat
- st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
- mov r21=pr
- ;;
- st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
- st8 [r3]=r21 // save predicate registers
- ;;
- st8 [r2]=r20 // save ar.bspstore
- st8 [r14]=r18 // save fpsr
- mov ar.rsc=3 // put RSE back into eager mode, pl 0
- br.cond.sptk.many b7
-END(save_switch_stack)
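
For reference, the SW() offsets above index a switch_stack frame. A hedged C sketch
of the preserved state that save_switch_stack spills and load_switch_stack refills
(struct name and grouping are illustrative; the authoritative layout is the ia64
struct switch_stack header definition):

    /* Illustrative only -- mirrors the spills above, not the real header. */
    struct switch_stack_sketch {
        unsigned long caller_unat;            /* caller's ar.unat (r17 above) */
        unsigned long ar_fpsr;                /* ar.fpsr */
        unsigned long f2_f5[4][2];            /* preserved FP regs, 16 bytes each */
        unsigned long f12_f31[20][2];         /* remaining preserved FP regs */
        unsigned long r4, r5, r6, r7;         /* preserved GRs, st8.spill'ed */
        unsigned long b0, b1, b2, b3, b4, b5; /* branch registers */
        unsigned long ar_pfs, ar_lc;          /* ar.pfs, ar.lc */
        unsigned long ar_unat, ar_rnat;       /* NaT collections */
        unsigned long ar_bspstore;            /* RSE backing store pointer */
        unsigned long pr;                     /* predicate registers */
    };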
-
-/*
- * load_switch_stack:
- * - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
- * - b7 holds address to return to
- * - must not touch r8-r11
- */
-#ifdef XEN
-GLOBAL_ENTRY(load_switch_stack)
-#else
-ENTRY(load_switch_stack)
-#endif
- .prologue
- .altrp b7
-
- .body
- lfetch.fault.nt1 [sp]
- adds r2=SW(AR_BSPSTORE)+16,sp
- adds r3=SW(AR_UNAT)+16,sp
-	mov ar.rsc=0			// put RSE into enforced lazy mode
- adds r14=SW(CALLER_UNAT)+16,sp
- adds r15=SW(AR_FPSR)+16,sp
- ;;
- ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE)) // bspstore
- ld8 r29=[r3],(SW(B1)-SW(AR_UNAT)) // unat
- ;;
- ld8 r21=[r2],16 // restore b0
- ld8 r22=[r3],16 // restore b1
- ;;
- ld8 r23=[r2],16 // restore b2
- ld8 r24=[r3],16 // restore b3
- ;;
- ld8 r25=[r2],16 // restore b4
- ld8 r26=[r3],16 // restore b5
- ;;
- ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs
- ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc
- ;;
- ld8 r28=[r2] // restore pr
- ld8 r30=[r3] // restore rnat
- ;;
- ld8 r18=[r14],16 // restore caller's unat
- ld8 r19=[r15],24 // restore fpsr
- ;;
- ldf.fill f2=[r14],32
- ldf.fill f3=[r15],32
- ;;
- ldf.fill f4=[r14],32
- ldf.fill f5=[r15],32
- ;;
- ldf.fill f12=[r14],32
- ldf.fill f13=[r15],32
- ;;
- ldf.fill f14=[r14],32
- ldf.fill f15=[r15],32
- ;;
- ldf.fill f16=[r14],32
- ldf.fill f17=[r15],32
- ;;
- ldf.fill f18=[r14],32
- ldf.fill f19=[r15],32
- mov b0=r21
- ;;
- ldf.fill f20=[r14],32
- ldf.fill f21=[r15],32
- mov b1=r22
- ;;
- ldf.fill f22=[r14],32
- ldf.fill f23=[r15],32
- mov b2=r23
- ;;
- mov ar.bspstore=r27
- mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7
- mov b3=r24
- ;;
- ldf.fill f24=[r14],32
- ldf.fill f25=[r15],32
- mov b4=r25
- ;;
- ldf.fill f26=[r14],32
- ldf.fill f27=[r15],32
- mov b5=r26
- ;;
- ldf.fill f28=[r14],32
- ldf.fill f29=[r15],32
- mov ar.pfs=r16
- ;;
- ldf.fill f30=[r14],32
- ldf.fill f31=[r15],24
- mov ar.lc=r17
- ;;
- ld8.fill r4=[r14],16
- ld8.fill r5=[r15],16
- mov pr=r28,-1
- ;;
- ld8.fill r6=[r14],16
- ld8.fill r7=[r15],16
-
- mov ar.unat=r18 // restore caller's unat
-	mov ar.rnat=r30		// must restore after bspstore but before rsc!
-	mov ar.fpsr=r19		// restore fpsr
-	mov ar.rsc=3		// put RSE back into eager mode, pl 0
- br.cond.sptk.many b7
-END(load_switch_stack)
-
-#ifndef XEN
-GLOBAL_ENTRY(execve)
- mov r15=__NR_execve // put syscall number in place
- break __BREAK_SYSCALL
- br.ret.sptk.many rp
-END(execve)
-
-GLOBAL_ENTRY(clone)
- mov r15=__NR_clone // put syscall number in place
- break __BREAK_SYSCALL
- br.ret.sptk.many rp
-END(clone)
-
- /*
- * Invoke a system call, but do some tracing before and after the call.
- * We MUST preserve the current register frame throughout this routine
- * because some system calls (such as ia64_execve) directly
- * manipulate ar.pfs.
- */
-GLOBAL_ENTRY(ia64_trace_syscall)
- PT_REGS_UNWIND_INFO(0)
- /*
- * We need to preserve the scratch registers f6-f11 in case the system
- * call is sigreturn.
- */
- adds r16=PT(F6)+16,sp
- adds r17=PT(F7)+16,sp
- ;;
- stf.spill [r16]=f6,32
- stf.spill [r17]=f7,32
- ;;
- stf.spill [r16]=f8,32
- stf.spill [r17]=f9,32
- ;;
- stf.spill [r16]=f10
- stf.spill [r17]=f11
-	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
- adds r16=PT(F6)+16,sp
- adds r17=PT(F7)+16,sp
- ;;
- ldf.fill f6=[r16],32
- ldf.fill f7=[r17],32
- ;;
- ldf.fill f8=[r16],32
- ldf.fill f9=[r17],32
- ;;
- ldf.fill f10=[r16]
- ldf.fill f11=[r17]
-	// the syscall number may have changed, so re-load it and re-calculate the
-	// syscall entry-point:
-	adds r15=PT(R15)+16,sp			// r15 = &pt_regs.r15 (syscall #)
- ;;
- ld8 r15=[r15]
- mov r3=NR_syscalls - 1
- ;;
- adds r15=-1024,r15
- movl r16=sys_call_table
- ;;
-	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
- cmp.leu p6,p7=r15,r3
- ;;
-(p6)	ld8 r20=[r20]				// load address of syscall entry point
-(p7) movl r20=sys_ni_syscall
- ;;
- mov b6=r20
- br.call.sptk.many rp=b6 // do the syscall
-.strace_check_retval:
- cmp.lt p6,p0=r8,r0 // syscall failed?
- adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
- adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
- mov r10=0
-(p6) br.cond.sptk strace_error // syscall failed ->
- ;; // avoid RAW on r10
-.strace_save_retval:
-.mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
-.mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
-	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-.ret3: br.cond.sptk .work_pending_syscall_end
-
-strace_error:
- ld8 r3=[r2] // load pt_regs.r8
-	sub r9=0,r8				// negate return value to get errno value
- ;;
- cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0?
- adds r3=16,r2 // r3=&pt_regs.r10
- ;;
-(p6) mov r10=-1
-(p6) mov r8=r9
- br.cond.sptk .strace_save_retval
-END(ia64_trace_syscall)
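
The reload-and-redispatch sequence above boils down to a bounds-checked table
lookup; a rough C rendering (sketch only; syscall_fn_t and pt_regs_r15 are
hypothetical names, and ia64 syscall numbers are biased by 1024):

    /* Rough C equivalent of the re-dispatch above (illustrative). */
    typedef long (*syscall_fn_t)(void);            /* hypothetical typedef */
    unsigned long nr = pt_regs_r15 - 1024;         /* adds r15=-1024,r15 */
    syscall_fn_t fn = (nr <= NR_syscalls - 1)      /* cmp.leu: unsigned check */
                      ? sys_call_table[nr]         /* shladd: table + 8*nr */
                      : sys_ni_syscall;
    long ret = fn();                               /* br.call.sptk.many rp=b6 */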
-
- /*
- * When traced and returning from sigreturn, we invoke syscall_trace but then
- * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
- */
-GLOBAL_ENTRY(ia64_strace_leave_kernel)
- PT_REGS_UNWIND_INFO(0)
-{ /*
- * Some versions of gas generate bad unwind info if the first instruction of a
- * procedure doesn't go into the first slot of a bundle.  This is a workaround.
- */
- nop.m 0
- nop.i 0
-	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-}
-.ret4: br.cond.sptk ia64_leave_kernel
-END(ia64_strace_leave_kernel)
-#endif
-
-GLOBAL_ENTRY(ia64_ret_from_clone)
- PT_REGS_UNWIND_INFO(0)
-{ /*
- * Some versions of gas generate bad unwind info if the first instruction of a
- * procedure doesn't go into the first slot of a bundle.  This is a workaround.
- */
- nop.m 0
- nop.i 0
- /*
- * We need to call schedule_tail() to complete the scheduling process.
- * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
- * address of the previously executing task.
- */
- br.call.sptk.many rp=ia64_invoke_schedule_tail
-}
-#ifdef XEN
- // new domains are cloned but not exec'ed so switch to user mode here
- cmp.ne pKStk,pUStk=r0,r0
- adds r16 = IA64_VCPU_FLAGS_OFFSET, r13
- ;;
- ld8 r16 = [r16] // arch.arch_vmx.flags
- ;;
- cmp.eq p6,p0 = r16, r0
-(p6) br.cond.spnt ia64_leave_kernel // !VMX_DOMAIN
- ;;
- adds r16 = PT(CR_IFS)+16, r12
- ;;
- ld8 r16 = [r16]
- cmp.eq pNonSys,pSys=r0,r0 // pSys=0,pNonSys=1
- ;;
- cmp.eq p6,p7 = 0x6, r16
-(p7) br.cond.sptk ia64_leave_hypervisor // VMX_DOMAIN
- ;;
- /*
- * cr.ifs.v==0 && cr.ifm(ar.pfm)==6 means that HYPERVISOR_suspend
- * has been called. (i.e. HVM with PV driver is restored here)
- * We need to allocate a dummy RSE stack frame to resume.
- */
- alloc r32=ar.pfs, 0, 0, 6, 0
- cmp.eq pSys,pNonSys=r0,r0 // pSys=1,pNonSys=0
- ;;
- bsw.0
- ;;
- mov r21=r13 // set current
- ;;
- bsw.1
- ;;
- mov r8=r0
- br.cond.sptk.many ia64_leave_hypercall
-#else
-.ret8:
- adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
- ld4 r2=[r2]
- ;;
- mov r8=0
- and r2=_TIF_SYSCALL_TRACEAUDIT,r2
- ;;
- cmp.ne p6,p0=r2,r0
-(p6) br.cond.spnt .strace_check_retval
-#endif
-	;;					// added stop bits to prevent r8 dependency
-END(ia64_ret_from_clone)
- // fall through
-GLOBAL_ENTRY(ia64_ret_from_syscall)
- PT_REGS_UNWIND_INFO(0)
-	cmp.ge p6,p7=r8,r0			// syscall executed successfully?
- adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
- mov r10=r0 // clear error indication in r10
-#ifndef XEN
-(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
-#endif
-END(ia64_ret_from_syscall)
- // fall through
-/*
- * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
- * need to switch to bank 0 and doesn't restore the scratch registers.
- * To avoid leaking kernel bits, the scratch registers are set to
- * the following known-to-be-safe values:
- *
- * r1: restored (global pointer)
- * r2: cleared
- * r3: 1 (when returning to user-level)
- * r8-r11: restored (syscall return value(s))
- * r12: restored (user-level stack pointer)
- * r13: restored (user-level thread pointer)
- * r14: set to __kernel_syscall_via_epc
- * r15: restored (syscall #)
- * r16-r17: cleared
- * r18: user-level b6
- * r19: cleared
- * r20: user-level ar.fpsr
- * r21: user-level b0
- * r22: cleared
- * r23: user-level ar.bspstore
- * r24: user-level ar.rnat
- * r25: user-level ar.unat
- * r26: user-level ar.pfs
- * r27: user-level ar.rsc
- * r28: user-level ip
- * r29: user-level psr
- * r30: user-level cfm
- * r31: user-level pr
- * f6-f11: cleared
- * pr: restored (user-level pr)
- * b0: restored (user-level rp)
- * b6: restored
- * b7: set to __kernel_syscall_via_epc
- * ar.unat: restored (user-level ar.unat)
- * ar.pfs: restored (user-level ar.pfs)
- * ar.rsc: restored (user-level ar.rsc)
- * ar.rnat: restored (user-level ar.rnat)
- * ar.bspstore: restored (user-level ar.bspstore)
- * ar.fpsr: restored (user-level ar.fpsr)
- * ar.ccv: cleared
- * ar.csd: cleared
- * ar.ssd: cleared
- */
-ENTRY(ia64_leave_syscall)
- PT_REGS_UNWIND_INFO(0)
- /*
- * work.need_resched etc. mustn't get changed by this CPU before it returns to
- * user- or fsys-mode, hence we disable interrupts early on.
- *
- * p6 controls whether current_thread_info()->flags needs to be checked for
- * extra work.  We always check for extra work when returning to user-level.
- * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
- * is 0.  After extra work processing has been completed, execution
- * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
- * needs to be redone.
- */
-#ifdef CONFIG_PREEMPT
- rsm psr.i // disable interrupts
- cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
- ;;
- .pred.rel.mutex pUStk,pKStk
-(pKStk) ld4 r21=[r20] // r21 <- preempt_count
-(pUStk) mov r21=0 // r21 <- 0
- ;;
- cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
-#else /* !CONFIG_PREEMPT */
-(pUStk) rsm psr.i
- cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
-(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
-#endif
-.work_processed_syscall:
- adds r2=PT(LOADRS)+16,r12
- adds r3=PT(AR_BSPSTORE)+16,r12
-#ifdef XEN
- ;;
-#else
- adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
-(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
-#endif
-	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
- nop.i 0
- ;;
-#ifndef XEN
-	mov r16=ar.bsp				// M2  get existing backing store pointer
-#endif
- ld8 r18=[r2],PT(R9)-PT(B6) // load b6
-#ifndef XEN
-(p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
-#endif
- ;;
-	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
-#ifndef XEN
-(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
-(p6) br.cond.spnt .work_pending_syscall
-#endif
- ;;
- // start restoring the state saved on the kernel stack (struct pt_regs):
- ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
- ld8 r11=[r3],PT(CR_IIP)-PT(R11)
-(pNonSys) break 0				// bug check: we shouldn't be here if pNonSys is TRUE!
- ;;
- invala // M0|1 invalidate ALAT
-	rsm psr.i | psr.ic			// M2   turn off interrupts and interruption collection
-#ifndef XEN
-	cmp.eq p9,p0=r0,r0			// A    set p9 to indicate that we should restore cr.ifs
-#endif
-
- ld8 r29=[r2],16 // M0|1 load cr.ipsr
- ld8 r28=[r3],16 // M0|1 load cr.iip
- mov r22=r0 // A clear r22
- ;;
- ld8 r30=[r2],16 // M0|1 load cr.ifs
- ld8 r25=[r3],16 // M0|1 load ar.unat
-(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
- ;;
- ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
-(pKStk) mov r22=psr				// M2   read PSR now that interrupts are disabled
- nop 0
- ;;
- ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
- ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
- mov f6=f0 // F clear f6
- ;;
-	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// M0|1 load ar.rnat (may be garbage)
- ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates
- mov f7=f0 // F clear f7
- ;;
- ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
- ld8.fill r1=[r3],16 // M0|1 load r1
-(pUStk) mov r17=1 // A
- ;;
-(pUStk) st1 [r14]=r17 // M2|3
- ld8.fill r13=[r3],16 // M0|1
- mov f8=f0 // F clear f8
- ;;
- ld8.fill r12=[r2] // M0|1 restore r12 (sp)
-#ifdef XEN
- ld8.fill r2=[r3] // M0|1
-#else
- ld8.fill r15=[r3] // M0|1 restore r15
-#endif
- mov b6=r18 // I0 restore b6
-
-#ifdef XEN
- movl r17=THIS_CPU(ia64_phys_stacked_size_p8) // A
-#else
- addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
-#endif
- mov f9=f0 // F clear f9
-(pKStk) br.cond.dpnt.many skip_rbs_switch // B
-
-	srlz.d				// M0   ensure interruption collection is off (for cover)
-	shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition
-#ifndef XEN
-	cover				// B    add current frame into dirty partition & set cr.ifs
-#endif
-	;;
-(pUStk) ld4 r17=[r17]			// M0|1 r17 = cpu_data->phys_stacked_size_p8
- mov r19=ar.bsp // M2 get new backing store pointer
- mov f10=f0 // F clear f10
-
- nop.m 0
-#ifdef XEN
- mov r14=r0
-#else
- movl r14=__kernel_syscall_via_epc // X
-#endif
- ;;
- mov.m ar.csd=r0 // M2 clear ar.csd
- mov.m ar.ccv=r0 // M2 clear ar.ccv
-	mov b7=r14			// I0   clear b7 (hint with __kernel_syscall_via_epc)
-
- mov.m ar.ssd=r0 // M2 clear ar.ssd
- mov f11=f0 // F clear f11
- br.cond.sptk.many rbs_switch // B
-END(ia64_leave_syscall)
-
-#ifdef CONFIG_IA32_SUPPORT
-GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
- PT_REGS_UNWIND_INFO(0)
- adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
- adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
- ;;
- .mem.offset 0,0
-	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
-	.mem.offset 8,0
-	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
-END(ia64_ret_from_ia32_execve)
- // fall through
-#endif /* CONFIG_IA32_SUPPORT */
-GLOBAL_ENTRY(ia64_leave_kernel)
- PT_REGS_UNWIND_INFO(0)
- /*
- * work.need_resched etc. mustn't get changed by this CPU before it returns to
- * user- or fsys-mode, hence we disable interrupts early on.
- *
- * p6 controls whether current_thread_info()->flags needs to be checked for
- * extra work.  We always check for extra work when returning to user-level.
- * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
- * is 0.  After extra work processing has been completed, execution
- * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
- * needs to be redone.
- */
-#ifdef CONFIG_PREEMPT
- rsm psr.i // disable interrupts
- cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
- ;;
- .pred.rel.mutex pUStk,pKStk
-(pKStk) ld4 r21=[r20] // r21 <- preempt_count
-(pUStk) mov r21=0 // r21 <- 0
- ;;
- cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
-#else
-(pUStk) rsm psr.i
- cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
-(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
-#endif
-.work_processed_kernel:
-#ifdef XEN
- ;;
-(pUStk) ssm psr.i
-(pUStk) br.call.sptk.many b0=do_softirq
-(pUStk) rsm psr.i
- ;;
-(pUStk) br.call.sptk.many b0=reflect_event
- ;;
- adds r7 = PT(EML_UNAT)+16,r12
- ;;
- ld8 r7 = [r7]
- ;;
- mov ar.unat=r7 /* load eml_unat */
- mov r31=r0
-
-#else
- adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
-(p6)	ld4 r31=[r17]				// load current_thread_info()->flags
-#endif
- adds r21=PT(PR)+16,r12
- ;;
-
- lfetch [r21],PT(CR_IPSR)-PT(PR)
- adds r2=PT(B6)+16,r12
- adds r3=PT(R16)+16,r12
- ;;
- lfetch [r21]
- ld8 r28=[r2],8 // load b6
- adds r29=PT(R24)+16,r12
-
-#ifdef XEN
- ld8.fill r16=[r3]
- adds r3=PT(AR_CSD)-PT(R16),r3
-#else
- ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
-#endif
- adds r30=PT(AR_CCV)+16,r12
-(p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
- ;;
- ld8.fill r24=[r29]
- ld8 r15=[r30] // load ar.ccv
-(p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending?
- ;;
- ld8 r29=[r2],16 // load b7
- ld8 r30=[r3],16 // load ar.csd
-#ifndef XEN
-(p6) br.cond.spnt .work_pending
-#endif
- ;;
- ld8 r31=[r2],16 // load ar.ssd
- ld8.fill r8=[r3],16
- ;;
- ld8.fill r9=[r2],16
- ld8.fill r10=[r3],PT(R17)-PT(R10)
- ;;
- ld8.fill r11=[r2],PT(R18)-PT(R11)
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- mov ar.csd=r30
- mov ar.ssd=r31
- ;;
-	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
- invala // invalidate ALAT
- ;;
- ld8.fill r22=[r2],24
- ld8.fill r23=[r3],24
- mov b6=r28
- ;;
- ld8.fill r25=[r2],16
- ld8.fill r26=[r3],16
- mov b7=r29
- ;;
- ld8.fill r27=[r2],16
- ld8.fill r28=[r3],16
- ;;
- ld8.fill r29=[r2],16
- ld8.fill r30=[r3],24
- ;;
- ld8.fill r31=[r2],PT(F9)-PT(R31)
- adds r3=PT(F10)-PT(F6),r3
- ;;
- ldf.fill f9=[r2],PT(F6)-PT(F9)
- ldf.fill f10=[r3],PT(F8)-PT(F10)
- ;;
- ldf.fill f6=[r2],PT(F7)-PT(F6)
- ;;
- ldf.fill f7=[r2],PT(F11)-PT(F7)
-#ifdef XEN
- ldf.fill f8=[r3],PT(R5)-PT(F8)
- ;;
- ldf.fill f11=[r2],PT(R4)-PT(F11)
- mov ar.ccv=r15
- ;;
- ld8.fill r4=[r2],16
- ld8.fill r5=[r3],16
- ;;
- ld8.fill r6=[r2]
- ld8.fill r7=[r3]
- ;;
-	srlz.d	// ensure that inter. collection is off (VHPT is don't care, since text is pinned)
-	;;
-	bsw.0	// switch back to bank 0 (no stop bit required beforehand...)
- ;;
-#else
- ldf.fill f8=[r3],32
- ;;
-	srlz.d	// ensure that inter. collection is off (VHPT is don't care, since text is pinned)
-	mov ar.ccv=r15
-	;;
-	ldf.fill f11=[r2]
-	bsw.0	// switch back to bank 0 (no stop bit required beforehand...)
- ;;
-#endif
-#ifdef XEN
-(pUStk) movl r18=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
-(pUStk) ld8 r18=[r18]
-#else
-(pUStk) mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
-#endif
- adds r16=PT(CR_IPSR)+16,r12
- adds r17=PT(CR_IIP)+16,r12
-
-(pKStk) mov r22=psr		// M2 read PSR now that interrupts are disabled
- nop.i 0
- nop.i 0
- ;;
- ld8 r29=[r16],16 // load cr.ipsr
- ld8 r28=[r17],16 // load cr.iip
- ;;
- ld8 r30=[r16],16 // load cr.ifs
- ld8 r25=[r17],16 // load ar.unat
- ;;
- ld8 r26=[r16],16 // load ar.pfs
- ld8 r27=[r17],16 // load ar.rsc
-#ifndef XEN
-	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
-#endif
- ;;
- ld8 r24=[r16],16 // load ar.rnat (may be garbage)
- ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
- ;;
- ld8 r31=[r16],16 // load predicates
- ld8 r21=[r17],16 // load b0
- ;;
- ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
- ld8.fill r1=[r17],16 // load r1
- ;;
- ld8.fill r12=[r16],16
- ld8.fill r13=[r17],16
-(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
- ;;
- ld8 r20=[r16],16 // ar.fpsr
- ld8.fill r15=[r17],16
- ;;
- ld8.fill r14=[r16],16
- ld8.fill r2=[r17]
-(pUStk) mov r17=1
- ;;
- ld8.fill r3=[r16]
-(pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
- shr.u r18=r19,16 // get byte size of existing "dirty" partition
- ;;
- mov r16=ar.bsp // get existing backing store pointer
-#ifdef XEN
- movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
-#else
- addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
-#endif
- ;;
- ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
-(pKStk) br.cond.dpnt skip_rbs_switch
-
- /*
- * Restore user backing store.
- *
- * NOTE: alloc, loadrs, and cover can't be predicated.
- */
-(pNonSys) br.cond.dpnt dont_preserve_current_frame
-	cover				// add current frame into dirty partition and set cr.ifs
- ;;
- mov r19=ar.bsp // get new backing store pointer
-rbs_switch:
-	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
-#ifndef XEN
- cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
-#endif
- ;;
-	sub r19=r19,r16			// calculate total byte size of dirty partition
-	add r18=64,r18			// don't force in0-in7 into memory...
-	;;
-	shl r19=r19,16			// shift size of dirty partition into loadrs position
- ;;
-dont_preserve_current_frame:
- /*
- * To prevent leaking bits between the kernel and user-space,
- * we must clear the stacked registers in the "invalid" partition here.
- * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
- * 5 registers/cycle on McKinley).
- */
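
The clearing loop below is written as a register-frame recursion: each
activation zeroes Nregs stacked registers and recurses while more of the
invalid partition remains. A hedged C analogue (the assembly overlaps the
alloc, the zeroing, and the branches across issue cycles):

    /* Illustrative C analogue of rse_clear_invalid below (not the real code). */
    static void rse_clear_invalid(long bytes_left, long depth)
    {
        if (bytes_left > Nregs * 8)                 /* cmp.lt pRecurse,...   */
            rse_clear_invalid(bytes_left - Nregs * 8, depth + 1);
        /* each activation zeroes its own Nregs stacked regs (mov locN=0);   */
        /* the depth counter (in1/out1 below) tells each frame whether a     */
        /* br.ret back up the chain is still needed (cmp.ne pReturn,...).    */
    }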
-# define pRecurse p6
-# define pReturn p7
-#ifdef CONFIG_ITANIUM
-# define Nregs 10
-#else
-# define Nregs 14
-#endif
- alloc loc0=ar.pfs,2,Nregs-2,2,0
-	shr.u loc1=r18,9		// RNaTslots <= floor(dirtySize / (64*8))
-	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
- ;;
- mov ar.rsc=r19 // load ar.rsc to be used for "loadrs"
- shladd in0=loc1,3,r17
- mov in1=0
- ;;
- TEXT_ALIGN(32)
-rse_clear_invalid:
-#ifdef CONFIG_ITANIUM
- // cycle 0
- { .mii
- alloc loc0=ar.pfs,2,Nregs-2,2,0
-	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
- add out0=-Nregs*8,in0
-}{ .mfb
- add out1=1,in1 // increment recursion count
- nop.f 0
-	nop.b 0			// can't do br.call here because of alloc (WAW on CFM)
- ;;
-}{ .mfi // cycle 1
- mov loc1=0
- nop.f 0
- mov loc2=0
-}{ .mib
- mov loc3=0
- mov loc4=0
-(pRecurse) br.call.sptk.many b0=rse_clear_invalid
-
-}{ .mfi // cycle 2
- mov loc5=0
- nop.f 0
-	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
-}{ .mib
- mov loc6=0
- mov loc7=0
-(pReturn) br.ret.sptk.many b0
-}
-#else /* !CONFIG_ITANIUM */
- alloc loc0=ar.pfs,2,Nregs-2,2,0
-	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
- add out0=-Nregs*8,in0
- add out1=1,in1 // increment recursion count
- mov loc1=0
- mov loc2=0
- ;;
- mov loc3=0
- mov loc4=0
- mov loc5=0
- mov loc6=0
- mov loc7=0
-(pRecurse) br.call.dptk.few b0=rse_clear_invalid
- ;;
- mov loc8=0
- mov loc9=0
-	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
- mov loc10=0
- mov loc11=0
-(pReturn) br.ret.dptk.many b0
-#endif /* !CONFIG_ITANIUM */
-# undef pRecurse
-# undef pReturn
- ;;
- alloc r17=ar.pfs,0,0,0,0 // drop current register frame
- ;;
- loadrs
- ;;
-skip_rbs_switch:
- mov ar.unat=r25 // M2
-(pKStk) extr.u r22=r22,21,1	// I0 extract current value of psr.pp from r22
-(pLvSys)mov r19=r0		// A  clear r19 for leave_syscall, no-op otherwise
- ;;
-(pUStk) mov ar.bspstore=r23 // M2
-(pKStk) dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp
-(pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
- ;;
- mov cr.ipsr=r29 // M2
- mov ar.pfs=r26 // I0
-(pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise
-#ifdef XEN
- mov cr.ifs=r30 // M2
-#else
-(p9) mov cr.ifs=r30 // M2
-#endif
- mov b0=r21 // I0
-(pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise
-
- mov ar.fpsr=r20 // M2
- mov cr.iip=r28 // M2
- nop 0
- ;;
-(pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode
- nop 0
-#ifdef XEN
-(pLvSys)mov r15=r0
-#else
-(pLvSys)mov r2=r0
-#endif
-
- mov ar.rsc=r27 // M2
- mov pr=r31,-1 // I0
- rfi // B
-
-#ifndef XEN
- /*
- * On entry:
- *	r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
- * r31 = current->thread_info->flags
- * On exit:
- * p6 = TRUE if work-pending-check needs to be redone
- */
-.work_pending_syscall:
- add r2=-8,r2
- add r3=-8,r3
- ;;
- st8 [r2]=r8
- st8 [r3]=r10
-.work_pending:
-	tbit.nz p6,p0=r31,TIF_SIGDELAYED	// signal delayed from MCA/INIT/NMI/PMI context?
-(p6)	br.cond.sptk.few .sigdelayed
-	;;
-	tbit.z p6,p0=r31,TIF_NEED_RESCHED	// current_thread_info()->need_resched==0?
-(p6) br.cond.sptk.few .notify
-#ifdef CONFIG_PREEMPT
-(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
- ;;
-(pKStk) st4 [r20]=r21
- ssm psr.i // enable interrupts
-#endif
- br.call.spnt.many rp=schedule
-.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
- rsm psr.i // disable interrupts
- ;;
-#ifdef CONFIG_PREEMPT
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
- ;;
-(pKStk) st4 [r20]=r0 // preempt_count() <- 0
-#endif
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel // re-check
-
-.notify:
-(pUStk) br.call.spnt.many rp=notify_resume_user
-.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel // don't re-check
-
-// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
-// it could not be delivered. Deliver it now. The signal might be for us and
-// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
-// signal.
-
-.sigdelayed:
- br.call.sptk.many rp=do_sigdelayed
-	cmp.eq p6,p0=r0,r0			// p6 <- 1, always re-check
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel // re-check
-
-.work_pending_syscall_end:
- adds r2=PT(R8)+16,r12
- adds r3=PT(R10)+16,r12
- ;;
- ld8 r8=[r2]
- ld8 r10=[r3]
- br.cond.sptk.many .work_processed_syscall // re-check
-#endif
-
-END(ia64_leave_kernel)
-
-ENTRY(handle_syscall_error)
- /*
- * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
- * lead us to mistake a negative return value as a failed syscall.  Those syscalls
- * must deposit a non-zero value in pt_regs.r8 to indicate an error. If
- * pt_regs.r8 is zero, we assume that the call completed successfully.
- */
- PT_REGS_UNWIND_INFO(0)
- ld8 r3=[r2] // load pt_regs.r8
- ;;
- cmp.eq p6,p7=r3,r0 // is pt_regs.r8==0?
- ;;
-(p7) mov r10=-1
-(p7) sub r8=0,r8 // negate return value to get errno
- br.cond.sptk ia64_leave_syscall
-END(handle_syscall_error)
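
In C terms the error path implements the usual ia64 syscall convention
(sketch only; the r8/r10 pair is what userland inspects on return):

    /* Illustrative rendering of handle_syscall_error. */
    if (pt_regs_r8 != 0) {     /* syscall pre-loaded a non-zero error mark */
        r10 = -1;              /* r10 = -1 flags an error to userland      */
        r8  = -r8;             /* negate return value to recover errno     */
    }
    /* then rejoin ia64_leave_syscall */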
-
- /*
- * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
- * in case a system call gets restarted.
- */
-GLOBAL_ENTRY(ia64_invoke_schedule_tail)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,1,0
- mov loc0=rp
- mov out0=r8 // Address of previous task
- ;;
- br.call.sptk.many rp=schedule_tail
-.ret11: mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(ia64_invoke_schedule_tail)
-
-#ifndef XEN
- /*
- * Setup stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
- * be set up by the caller.  We declare 8 input registers so the system call
- * args get preserved, in case we need to restart a system call.
- */
-ENTRY(notify_resume_user)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
- mov r9=ar.unat
- mov loc0=rp // save return address
- mov out0=0 // there is no "oldset"
- adds out1=8,sp // out1=&sigscratch->ar_pfs
-(pSys) mov out2=1 // out2==1 => we're in a syscall
- ;;
-(pNonSys) mov out2=0 // out2==0 => not a syscall
- .fframe 16
- .spillsp ar.unat, 16
-	st8 [sp]=r9,-16			// allocate space for ar.unat and save it
- st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch
- .body
- br.call.sptk.many rp=do_notify_resume_user
-.ret15: .restore sp
- adds sp=16,sp // pop scratch stack space
- ;;
-	ld8 r9=[sp]			// load new unat from sigscratch->scratch_unat
- mov rp=loc0
- ;;
- mov ar.unat=r9
- mov ar.pfs=loc1
- br.ret.sptk.many rp
-END(notify_resume_user)
-
-GLOBAL_ENTRY(sys_rt_sigsuspend)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
- mov r9=ar.unat
- mov loc0=rp // save return address
- mov out0=in0 // mask
- mov out1=in1 // sigsetsize
- adds out2=8,sp // out2=&sigscratch->ar_pfs
- ;;
- .fframe 16
- .spillsp ar.unat, 16
-	st8 [sp]=r9,-16			// allocate space for ar.unat and save it
- st8 [out2]=loc1,-8 // save ar.pfs, out2=&sigscratch
- .body
- br.call.sptk.many rp=ia64_rt_sigsuspend
-.ret17: .restore sp
- adds sp=16,sp // pop scratch stack space
- ;;
-	ld8 r9=[sp]			// load new unat from sw->caller_unat
- mov rp=loc0
- ;;
- mov ar.unat=r9
- mov ar.pfs=loc1
- br.ret.sptk.many rp
-END(sys_rt_sigsuspend)
-
-ENTRY(sys_rt_sigreturn)
- PT_REGS_UNWIND_INFO(0)
- /*
- * Allocate 8 input registers since ptrace() may clobber them
- */
- alloc r2=ar.pfs,8,0,1,0
- .prologue
- PT_REGS_SAVES(16)
- adds sp=-16,sp
- .body
-	cmp.eq pNonSys,pSys=r0,r0	// sigreturn isn't a normal syscall...
- ;;
- /*
- * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
- * syscall-entry path does not save them we save them here instead.  Note: we
- * don't need to save any other registers that are not saved by the stream-lined
- * syscall path, because restore_sigcontext() restores them.
- */
- adds r16=PT(F6)+32,sp
- adds r17=PT(F7)+32,sp
- ;;
- stf.spill [r16]=f6,32
- stf.spill [r17]=f7,32
- ;;
- stf.spill [r16]=f8,32
- stf.spill [r17]=f9,32
- ;;
- stf.spill [r16]=f10
- stf.spill [r17]=f11
- adds out0=16,sp // out0 = &sigscratch
- br.call.sptk.many rp=ia64_rt_sigreturn
-.ret19: .restore sp,0
- adds sp=16,sp
- ;;
- ld8 r9=[sp] // load new ar.unat
- mov.sptk b7=r8,ia64_leave_kernel
- ;;
- mov ar.unat=r9
- br.many b7
-END(sys_rt_sigreturn)
-#endif
-
-GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
- .prologue
- /*
- * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
- */
- mov r16=r0
- DO_SAVE_SWITCH_STACK
-	br.call.sptk.many rp=ia64_handle_unaligned	// stack frame setup in ivt
-.ret21: .body
- DO_LOAD_SWITCH_STACK
-	br.cond.sptk.many rp				// goes to ia64_leave_kernel
-END(ia64_prepare_handle_unaligned)
-
- //
- // unw_init_running(void (*callback)(info, arg), void *arg)
- //
-# define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)
-
-GLOBAL_ENTRY(unw_init_running)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
- alloc loc1=ar.pfs,2,3,3,0
- ;;
- ld8 loc2=[in0],8
- mov loc0=rp
- mov r16=loc1
- DO_SAVE_SWITCH_STACK
- .body
-
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
- .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
- SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
- adds sp=-EXTRA_FRAME_SIZE,sp
- .body
- ;;
- adds out0=16,sp // &info
- mov out1=r13 // current
- adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack
- br.call.sptk.many rp=unw_init_frame_info
-1: adds out0=16,sp // &info
- mov b6=loc2
-	mov loc2=gp			// save gp across indirect function call
- ;;
- ld8 gp=[in0]
- mov out1=in1 // arg
- br.call.sptk.many rp=b6 // invoke the callback function
-1: mov gp=loc2 // restore gp
-
- // For now, we don't allow changing registers from within
- // unw_init_running; if we ever want to allow that, we'd
- // have to do a load_switch_stack here:
- .restore sp
- adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp
-
- mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(unw_init_running)
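
As the comment before unw_init_running notes, it takes a callback and an
opaque argument, snapshots the current frame into a switch_stack, primes an
unw_frame_info for it, and invokes the callback. A hypothetical caller
(sketch only; the callback name and body are assumptions):

    /* Hypothetical use of unw_init_running (illustrative). */
    static void dump_stack_cb(struct unw_frame_info *info, void *arg)
    {
        /* walk the unwind state here, e.g. step frames with unw_unwind(info) */
    }

    unw_init_running(dump_stack_cb, NULL);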
-
-#ifdef XEN
-GLOBAL_ENTRY(ia64_do_multicall_call)
- movl r2=ia64_hypercall_table;;
- shladd r2=r38,3,r2;;
- ld8 r2=[r2];;
- mov b6=r2
- br.sptk.many b6;;
-END(ia64_do_multicall_call)
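
In C, ia64_do_multicall_call is a plain indexed indirect call into the table
that follows (hedged sketch; the function type and the name of the index
variable are assumptions, r38 carries the hypercall number above):

    /* Rough C equivalent of ia64_do_multicall_call (illustrative). */
    typedef long (*hypercall_fn_t)(void);          /* hypothetical type  */
    hypercall_fn_t fn = ia64_hypercall_table[op];  /* shladd r2=r38,3,r2 */
    return fn();                                   /* br.sptk.many b6    */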
-
-
- .rodata
- .align 8
- .globl ia64_hypercall_table
-ia64_hypercall_table:
- data8 do_ni_hypercall /* do_set_trap_table *//* 0 */
- data8 do_ni_hypercall /* do_mmu_update */
- data8 do_ni_hypercall /* do_set_gdt */
- data8 do_ni_hypercall /* do_stack_switch */
- data8 do_ni_hypercall /* do_set_callbacks */
- data8 do_ni_hypercall /* do_fpu_taskswitch *//* 5 */
- data8 do_sched_op_compat
- data8 do_platform_op
- data8 do_ni_hypercall /* do_set_debugreg */
- data8 do_ni_hypercall /* do_get_debugreg */
-	data8 do_ni_hypercall		/* do_update_descriptor *//* 10 */
- data8 do_ni_hypercall /* do_ni_hypercall */
- data8 do_memory_op
- data8 do_multicall
- data8 do_ni_hypercall /* do_update_va_mapping */
- data8 do_ni_hypercall /* do_set_timer_op */ /* 15 */
- data8 do_ni_hypercall
- data8 do_xen_version
- data8 do_console_io
- data8 do_ni_hypercall
- data8 do_grant_table_op /* 20 */
- data8 do_ni_hypercall /* do_vm_assist */
- data8 do_ni_hypercall /* do_update_va_mapping_othe */
- data8 do_ni_hypercall /* (x86 only) */
- data8 do_vcpu_op /* do_vcpu_op */
- data8 do_ni_hypercall /* (x86_64 only) */ /* 25 */
- data8 do_ni_hypercall /* do_mmuext_op */
- data8 do_ni_hypercall /* do_acm_op */
- data8 do_ni_hypercall /* do_nmi_op */
- data8 do_sched_op
- data8 do_callback_op /* */ /* 30 */
- data8 do_xenoprof_op /* */
- data8 do_event_channel_op
- data8 do_physdev_op
- data8 do_hvm_op /* */
- data8 do_sysctl /* */ /* 35 */
- data8 do_domctl /* */
- data8 do_kexec_op /* */
- data8 do_tmem_op /* */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */ /* 40 */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */ /* 45 */
- data8 do_ni_hypercall /* */
- data8 do_ni_hypercall /* */
- data8 do_dom0vp_op /* dom0vp_op */
- data8 do_pirq_guest_eoi /* arch_1 */
- data8 do_ia64_debug_op /* arch_2 */ /* 50 */
- data8 do_ni_hypercall /* arch_3 */
- data8 do_ni_hypercall /* arch_4 */
- data8 do_ni_hypercall /* arch_5 */
- data8 do_ni_hypercall /* arch_6 */
- data8 do_ni_hypercall /* arch_7 */ /* 55 */
- data8 do_ni_hypercall
- data8 do_ni_hypercall
- data8 do_ni_hypercall
- data8 do_ni_hypercall
- data8 do_ni_hypercall /* 60 */
- data8 do_ni_hypercall
- data8 do_ni_hypercall
- data8 do_ni_hypercall
-
- // guard against failures to increase NR_hypercalls
- .org ia64_hypercall_table + 8*NR_hypercalls
-
-#else
- .rodata
- .align 8
- .globl sys_call_table
-sys_call_table:
-	data8 sys_ni_syscall		//  This must be sys_ni_syscall!  See ivt.S.
- data8 sys_exit // 1025
- data8 sys_read
- data8 sys_write
- data8 sys_open
- data8 sys_close
- data8 sys_creat // 1030
- data8 sys_link
- data8 sys_unlink
- data8 ia64_execve
- data8 sys_chdir
- data8 sys_fchdir // 1035
- data8 sys_utimes
- data8 sys_mknod
- data8 sys_chmod
- data8 sys_chown
- data8 sys_lseek // 1040
- data8 sys_getpid
- data8 sys_getppid
- data8 sys_mount
- data8 sys_umount
- data8 sys_setuid // 1045
- data8 sys_getuid
- data8 sys_geteuid
- data8 sys_ptrace
- data8 sys_access
- data8 sys_sync // 1050
- data8 sys_fsync
- data8 sys_fdatasync
- data8 sys_kill
- data8 sys_rename
- data8 sys_mkdir // 1055
- data8 sys_rmdir
- data8 sys_dup
- data8 sys_pipe
- data8 sys_times
- data8 ia64_brk // 1060
- data8 sys_setgid
- data8 sys_getgid
- data8 sys_getegid
- data8 sys_acct
- data8 sys_ioctl // 1065
- data8 sys_fcntl
- data8 sys_umask
- data8 sys_chroot
- data8 sys_ustat
- data8 sys_dup2 // 1070
- data8 sys_setreuid
- data8 sys_setregid
- data8 sys_getresuid
- data8 sys_setresuid
- data8 sys_getresgid // 1075
- data8 sys_setresgid
- data8 sys_getgroups
- data8 sys_setgroups
- data8 sys_getpgid
- data8 sys_setpgid // 1080
- data8 sys_setsid
- data8 sys_getsid
- data8 sys_sethostname
- data8 sys_setrlimit
- data8 sys_getrlimit // 1085
- data8 sys_getrusage
- data8 sys_gettimeofday
- data8 sys_settimeofday
- data8 sys_select
- data8 sys_poll // 1090
- data8 sys_symlink
- data8 sys_readlink
- data8 sys_uselib
- data8 sys_swapon
- data8 sys_swapoff // 1095
- data8 sys_reboot
- data8 sys_truncate
- data8 sys_ftruncate
- data8 sys_fchmod
- data8 sys_fchown // 1100
- data8 ia64_getpriority
- data8 sys_setpriority
- data8 sys_statfs
- data8 sys_fstatfs
- data8 sys_gettid // 1105
- data8 sys_semget
- data8 sys_semop
- data8 sys_semctl
- data8 sys_msgget
- data8 sys_msgsnd // 1110
- data8 sys_msgrcv
- data8 sys_msgctl
- data8 sys_shmget
- data8 sys_shmat
- data8 sys_shmdt // 1115
- data8 sys_shmctl
- data8 sys_syslog
- data8 sys_setitimer
- data8 sys_getitimer
-	data8 sys_ni_syscall			// 1120		/* was: ia64_oldstat */
-	data8 sys_ni_syscall					/* was: ia64_oldlstat */
-	data8 sys_ni_syscall					/* was: ia64_oldfstat */
- data8 sys_vhangup
- data8 sys_lchown
- data8 sys_remap_file_pages // 1125
- data8 sys_wait4
- data8 sys_sysinfo
- data8 sys_clone
- data8 sys_setdomainname
- data8 sys_newuname // 1130
- data8 sys_adjtimex
-	data8 sys_ni_syscall					/* was: ia64_create_module */
- data8 sys_init_module
- data8 sys_delete_module
-	data8 sys_ni_syscall			// 1135		/* was: sys_get_kernel_syms */
-	data8 sys_ni_syscall					/* was: sys_query_module */
- data8 sys_quotactl
- data8 sys_bdflush
- data8 sys_sysfs
- data8 sys_personality // 1140
- data8 sys_ni_syscall // sys_afs_syscall
- data8 sys_setfsuid
- data8 sys_setfsgid
- data8 sys_getdents
- data8 sys_flock // 1145
- data8 sys_readv
- data8 sys_writev
- data8 sys_pread64
- data8 sys_pwrite64
- data8 sys_sysctl // 1150
- data8 sys_mmap
- data8 sys_munmap
- data8 sys_mlock
- data8 sys_mlockall
- data8 sys_mprotect // 1155
- data8 ia64_mremap
- data8 sys_msync
- data8 sys_munlock
- data8 sys_munlockall
- data8 sys_sched_getparam // 1160
- data8 sys_sched_setparam
- data8 sys_sched_getscheduler
- data8 sys_sched_setscheduler
- data8 sys_sched_yield
- data8 sys_sched_get_priority_max // 1165
- data8 sys_sched_get_priority_min
- data8 sys_sched_rr_get_interval
- data8 sys_nanosleep
- data8 sys_nfsservctl
- data8 sys_prctl // 1170
- data8 sys_getpagesize
- data8 sys_mmap2
- data8 sys_pciconfig_read
- data8 sys_pciconfig_write
- data8 sys_perfmonctl // 1175
- data8 sys_sigaltstack
- data8 sys_rt_sigaction
- data8 sys_rt_sigpending
- data8 sys_rt_sigprocmask
- data8 sys_rt_sigqueueinfo // 1180
- data8 sys_rt_sigreturn
- data8 sys_rt_sigsuspend
- data8 sys_rt_sigtimedwait
- data8 sys_getcwd
- data8 sys_capget // 1185
- data8 sys_capset
- data8 sys_sendfile64
- data8 sys_ni_syscall // sys_getpmsg (STREAMS)
- data8 sys_ni_syscall // sys_putpmsg (STREAMS)
- data8 sys_socket // 1190
- data8 sys_bind
- data8 sys_connect
- data8 sys_listen
- data8 sys_accept
- data8 sys_getsockname // 1195
- data8 sys_getpeername
- data8 sys_socketpair
- data8 sys_send
- data8 sys_sendto
- data8 sys_recv // 1200
- data8 sys_recvfrom
- data8 sys_shutdown
- data8 sys_setsockopt
- data8 sys_getsockopt
- data8 sys_sendmsg // 1205
- data8 sys_recvmsg
- data8 sys_pivot_root
- data8 sys_mincore
- data8 sys_madvise
- data8 sys_newstat // 1210
- data8 sys_newlstat
- data8 sys_newfstat
- data8 sys_clone2
- data8 sys_getdents64
- data8 sys_getunwind // 1215
- data8 sys_readahead
- data8 sys_setxattr
- data8 sys_lsetxattr
- data8 sys_fsetxattr
- data8 sys_getxattr // 1220
- data8 sys_lgetxattr
- data8 sys_fgetxattr
- data8 sys_listxattr
- data8 sys_llistxattr
- data8 sys_flistxattr // 1225
- data8 sys_removexattr
- data8 sys_lremovexattr
- data8 sys_fremovexattr
- data8 sys_tkill
- data8 sys_futex // 1230
- data8 sys_sched_setaffinity
- data8 sys_sched_getaffinity
- data8 sys_set_tid_address
- data8 sys_fadvise64_64
- data8 sys_tgkill // 1235
- data8 sys_exit_group
- data8 sys_lookup_dcookie
- data8 sys_io_setup
- data8 sys_io_destroy
- data8 sys_io_getevents // 1240
- data8 sys_io_submit
- data8 sys_io_cancel
- data8 sys_epoll_create
- data8 sys_epoll_ctl
- data8 sys_epoll_wait // 1245
- data8 sys_restart_syscall
- data8 sys_semtimedop
- data8 sys_timer_create
- data8 sys_timer_settime
- data8 sys_timer_gettime // 1250
- data8 sys_timer_getoverrun
- data8 sys_timer_delete
- data8 sys_clock_settime
- data8 sys_clock_gettime
- data8 sys_clock_getres // 1255
- data8 sys_clock_nanosleep
- data8 sys_fstatfs64
- data8 sys_statfs64
- data8 sys_mbind
- data8 sys_get_mempolicy // 1260
- data8 sys_set_mempolicy
- data8 sys_mq_open
- data8 sys_mq_unlink
- data8 sys_mq_timedsend
- data8 sys_mq_timedreceive // 1265
- data8 sys_mq_notify
- data8 sys_mq_getsetattr
- data8 sys_ni_syscall // reserved for kexec_load
- data8 sys_ni_syscall // reserved for vserver
- data8 sys_waitid // 1270
- data8 sys_add_key
- data8 sys_request_key
- data8 sys_keyctl
- data8 sys_ioprio_set
- data8 sys_ioprio_get // 1275
- data8 sys_ni_syscall
- data8 sys_inotify_init
- data8 sys_inotify_add_watch
- data8 sys_inotify_rm_watch
-
-	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
-#endif
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/entry.h
--- a/xen/arch/ia64/linux-xen/entry.h Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-#include <linux/config.h>
-
-/*
- * Preserved registers that are shared between code in ivt.S and
- * entry.S. Be careful not to step on these!
- */
-#define PRED_LEAVE_SYSCALL 1 /* TRUE iff leave from syscall */
-#define PRED_KERNEL_STACK 2 /* returning to kernel-stacks? */
-#define PRED_USER_STACK 3 /* returning to user-stacks? */
-#define PRED_SYSCALL 4 /* inside a system call? */
-#define PRED_NON_SYSCALL 5 /* complement of PRED_SYSCALL */
-
-#ifdef __ASSEMBLY__
-# define PASTE2(x,y) x##y
-# define PASTE(x,y) PASTE2(x,y)
-
-# define pLvSys PASTE(p,PRED_LEAVE_SYSCALL)
-# define pKStk PASTE(p,PRED_KERNEL_STACK)
-# define pUStk PASTE(p,PRED_USER_STACK)
-# define pSys PASTE(p,PRED_SYSCALL)
-# define pNonSys PASTE(p,PRED_NON_SYSCALL)
-#endif
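
The two-level PASTE is the standard trick to expand a macro argument before
token pasting; without the indirection, p##PRED_SYSCALL would yield the
literal token pPRED_SYSCALL instead of the register name. Illustratively:

    /* Illustrative expansion (not part of the original header). */
    #define PASTE2(x,y) x##y
    #define PASTE(x,y)  PASTE2(x,y)
    #define PRED_SYSCALL 4
    /* PASTE(p, PRED_SYSCALL) -> PASTE2(p, 4) -> p4, the predicate register */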
-
-#define PT(f) (IA64_PT_REGS_##f##_OFFSET)
-#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET)
-#ifdef XEN
-#define VPD(f) (VPD_##f##_START_OFFSET)
-#endif
-
-#define PT_REGS_SAVES(off) \
- .unwabi 3, 'i'; \
- .fframe IA64_PT_REGS_SIZE+16+(off); \
- .spillsp rp, PT(CR_IIP)+16+(off); \
- .spillsp ar.pfs, PT(CR_IFS)+16+(off); \
- .spillsp ar.unat, PT(AR_UNAT)+16+(off); \
- .spillsp ar.fpsr, PT(AR_FPSR)+16+(off); \
- .spillsp pr, PT(PR)+16+(off);
-
-#define PT_REGS_UNWIND_INFO(off) \
- .prologue; \
- PT_REGS_SAVES(off); \
- .body
-
-#define SWITCH_STACK_SAVES(off)						\
-	.savesp ar.unat,SW(CALLER_UNAT)+16+(off);			\
-	.savesp ar.fpsr,SW(AR_FPSR)+16+(off);				\
-	.spillsp f2,SW(F2)+16+(off); .spillsp f3,SW(F3)+16+(off);	\
-	.spillsp f4,SW(F4)+16+(off); .spillsp f5,SW(F5)+16+(off);	\
-	.spillsp f16,SW(F16)+16+(off); .spillsp f17,SW(F17)+16+(off);	\
-	.spillsp f18,SW(F18)+16+(off); .spillsp f19,SW(F19)+16+(off);	\
-	.spillsp f20,SW(F20)+16+(off); .spillsp f21,SW(F21)+16+(off);	\
-	.spillsp f22,SW(F22)+16+(off); .spillsp f23,SW(F23)+16+(off);	\
-	.spillsp f24,SW(F24)+16+(off); .spillsp f25,SW(F25)+16+(off);	\
-	.spillsp f26,SW(F26)+16+(off); .spillsp f27,SW(F27)+16+(off);	\
-	.spillsp f28,SW(F28)+16+(off); .spillsp f29,SW(F29)+16+(off);	\
-	.spillsp f30,SW(F30)+16+(off); .spillsp f31,SW(F31)+16+(off);	\
-	.spillsp r4,SW(R4)+16+(off); .spillsp r5,SW(R5)+16+(off);	\
-	.spillsp r6,SW(R6)+16+(off); .spillsp r7,SW(R7)+16+(off);	\
-	.spillsp b0,SW(B0)+16+(off); .spillsp b1,SW(B1)+16+(off);	\
-	.spillsp b2,SW(B2)+16+(off); .spillsp b3,SW(B3)+16+(off);	\
-	.spillsp b4,SW(B4)+16+(off); .spillsp b5,SW(B5)+16+(off);	\
-	.spillsp ar.pfs,SW(AR_PFS)+16+(off); .spillsp ar.lc,SW(AR_LC)+16+(off);	\
-	.spillsp @priunat,SW(AR_UNAT)+16+(off);				\
-	.spillsp ar.rnat,SW(AR_RNAT)+16+(off);				\
-	.spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off);			\
- .spillsp pr,SW(PR)+16+(off)
-
-#define DO_SAVE_SWITCH_STACK \
- movl r28=1f; \
- ;; \
- .fframe IA64_SWITCH_STACK_SIZE; \
- adds sp=-IA64_SWITCH_STACK_SIZE,sp; \
- mov.ret.sptk b7=r28,1f; \
- SWITCH_STACK_SAVES(0); \
- br.cond.sptk.many save_switch_stack; \
-1:
-
-#define DO_LOAD_SWITCH_STACK \
- movl r28=1f; \
- ;; \
- invala; \
- mov.ret.sptk b7=r28,1f; \
- br.cond.sptk.many load_switch_stack; \
-1: .restore sp; \
- adds sp=IA64_SWITCH_STACK_SIZE,sp
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/head.S
--- a/xen/arch/ia64/linux-xen/head.S Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1298 +0,0 @@
-/*
- * Here is where the ball gets rolling as far as the kernel is concerned.
- * When control is transferred to _start, the bootload has already
- * loaded us to the correct address. All that's left to do here is
- * to set up the kernel's global pointer and jump to the kernel
- * entry point.
- *
- * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Stephane Eranian <eranian@xxxxxxxxxx>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- * Copyright (C) 1999 Intel Corp.
- * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@xxxxxxxxx>
- * Copyright (C) 1999 Don Dugger <Don.Dugger@xxxxxxxxx>
- * Copyright (C) 2002 Fenghua Yu <fenghua.yu@xxxxxxxxx>
- * -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
- * Copyright (C) 2004 Ashok Raj <ashok.raj@xxxxxxxxx>
- * Support for CPU Hotplug
- */
-
-#include <linux/config.h>
-
-#include <asm/asmmacro.h>
-#include <asm/fpu.h>
-#include <asm/kregs.h>
-#include <asm/mmu_context.h>
-#include <asm/offsets.h>
-#include <asm/pal.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/mca_asm.h>
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define SAL_PSR_BITS_TO_SET \
- (IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL)
-
-#define SAVE_FROM_REG(src, ptr, dest) \
- mov dest=src;; \
- st8 [ptr]=dest,0x08
-
-#define RESTORE_REG(reg, ptr, _tmp) \
- ld8 _tmp=[ptr],0x08;; \
- mov reg=_tmp
-
-#define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\
- mov ar.lc=IA64_NUM_DBG_REGS-1;; \
-	mov _idx=0;;				\
-1:						\
-	SAVE_FROM_REG(_breg[_idx], ptr, _dest);;\
-	add _idx=1,_idx;;			\
- br.cloop.sptk.many 1b
-
-#define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\
- mov ar.lc=IA64_NUM_DBG_REGS-1;; \
- mov _idx=0;; \
-_lbl: RESTORE_REG(_breg[_idx], ptr, _tmp);; \
- add _idx=1, _idx;; \
- br.cloop.sptk.many _lbl
-
-#define SAVE_ONE_RR(num, _reg, _tmp) \
- movl _tmp=(num<<61);; \
- mov _reg=rr[_tmp]
-
-#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
- SAVE_ONE_RR(0,_r0, _tmp);; \
- SAVE_ONE_RR(1,_r1, _tmp);; \
- SAVE_ONE_RR(2,_r2, _tmp);; \
- SAVE_ONE_RR(3,_r3, _tmp);; \
- SAVE_ONE_RR(4,_r4, _tmp);; \
- SAVE_ONE_RR(5,_r5, _tmp);; \
- SAVE_ONE_RR(6,_r6, _tmp);; \
- SAVE_ONE_RR(7,_r7, _tmp);;
-
-#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
- st8 [ptr]=_r0, 8;; \
- st8 [ptr]=_r1, 8;; \
- st8 [ptr]=_r2, 8;; \
- st8 [ptr]=_r3, 8;; \
- st8 [ptr]=_r4, 8;; \
- st8 [ptr]=_r5, 8;; \
- st8 [ptr]=_r6, 8;; \
- st8 [ptr]=_r7, 8;;
-
-#define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp) \
-	mov ar.lc=0x08-1;;			\
-	movl _idx1=0x00;;			\
-RestRR:					\
-	dep.z _idx2=_idx1,61,3;;		\
-	ld8 _tmp=[ptr],8;;			\
-	mov rr[_idx2]=_tmp;;			\
-	srlz.d;;				\
-	add _idx1=1,_idx1;;			\
- br.cloop.sptk.few RestRR
-
-#define SET_AREA_FOR_BOOTING_CPU(reg1, reg2) \
- movl reg1=sal_state_for_booting_cpu;; \
- ld8 reg2=[reg1];;
-
-/*
- * Adjust region registers saved before starting to save
- * break regs and rest of the states that need to be preserved.
- */
-#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred) \
-	SAVE_FROM_REG(b0,_reg1,_reg2);;		\
-	SAVE_FROM_REG(b1,_reg1,_reg2);;		\
-	SAVE_FROM_REG(b2,_reg1,_reg2);;		\
-	SAVE_FROM_REG(b3,_reg1,_reg2);;		\
-	SAVE_FROM_REG(b4,_reg1,_reg2);;		\
-	SAVE_FROM_REG(b5,_reg1,_reg2);;		\
-	st8 [_reg1]=r1,0x08;;			\
-	st8 [_reg1]=r12,0x08;;			\
-	st8 [_reg1]=r13,0x08;;			\
-	SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);;	\
-	SAVE_FROM_REG(ar.pfs,_reg1,_reg2);;	\
-	SAVE_FROM_REG(ar.rnat,_reg1,_reg2);;	\
-	SAVE_FROM_REG(ar.unat,_reg1,_reg2);;	\
-	SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);;\
-	SAVE_FROM_REG(cr.dcr,_reg1,_reg2);;	\
-	SAVE_FROM_REG(cr.iva,_reg1,_reg2);;	\
-	SAVE_FROM_REG(cr.pta,_reg1,_reg2);;	\
-	SAVE_FROM_REG(cr.itv,_reg1,_reg2);;	\
-	SAVE_FROM_REG(cr.pmv,_reg1,_reg2);;	\
-	SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);;	\
-	SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);;	\
-	SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);;	\
-	st8 [_reg1]=r4,0x08;;			\
-	st8 [_reg1]=r5,0x08;;			\
-	st8 [_reg1]=r6,0x08;;			\
-	st8 [_reg1]=r7,0x08;;			\
-	st8 [_reg1]=_pred,0x08;;		\
-	SAVE_FROM_REG(ar.lc, _reg1, _reg2);;	\
-	stf.spill.nta [_reg1]=f2,16;;		\
-	stf.spill.nta [_reg1]=f3,16;;		\
-	stf.spill.nta [_reg1]=f4,16;;		\
-	stf.spill.nta [_reg1]=f5,16;;		\
-	stf.spill.nta [_reg1]=f16,16;;		\
-	stf.spill.nta [_reg1]=f17,16;;		\
-	stf.spill.nta [_reg1]=f18,16;;		\
-	stf.spill.nta [_reg1]=f19,16;;		\
-	stf.spill.nta [_reg1]=f20,16;;		\
-	stf.spill.nta [_reg1]=f21,16;;		\
-	stf.spill.nta [_reg1]=f22,16;;		\
-	stf.spill.nta [_reg1]=f23,16;;		\
-	stf.spill.nta [_reg1]=f24,16;;		\
-	stf.spill.nta [_reg1]=f25,16;;		\
-	stf.spill.nta [_reg1]=f26,16;;		\
-	stf.spill.nta [_reg1]=f27,16;;		\
-	stf.spill.nta [_reg1]=f28,16;;		\
-	stf.spill.nta [_reg1]=f29,16;;		\
-	stf.spill.nta [_reg1]=f30,16;;		\
- stf.spill.nta [_reg1]=f31,16;;
-
-#else
-#define SET_AREA_FOR_BOOTING_CPU(a1, a2)
-#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2, a3)
-#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
-#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
-#endif
-
-#ifdef XEN
-#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
- movl _tmp1=(num << 61);; \
-	movl _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
- mov rr[_tmp1]=_tmp2
-#else
-#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
- movl _tmp1=(num << 61);; \
-	mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
- mov rr[_tmp1]=_tmp2
-#endif
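
Both variants pack the same three fields into the region register; only movl
vs. mov differs because the Xen build needs a 64-bit immediate. A hedged C
view of the encoding (function name is illustrative):

    /* Illustrative encoding of a region register value (see SET_ONE_RR). */
    unsigned long make_rr(unsigned long num, unsigned long pgshift, unsigned long vhpt)
    {
        unsigned long rid = ia64_rid(IA64_REGION_ID_KERNEL, num << 61);
        return (rid << 8) | (pgshift << 2) | vhpt;  /* rid | page size | VHPT enable */
    }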
-
- .section __special_page_section,"ax"
-
- .global empty_zero_page
-empty_zero_page:
- .skip PAGE_SIZE
-
-#ifndef XEN
- .global swapper_pg_dir
-swapper_pg_dir:
- .skip PAGE_SIZE
-#endif
-
-#if defined(XEN) && defined(CONFIG_VIRTUAL_FRAME_TABLE)
- .global frametable_pg_dir
-frametable_pg_dir:
- .skip PAGE_SIZE
-#endif
-
- .rodata
-halt_msg:
- stringz "Halting kernel\n"
-
- .text
-
- .global start_ap
-
- /*
- * Start the kernel.  When the bootloader passes control to _start(), r28
- * points to the address of the boot parameter area. Execution reaches
- * here in physical mode.
- */
-GLOBAL_ENTRY(_start)
-start_ap:
- .prologue
- .save rp, r0 // terminate unwind chain with a NULL rp
- .body
-
- rsm psr.i | psr.ic
- ;;
- srlz.i
- ;;
- /*
- * Save the region registers, predicate before they get clobbered
- */
- SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15);
- mov r25=pr;;
-
- /*
- * Initialize kernel region registers:
- * rr[0]: VHPT enabled, page size = PAGE_SHIFT
- * rr[1]: VHPT enabled, page size = PAGE_SHIFT
- * rr[2]: VHPT enabled, page size = PAGE_SHIFT
- * rr[3]: VHPT enabled, page size = PAGE_SHIFT
- * rr[4]: VHPT enabled, page size = PAGE_SHIFT
- * rr[5]: VHPT enabled, page size = PAGE_SHIFT
- * rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
- * rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
- * We initialize all of them to prevent inadvertently assuming
- * something about the state of address translation early in boot.
- */
- SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);;
- SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);;
- SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);;
- /*
- * Now pin mappings into the TLB for kernel text and data
- */
- mov r18=KERNEL_TR_PAGE_SHIFT<<2
- movl r17=KERNEL_START
- ;;
- mov cr.itir=r18
- mov cr.ifa=r17
- mov r16=IA64_TR_KERNEL
- mov r3=ip
- movl r18=PAGE_KERNEL
- ;;
- dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
- ;;
- or r18=r2,r18
- ;;
- srlz.i
- ;;
- itr.i itr[r16]=r18
- ;;
- itr.d dtr[r16]=r18
- ;;
- srlz.i
-
- /*
- * Switch into virtual mode:
- */
-#ifdef XEN
-	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
-		  |IA64_PSR_DI|IA64_PSR_AC)
-#else
-	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
-		  |IA64_PSR_DI)
-#endif
- ;;
- mov cr.ipsr=r16
- movl r17=1f
- ;;
- mov cr.iip=r17
- mov cr.ifs=r0
- ;;
- rfi
- ;;
-1: // now we are in virtual mode
-
- SET_AREA_FOR_BOOTING_CPU(r2, r16);
-
- STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15);
- SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25)
- ;;
-
- // set IVT entry point---can't access I/O ports without it
- movl r3=ia64_ivt
- ;;
- mov cr.iva=r3
- movl r2=FPSR_DEFAULT
- ;;
- srlz.i
- movl gp=__gp
- ;;
- mov ar.fpsr=r2
- ;;
-
-#define isAP p2 // are we an Application Processor?
-#define isBP p3 // are we the Bootstrap Processor?
-
-#ifdef XEN
-# define init_task init_task_mem
-#endif
-
-#ifdef CONFIG_SMP
- /*
- * Find the init_task for the currently booting CPU. At poweron, and in
- * UP mode, task_for_booting_cpu is NULL.
- */
- movl r3=task_for_booting_cpu
- ;;
- ld8 r3=[r3]
- movl r2=init_task
- ;;
- cmp.eq isBP,isAP=r3,r0
- ;;
-(isAP) mov r2=r3
-#else
- movl r2=init_task
- cmp.eq isBP,isAP=r0,r0
-#endif
- ;;
- tpa r3=r2 // r3 == phys addr of task struct
- mov r16=-1
-#ifndef XEN
-(isBP)	br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
-#endif
-
- // load mapping for stack (virtaddr in r2, physaddr in r3)
- rsm psr.ic
- movl r17=PAGE_KERNEL
- ;;
- srlz.d
- dep r18=0,r3,0,12
- ;;
- or r18=r17,r18
-#ifdef XEN
- dep r2=-1,r3,60,4 // IMVA of task
-#else
- dep r2=-1,r3,61,3 // IMVA of task
-#endif
- ;;
- mov r17=rr[r2]
- shr.u r16=r3,IA64_GRANULE_SHIFT
- ;;
- dep r17=0,r17,8,24
- ;;
- mov cr.itir=r17
- mov cr.ifa=r2
-
- mov r19=IA64_TR_CURRENT_STACK
- ;;
- itr.d dtr[r19]=r18
- ;;
- ssm psr.ic
- srlz.d
- ;;
-
-.load_current:
- // load the "current" pointer (r13) and ar.k6 with the current task
- mov IA64_KR(CURRENT)=r2 // virtual address
- mov IA64_KR(CURRENT_STACK)=r16
- mov r13=r2
- /*
- * Reserve space at the top of the stack for "struct pt_regs". Kernel
- * threads don't store interesting values in that structure, but the space
- * still needs to be there because time-critical stuff such as the context
- * switching can be implemented more efficiently (for example, __switch_to()
- * always sets the psr.dfh bit of the task it is switching to).
- */
-
- addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
- addl r2=IA64_RBS_OFFSET,r2 // initialize the RSE
- mov ar.rsc=0 // place RSE in enforced lazy mode
- ;;
- loadrs // clear the dirty partition
-#ifdef XEN
-(isAP) br.few 2f
- movl r19=__phys_per_cpu_start
- mov r18=PERCPU_PAGE_SIZE
-#ifndef CONFIG_SMP
- add r19=r19,r18
- ;;
-#else
- movl r20=__cpu0_per_cpu
- ;;
- shr.u r18=r18,3
-1:
- ld8 r21=[r19],8 ;;
- st8[r20]=r21,8
- adds r18=-1,r18
- ;;
- cmp4.lt p7,p6=0,r18
-(p7) br.cond.dptk.few 1b
- ;;
-#endif
- movl r18=__per_cpu_offset
- movl r19=__cpu0_per_cpu
- movl r20=__per_cpu_start
- ;;
- sub r20=r19,r20
- ;;
- st8 [r18]=r20
-2:
-#endif
- ;;
- mov ar.bspstore=r2 // establish the new RSE stack
- ;;
- mov ar.rsc=0x3 // place RSE in eager mode
-
-#ifdef XEN
-(isBP) dep r28=-1,r28,60,4 // make address virtual
-#else
-(isBP) dep r28=-1,r28,61,3 // make address virtual
-#endif
-(isBP) movl r2=ia64_boot_param
- ;;
-(isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
-
-#ifdef CONFIG_SMP
-(isAP) br.call.sptk.many rp=start_secondary
-.ret0:
-(isAP) br.cond.sptk self
-#endif
-
- // This is executed by the bootstrap processor (bsp) only:
-
-#ifdef CONFIG_IA64_FW_EMU
- // initialize PAL & SAL emulator:
- br.call.sptk.many rp=sys_fw_init
-.ret1:
-#endif
- br.call.sptk.many rp=start_kernel
-.ret2: addl r3=@ltoff(halt_msg),gp
- ;;
- alloc r2=ar.pfs,8,0,2,0
- ;;
- ld8 out0=[r3]
- br.call.sptk.many b0=console_print
-
-self: hint @pause
-#ifdef XEN
- ;;
- br.sptk.many self // endless loop
- ;;
-#else
- br.sptk.many self // endless loop
-#endif
-END(_start)
-
-GLOBAL_ENTRY(ia64_save_debug_regs)
- alloc r16=ar.pfs,1,0,0,0
- mov r20=ar.lc // preserve ar.lc
- mov ar.lc=IA64_NUM_DBG_REGS-1
- mov r18=0
- add r19=IA64_NUM_DBG_REGS*8,in0
- ;;
-1: mov r16=dbr[r18]
-#ifdef CONFIG_ITANIUM
- ;;
- srlz.d
-#endif
- mov r17=ibr[r18]
- add r18=1,r18
- ;;
- st8.nta [in0]=r16,8
- st8.nta [r19]=r17,8
- br.cloop.sptk.many 1b
- ;;
- mov ar.lc=r20 // restore ar.lc
- br.ret.sptk.many rp
-END(ia64_save_debug_regs)
-
-GLOBAL_ENTRY(ia64_load_debug_regs)
- alloc r16=ar.pfs,1,0,0,0
- lfetch.nta [in0]
- mov r20=ar.lc // preserve ar.lc
- add r19=IA64_NUM_DBG_REGS*8,in0
- mov ar.lc=IA64_NUM_DBG_REGS-1
- mov r18=-1
- ;;
-1: ld8.nta r16=[in0],8
- ld8.nta r17=[r19],8
- add r18=1,r18
- ;;
- mov dbr[r18]=r16
-#ifdef CONFIG_ITANIUM
- ;;
- srlz.d // Errata 132 (NoFix status)
-#endif
- mov ibr[r18]=r17
- br.cloop.sptk.many 1b
- ;;
- mov ar.lc=r20 // restore ar.lc
- br.ret.sptk.many rp
-END(ia64_load_debug_regs)
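
Both debug-register routines walk the dbr and ibr banks in lockstep under an
ar.lc counted loop; the save side, roughly in C (read_dbr/read_ibr are
hypothetical accessors standing in for mov rX=dbr[i] / mov rY=ibr[i]):

    /* Illustrative C analogue of ia64_save_debug_regs. */
    void save_debug_regs(unsigned long *buf)
    {
        int i;
        for (i = 0; i < IA64_NUM_DBG_REGS; i++) {
            buf[i]                     = read_dbr(i); /* hypothetical accessor */
            buf[IA64_NUM_DBG_REGS + i] = read_ibr(i); /* hypothetical accessor */
        }
    }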
-
-GLOBAL_ENTRY(__ia64_save_fpu)
- alloc r2=ar.pfs,1,4,0,0
- adds loc0=96*16-16,in0
- adds loc1=96*16-16-128,in0
- ;;
- stf.spill.nta [loc0]=f127,-256
- stf.spill.nta [loc1]=f119,-256
- ;;
- stf.spill.nta [loc0]=f111,-256
- stf.spill.nta [loc1]=f103,-256
- ;;
- stf.spill.nta [loc0]=f95,-256
- stf.spill.nta [loc1]=f87,-256
- ;;
- stf.spill.nta [loc0]=f79,-256
- stf.spill.nta [loc1]=f71,-256
- ;;
- stf.spill.nta [loc0]=f63,-256
- stf.spill.nta [loc1]=f55,-256
- adds loc2=96*16-32,in0
- ;;
- stf.spill.nta [loc0]=f47,-256
- stf.spill.nta [loc1]=f39,-256
- adds loc3=96*16-32-128,in0
- ;;
- stf.spill.nta [loc2]=f126,-256
- stf.spill.nta [loc3]=f118,-256
- ;;
- stf.spill.nta [loc2]=f110,-256
- stf.spill.nta [loc3]=f102,-256
- ;;
- stf.spill.nta [loc2]=f94,-256
- stf.spill.nta [loc3]=f86,-256
- ;;
- stf.spill.nta [loc2]=f78,-256
- stf.spill.nta [loc3]=f70,-256
- ;;
- stf.spill.nta [loc2]=f62,-256
- stf.spill.nta [loc3]=f54,-256
- adds loc0=96*16-48,in0
- ;;
- stf.spill.nta [loc2]=f46,-256
- stf.spill.nta [loc3]=f38,-256
- adds loc1=96*16-48-128,in0
- ;;
- stf.spill.nta [loc0]=f125,-256
- stf.spill.nta [loc1]=f117,-256
- ;;
- stf.spill.nta [loc0]=f109,-256
- stf.spill.nta [loc1]=f101,-256
- ;;
- stf.spill.nta [loc0]=f93,-256
- stf.spill.nta [loc1]=f85,-256
- ;;
- stf.spill.nta [loc0]=f77,-256
- stf.spill.nta [loc1]=f69,-256
- ;;
- stf.spill.nta [loc0]=f61,-256
- stf.spill.nta [loc1]=f53,-256
- adds loc2=96*16-64,in0
- ;;
- stf.spill.nta [loc0]=f45,-256
- stf.spill.nta [loc1]=f37,-256
- adds loc3=96*16-64-128,in0
- ;;
- stf.spill.nta [loc2]=f124,-256
- stf.spill.nta [loc3]=f116,-256
- ;;
- stf.spill.nta [loc2]=f108,-256
- stf.spill.nta [loc3]=f100,-256
- ;;
- stf.spill.nta [loc2]=f92,-256
- stf.spill.nta [loc3]=f84,-256
- ;;
- stf.spill.nta [loc2]=f76,-256
- stf.spill.nta [loc3]=f68,-256
- ;;
- stf.spill.nta [loc2]=f60,-256
- stf.spill.nta [loc3]=f52,-256
- adds loc0=96*16-80,in0
- ;;
- stf.spill.nta [loc2]=f44,-256
- stf.spill.nta [loc3]=f36,-256
- adds loc1=96*16-80-128,in0
- ;;
- stf.spill.nta [loc0]=f123,-256
- stf.spill.nta [loc1]=f115,-256
- ;;
- stf.spill.nta [loc0]=f107,-256
- stf.spill.nta [loc1]=f99,-256
- ;;
- stf.spill.nta [loc0]=f91,-256
- stf.spill.nta [loc1]=f83,-256
- ;;
- stf.spill.nta [loc0]=f75,-256
- stf.spill.nta [loc1]=f67,-256
- ;;
- stf.spill.nta [loc0]=f59,-256
- stf.spill.nta [loc1]=f51,-256
- adds loc2=96*16-96,in0
- ;;
- stf.spill.nta [loc0]=f43,-256
- stf.spill.nta [loc1]=f35,-256
- adds loc3=96*16-96-128,in0
- ;;
- stf.spill.nta [loc2]=f122,-256
- stf.spill.nta [loc3]=f114,-256
- ;;
- stf.spill.nta [loc2]=f106,-256
- stf.spill.nta [loc3]=f98,-256
- ;;
- stf.spill.nta [loc2]=f90,-256
- stf.spill.nta [loc3]=f82,-256
- ;;
- stf.spill.nta [loc2]=f74,-256
- stf.spill.nta [loc3]=f66,-256
- ;;
- stf.spill.nta [loc2]=f58,-256
- stf.spill.nta [loc3]=f50,-256
- adds loc0=96*16-112,in0
- ;;
- stf.spill.nta [loc2]=f42,-256
- stf.spill.nta [loc3]=f34,-256
- adds loc1=96*16-112-128,in0
- ;;
- stf.spill.nta [loc0]=f121,-256
- stf.spill.nta [loc1]=f113,-256
- ;;
- stf.spill.nta [loc0]=f105,-256
- stf.spill.nta [loc1]=f97,-256
- ;;
- stf.spill.nta [loc0]=f89,-256
- stf.spill.nta [loc1]=f81,-256
- ;;
- stf.spill.nta [loc0]=f73,-256
- stf.spill.nta [loc1]=f65,-256
- ;;
- stf.spill.nta [loc0]=f57,-256
- stf.spill.nta [loc1]=f49,-256
- adds loc2=96*16-128,in0
- ;;
- stf.spill.nta [loc0]=f41,-256
- stf.spill.nta [loc1]=f33,-256
- adds loc3=96*16-128-128,in0
- ;;
- stf.spill.nta [loc2]=f120,-256
- stf.spill.nta [loc3]=f112,-256
- ;;
- stf.spill.nta [loc2]=f104,-256
- stf.spill.nta [loc3]=f96,-256
- ;;
- stf.spill.nta [loc2]=f88,-256
- stf.spill.nta [loc3]=f80,-256
- ;;
- stf.spill.nta [loc2]=f72,-256
- stf.spill.nta [loc3]=f64,-256
- ;;
- stf.spill.nta [loc2]=f56,-256
- stf.spill.nta [loc3]=f48,-256
- ;;
- stf.spill.nta [loc2]=f40
- stf.spill.nta [loc3]=f32
- br.ret.sptk.many rp
-END(__ia64_save_fpu)
-
-GLOBAL_ENTRY(__ia64_load_fpu)
- alloc r2=ar.pfs,1,2,0,0
- adds r3=128,in0
- adds r14=256,in0
- adds r15=384,in0
- mov loc0=512
- mov loc1=-1024+16
- ;;
- ldf.fill.nta f32=[in0],loc0
- ldf.fill.nta f40=[ r3],loc0
- ldf.fill.nta f48=[r14],loc0
- ldf.fill.nta f56=[r15],loc0
- ;;
- ldf.fill.nta f64=[in0],loc0
- ldf.fill.nta f72=[ r3],loc0
- ldf.fill.nta f80=[r14],loc0
- ldf.fill.nta f88=[r15],loc0
- ;;
- ldf.fill.nta f96=[in0],loc1
- ldf.fill.nta f104=[ r3],loc1
- ldf.fill.nta f112=[r14],loc1
- ldf.fill.nta f120=[r15],loc1
- ;;
- ldf.fill.nta f33=[in0],loc0
- ldf.fill.nta f41=[ r3],loc0
- ldf.fill.nta f49=[r14],loc0
- ldf.fill.nta f57=[r15],loc0
- ;;
- ldf.fill.nta f65=[in0],loc0
- ldf.fill.nta f73=[ r3],loc0
- ldf.fill.nta f81=[r14],loc0
- ldf.fill.nta f89=[r15],loc0
- ;;
- ldf.fill.nta f97=[in0],loc1
- ldf.fill.nta f105=[ r3],loc1
- ldf.fill.nta f113=[r14],loc1
- ldf.fill.nta f121=[r15],loc1
- ;;
- ldf.fill.nta f34=[in0],loc0
- ldf.fill.nta f42=[ r3],loc0
- ldf.fill.nta f50=[r14],loc0
- ldf.fill.nta f58=[r15],loc0
- ;;
- ldf.fill.nta f66=[in0],loc0
- ldf.fill.nta f74=[ r3],loc0
- ldf.fill.nta f82=[r14],loc0
- ldf.fill.nta f90=[r15],loc0
- ;;
- ldf.fill.nta f98=[in0],loc1
- ldf.fill.nta f106=[ r3],loc1
- ldf.fill.nta f114=[r14],loc1
- ldf.fill.nta f122=[r15],loc1
- ;;
- ldf.fill.nta f35=[in0],loc0
- ldf.fill.nta f43=[ r3],loc0
- ldf.fill.nta f51=[r14],loc0
- ldf.fill.nta f59=[r15],loc0
- ;;
- ldf.fill.nta f67=[in0],loc0
- ldf.fill.nta f75=[ r3],loc0
- ldf.fill.nta f83=[r14],loc0
- ldf.fill.nta f91=[r15],loc0
- ;;
- ldf.fill.nta f99=[in0],loc1
- ldf.fill.nta f107=[ r3],loc1
- ldf.fill.nta f115=[r14],loc1
- ldf.fill.nta f123=[r15],loc1
- ;;
- ldf.fill.nta f36=[in0],loc0
- ldf.fill.nta f44=[ r3],loc0
- ldf.fill.nta f52=[r14],loc0
- ldf.fill.nta f60=[r15],loc0
- ;;
- ldf.fill.nta f68=[in0],loc0
- ldf.fill.nta f76=[ r3],loc0
- ldf.fill.nta f84=[r14],loc0
- ldf.fill.nta f92=[r15],loc0
- ;;
- ldf.fill.nta f100=[in0],loc1
- ldf.fill.nta f108=[ r3],loc1
- ldf.fill.nta f116=[r14],loc1
- ldf.fill.nta f124=[r15],loc1
- ;;
- ldf.fill.nta f37=[in0],loc0
- ldf.fill.nta f45=[ r3],loc0
- ldf.fill.nta f53=[r14],loc0
- ldf.fill.nta f61=[r15],loc0
- ;;
- ldf.fill.nta f69=[in0],loc0
- ldf.fill.nta f77=[ r3],loc0
- ldf.fill.nta f85=[r14],loc0
- ldf.fill.nta f93=[r15],loc0
- ;;
- ldf.fill.nta f101=[in0],loc1
- ldf.fill.nta f109=[ r3],loc1
- ldf.fill.nta f117=[r14],loc1
- ldf.fill.nta f125=[r15],loc1
- ;;
- ldf.fill.nta f38 =[in0],loc0
- ldf.fill.nta f46 =[ r3],loc0
- ldf.fill.nta f54 =[r14],loc0
- ldf.fill.nta f62 =[r15],loc0
- ;;
- ldf.fill.nta f70 =[in0],loc0
- ldf.fill.nta f78 =[ r3],loc0
- ldf.fill.nta f86 =[r14],loc0
- ldf.fill.nta f94 =[r15],loc0
- ;;
- ldf.fill.nta f102=[in0],loc1
- ldf.fill.nta f110=[ r3],loc1
- ldf.fill.nta f118=[r14],loc1
- ldf.fill.nta f126=[r15],loc1
- ;;
- ldf.fill.nta f39 =[in0],loc0
- ldf.fill.nta f47 =[ r3],loc0
- ldf.fill.nta f55 =[r14],loc0
- ldf.fill.nta f63 =[r15],loc0
- ;;
- ldf.fill.nta f71 =[in0],loc0
- ldf.fill.nta f79 =[ r3],loc0
- ldf.fill.nta f87 =[r14],loc0
- ldf.fill.nta f95 =[r15],loc0
- ;;
- ldf.fill.nta f103=[in0]
- ldf.fill.nta f111=[ r3]
- ldf.fill.nta f119=[r14]
- ldf.fill.nta f127=[r15]
- br.ret.sptk.many rp
-END(__ia64_load_fpu)
-
-GLOBAL_ENTRY(__ia64_init_fpu)
- stf.spill [sp]=f0 // M3
- mov f32=f0 // F
- nop.b 0
-
- ldfps f33,f34=[sp] // M0
- ldfps f35,f36=[sp] // M1
- mov f37=f0 // F
- ;;
-
- setf.s f38=r0 // M2
- setf.s f39=r0 // M3
- mov f40=f0 // F
-
- ldfps f41,f42=[sp] // M0
- ldfps f43,f44=[sp] // M1
- mov f45=f0 // F
-
- setf.s f46=r0 // M2
- setf.s f47=r0 // M3
- mov f48=f0 // F
-
- ldfps f49,f50=[sp] // M0
- ldfps f51,f52=[sp] // M1
- mov f53=f0 // F
-
- setf.s f54=r0 // M2
- setf.s f55=r0 // M3
- mov f56=f0 // F
-
- ldfps f57,f58=[sp] // M0
- ldfps f59,f60=[sp] // M1
- mov f61=f0 // F
-
- setf.s f62=r0 // M2
- setf.s f63=r0 // M3
- mov f64=f0 // F
-
- ldfps f65,f66=[sp] // M0
- ldfps f67,f68=[sp] // M1
- mov f69=f0 // F
-
- setf.s f70=r0 // M2
- setf.s f71=r0 // M3
- mov f72=f0 // F
-
- ldfps f73,f74=[sp] // M0
- ldfps f75,f76=[sp] // M1
- mov f77=f0 // F
-
- setf.s f78=r0 // M2
- setf.s f79=r0 // M3
- mov f80=f0 // F
-
- ldfps f81,f82=[sp] // M0
- ldfps f83,f84=[sp] // M1
- mov f85=f0 // F
-
- setf.s f86=r0 // M2
- setf.s f87=r0 // M3
- mov f88=f0 // F
-
- /*
- * When the instructions are cached, it would be faster to initialize
- * the remaining registers with simply mov instructions (F-unit).
- * This gets the time down to ~29 cycles. However, this would use up
- * 33 bundles, whereas continuing with the above pattern yields
- * 10 bundles and ~30 cycles.
- */
-
- ldfps f89,f90=[sp] // M0
- ldfps f91,f92=[sp] // M1
- mov f93=f0 // F
-
- setf.s f94=r0 // M2
- setf.s f95=r0 // M3
- mov f96=f0 // F
-
- ldfps f97,f98=[sp] // M0
- ldfps f99,f100=[sp] // M1
- mov f101=f0 // F
-
- setf.s f102=r0 // M2
- setf.s f103=r0 // M3
- mov f104=f0 // F
-
- ldfps f105,f106=[sp] // M0
- ldfps f107,f108=[sp] // M1
- mov f109=f0 // F
-
- setf.s f110=r0 // M2
- setf.s f111=r0 // M3
- mov f112=f0 // F
-
- ldfps f113,f114=[sp] // M0
- ldfps f115,f116=[sp] // M1
- mov f117=f0 // F
-
- setf.s f118=r0 // M2
- setf.s f119=r0 // M3
- mov f120=f0 // F
-
- ldfps f121,f122=[sp] // M0
- ldfps f123,f124=[sp] // M1
- mov f125=f0 // F
-
- setf.s f126=r0 // M2
- setf.s f127=r0 // M3
- br.ret.sptk.many rp // F
-END(__ia64_init_fpu)
-
-/*
- * Switch execution mode from virtual to physical
- *
- * Inputs:
- * r16 = new psr to establish
- * Output:
- * r19 = old virtual address of ar.bsp
- * r20 = old virtual address of sp
- *
- * Note: RSE must already be in enforced lazy mode
- */
-GLOBAL_ENTRY(ia64_switch_mode_phys)
- {
- alloc r2=ar.pfs,0,0,0,0
-	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
- mov r15=ip
- }
- ;;
- {
- flushrs // must be first insn in group
- srlz.i
- }
- ;;
- mov cr.ipsr=r16 // set new PSR
- add r3=1f-ia64_switch_mode_phys,r15
-
- mov r19=ar.bsp
- mov r20=sp
-	mov r14=rp			// get return address into a general register
- ;;
-
- // going to physical mode, use tpa to translate virt->phys
- tpa r17=r19
- tpa r3=r3
- tpa sp=sp
- tpa r14=r14
- ;;
-
- mov r18=ar.rnat // save ar.rnat
- mov ar.bspstore=r17 // this steps on ar.rnat
- mov cr.iip=r3
- mov cr.ifs=r0
- ;;
- mov ar.rnat=r18 // restore ar.rnat
- rfi // must be last insn in group
- ;;
-1: mov rp=r14
- br.ret.sptk.many rp
-END(ia64_switch_mode_phys)
-
-/*
- * Switch execution mode from physical to virtual
- *
- * Inputs:
- * r16 = new psr to establish
- * r19 = new bspstore to establish
- * r20 = new sp to establish
- *
- * Note: RSE must already be in enforced lazy mode
- */
-GLOBAL_ENTRY(ia64_switch_mode_virt)
- {
- alloc r2=ar.pfs,0,0,0,0
-	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
- mov r15=ip
- }
- ;;
- {
- flushrs // must be first insn in group
- srlz.i
- }
- ;;
- mov cr.ipsr=r16 // set new PSR
- add r3=1f-ia64_switch_mode_virt,r15
-
-	mov r14=rp			// get return address into a general register
- ;;
-
- // going to virtual
- // - for code addresses, set upper bits of addr to KERNEL_START
- // - for stack addresses, copy from input argument
- movl r18=KERNEL_START
- dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
- dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
- mov sp=r20
- ;;
- or r3=r3,r18
- or r14=r14,r18
- ;;
-
- mov r18=ar.rnat // save ar.rnat
- mov ar.bspstore=r19 // this steps on ar.rnat
- mov cr.iip=r3
- mov cr.ifs=r0
- ;;
- mov ar.rnat=r18 // restore ar.rnat
- rfi // must be last insn in group
- ;;
-1: mov rp=r14
- br.ret.sptk.many rp
-END(ia64_switch_mode_virt)
-
-GLOBAL_ENTRY(ia64_delay_loop)
- .prologue
-{ nop 0 // work around GAS unwind info generation bug...
- .save ar.lc,r2
- mov r2=ar.lc
- .body
- ;;
- mov ar.lc=r32
-}
- ;;
- // force loop to be 32-byte aligned (GAS bug means we cannot use .align
- // inside function body without corrupting unwind info).
-{ nop 0 }
-1: br.cloop.sptk.few 1b
- ;;
- mov ar.lc=r2
- br.ret.sptk.many rp
-END(ia64_delay_loop)
-
-#ifndef XEN
-/*
- * Return a CPU-local timestamp in nano-seconds. This timestamp is
- * NOT synchronized across CPUs; its return value must never be
- * compared against the values returned on another CPU. The usage in
- * kernel/sched.c ensures that.
- *
- * The return-value of sched_clock() is NOT supposed to wrap-around.
- * If it did, it would cause some scheduling hiccups (at the worst).
- * Fortunately, with a 64-bit cycle-counter ticking at 100GHz, even
- * that would happen only once every 5+ years.
- *
- * The code below basically calculates:
- *
- * (ia64_get_itc() * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT
- *
- * except that the multiplication and the shift are done with 128-bit
- * intermediate precision so that we can produce a full 64-bit result.
- */
-GLOBAL_ENTRY(sched_clock)
-#ifdef XEN
- movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
-#else
- addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
-#endif
-	mov.m r9=ar.itc		// fetch cycle-counter				(35 cyc)
- ;;
- ldf8 f8=[r8]
- ;;
- setf.sig f9=r9 // certain to stall, so issue it _after_ ldf8...
- ;;
-	xmpy.lu f10=f9,f8	// calculate low 64 bits of 128-bit product	(4 cyc)
- xmpy.hu f11=f9,f8 // calculate high 64 bits of 128-bit product
- ;;
-	getf.sig r8=f10		//						(5 cyc)
- getf.sig r9=f11
- ;;
- shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
- br.ret.sptk.many rp
-END(sched_clock)
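
[Editor's note: for readers unfamiliar with the xmpy.lu/xmpy.hu + shrp idiom
above, here is a minimal C sketch of the same fixed-point computation. It
assumes a compiler providing unsigned __int128; the shift constant stands in
for IA64_NSEC_PER_CYC_SHIFT and its value here is illustrative.]

    #include <stdint.h>

    #define NSEC_PER_CYC_SHIFT 30   /* assumed stand-in for IA64_NSEC_PER_CYC_SHIFT */

    static uint64_t sched_clock_sketch(uint64_t itc, uint64_t nsec_per_cyc)
    {
            /* 64x64 -> 128-bit product (the xmpy.lu/xmpy.hu pair), then
             * extract the middle 64 bits (the shrp) so no precision is
             * lost before the shift. */
            unsigned __int128 prod = (unsigned __int128)itc * nsec_per_cyc;
            return (uint64_t)(prod >> NSEC_PER_CYC_SHIFT);
    }
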
-
-GLOBAL_ENTRY(start_kernel_thread)
- .prologue
-	.save rp, r0				// this is the end of the call-chain
- .body
- alloc r2 = ar.pfs, 0, 0, 2, 0
- mov out0 = r9
- mov out1 = r11;;
- br.call.sptk.many rp = kernel_thread_helper;;
- mov out0 = r8
- br.call.sptk.many rp = sys_exit;;
-1: br.sptk.few 1b // not reached
-END(start_kernel_thread)
-#endif /* XEN */
-
-#ifdef CONFIG_IA64_BRL_EMU
-
-/*
- * Assembly routines used by brl_emu.c to set preserved register state.
- */
-
-#define SET_REG(reg) \
- GLOBAL_ENTRY(ia64_set_##reg); \
- alloc r16=ar.pfs,1,0,0,0; \
- mov reg=r32; \
- ;; \
- br.ret.sptk.many rp; \
- END(ia64_set_##reg)
-
-SET_REG(b1);
-SET_REG(b2);
-SET_REG(b3);
-SET_REG(b4);
-SET_REG(b5);
-
-#endif /* CONFIG_IA64_BRL_EMU */
-
-#ifdef CONFIG_SMP
- /*
-	 * This routine handles spinlock contention.  It uses a non-standard calling
-	 * convention to avoid converting leaf routines into interior routines.  Because
-	 * of this special convention, there are several restrictions:
-	 *
-	 * - do not use gp-relative variables; this code is called from the kernel
-	 *   and from modules, so r1 is undefined.
- * - do not use stacked registers, the caller owns them.
- * - do not use the scratch stack space, the caller owns it.
- * - do not use any registers other than the ones listed below
- *
- * Inputs:
- * ar.pfs - saved CFM of caller
- * ar.ccv - 0 (and available for use)
- * r27 - flags from spin_lock_irqsave or 0. Must be preserved.
- * r28 - available for use.
- * r29 - available for use.
- * r30 - available for use.
- * r31 - address of lock, available for use.
- * b6 - return address
- * p14 - available for use.
- * p15 - used to track flag status.
- *
- * If you patch this code to use more registers, do not forget to update
- * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
- */
-
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-
-GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
- .prologue
- .save ar.pfs, r0 // this code effectively has a zero frame size
- .save rp, r28
- .body
- nop 0
- tbit.nz p15,p0=r27,IA64_PSR_I_BIT
- .restore sp // pop existing prologue after next insn
- mov b6 = r28
- .prologue
- .save ar.pfs, r0
- .altrp b6
- .body
- ;;
-(p15) ssm psr.i // reenable interrupts if they were on
-					// DavidM says that srlz.d is slow and is not required in this case
-.wait:
- // exponential backoff, kdb, lockmeter etc. go in here
- hint @pause
-	ld4 r30=[r31]		// don't use ld4.bias; if it's contended, we won't write the word
- nop 0
- ;;
- cmp4.ne p14,p0=r30,r0
-(p14) br.cond.sptk.few .wait
-(p15) rsm psr.i // disable interrupts if we reenabled them
- br.cond.sptk.few b6 // lock is now free, try to acquire
- .global ia64_spinlock_contention_pre3_4_end // for kernprof
-ia64_spinlock_contention_pre3_4_end:
-END(ia64_spinlock_contention_pre3_4)
-
-#else
-
-GLOBAL_ENTRY(ia64_spinlock_contention)
- .prologue
- .altrp b6
- .body
- tbit.nz p15,p0=r27,IA64_PSR_I_BIT
- ;;
-.wait:
-(p15) ssm psr.i // reenable interrupts if they were on
-					// DavidM says that srlz.d is slow and is not required in this case
-.wait2:
- // exponential backoff, kdb, lockmeter etc. go in here
- hint @pause
-	ld4 r30=[r31]		// don't use ld4.bias; if it's contended, we won't write the word
- ;;
- cmp4.ne p14,p0=r30,r0
- mov r30 = 1
-(p14) br.cond.sptk.few .wait2
-(p15) rsm psr.i // disable interrupts if we reenabled them
- ;;
- cmpxchg4.acq r30=[r31], r30, ar.ccv
- ;;
- cmp4.ne p14,p0=r0,r30
-(p14) br.cond.sptk.few .wait
-
- br.ret.sptk.many b6 // lock is now taken
-END(ia64_spinlock_contention)
-
-#endif
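
[Editor's note: in C terms, both contention paths above amount to a
test-and-test-and-set loop. A hedged sketch, using the GCC
__sync_val_compare_and_swap builtin in place of cmpxchg4.acq and omitting
the psr.i re-enable/disable dance:]

    static void spinlock_contention_sketch(volatile unsigned int *lock)
    {
            for (;;) {
                    /* spin on plain reads until the lock word reads free;
                     * "hint @pause" / exponential backoff would go here */
                    while (*lock != 0)
                            ;
                    /* lock read as free: retry the atomic acquire */
                    if (__sync_val_compare_and_swap(lock, 0, 1) == 0)
                            return;         /* lock is now taken */
            }
    }
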
-
-#ifdef CONFIG_HOTPLUG_CPU
-GLOBAL_ENTRY(ia64_jump_to_sal)
- alloc r16=ar.pfs,1,0,0,0;;
- rsm psr.i | psr.ic
-{
- flushrs
- srlz.i
-}
- tpa r25=in0
- movl r18=tlb_purge_done;;
- DATA_VA_TO_PA(r18);;
- mov b1=r18 // Return location
- movl r18=ia64_do_tlb_purge;;
- DATA_VA_TO_PA(r18);;
- mov b2=r18 // doing tlb_flush work
- mov ar.rsc=0 // Put RSE in enforced lazy, LE mode
- movl r17=1f;;
- DATA_VA_TO_PA(r17);;
- mov cr.iip=r17
- movl r16=SAL_PSR_BITS_TO_SET;;
- mov cr.ipsr=r16
- mov cr.ifs=r0;;
- rfi;;
-1:
- /*
- * Invalidate all TLB data/inst
- */
- br.sptk.many b2;; // jump to tlb purge code
-
-tlb_purge_done:
- RESTORE_REGION_REGS(r25, r17,r18,r19);;
- RESTORE_REG(b0, r25, r17);;
- RESTORE_REG(b1, r25, r17);;
- RESTORE_REG(b2, r25, r17);;
- RESTORE_REG(b3, r25, r17);;
- RESTORE_REG(b4, r25, r17);;
- RESTORE_REG(b5, r25, r17);;
- ld8 r1=[r25],0x08;;
- ld8 r12=[r25],0x08;;
- ld8 r13=[r25],0x08;;
- RESTORE_REG(ar.fpsr, r25, r17);;
- RESTORE_REG(ar.pfs, r25, r17);;
- RESTORE_REG(ar.rnat, r25, r17);;
- RESTORE_REG(ar.unat, r25, r17);;
- RESTORE_REG(ar.bspstore, r25, r17);;
- RESTORE_REG(cr.dcr, r25, r17);;
- RESTORE_REG(cr.iva, r25, r17);;
- RESTORE_REG(cr.pta, r25, r17);;
-#ifdef XEN
- dv_serialize_instruction
-#endif
- RESTORE_REG(cr.itv, r25, r17);;
- RESTORE_REG(cr.pmv, r25, r17);;
- RESTORE_REG(cr.cmcv, r25, r17);;
- RESTORE_REG(cr.lrr0, r25, r17);;
- RESTORE_REG(cr.lrr1, r25, r17);;
- ld8 r4=[r25],0x08;;
- ld8 r5=[r25],0x08;;
- ld8 r6=[r25],0x08;;
- ld8 r7=[r25],0x08;;
- ld8 r17=[r25],0x08;;
- mov pr=r17,-1;;
- RESTORE_REG(ar.lc, r25, r17);;
- /*
- * Now Restore floating point regs
- */
- ldf.fill.nta f2=[r25],16;;
- ldf.fill.nta f3=[r25],16;;
- ldf.fill.nta f4=[r25],16;;
- ldf.fill.nta f5=[r25],16;;
- ldf.fill.nta f16=[r25],16;;
- ldf.fill.nta f17=[r25],16;;
- ldf.fill.nta f18=[r25],16;;
- ldf.fill.nta f19=[r25],16;;
- ldf.fill.nta f20=[r25],16;;
- ldf.fill.nta f21=[r25],16;;
- ldf.fill.nta f22=[r25],16;;
- ldf.fill.nta f23=[r25],16;;
- ldf.fill.nta f24=[r25],16;;
- ldf.fill.nta f25=[r25],16;;
- ldf.fill.nta f26=[r25],16;;
- ldf.fill.nta f27=[r25],16;;
- ldf.fill.nta f28=[r25],16;;
- ldf.fill.nta f29=[r25],16;;
- ldf.fill.nta f30=[r25],16;;
- ldf.fill.nta f31=[r25],16;;
-
- /*
- * Now that we have done all the register restores
- * we are now ready for the big DIVE to SAL Land
- */
- ssm psr.ic;;
- srlz.d;;
- br.ret.sptk.many b0;;
-END(ia64_jump_to_sal)
-#endif /* CONFIG_HOTPLUG_CPU */
-
-#endif /* CONFIG_SMP */
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/hpsim_ssc.h
--- a/xen/arch/ia64/linux-xen/hpsim_ssc.h Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-/*
- * Platform dependent support for HP simulator.
- *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 1999 Vijay Chander <vijay@xxxxxxxxxxxx>
- */
-#ifndef _IA64_PLATFORM_HPSIM_SSC_H
-#define _IA64_PLATFORM_HPSIM_SSC_H
-
-/* Simulator system calls: */
-
-#define SSC_CONSOLE_INIT 20
-#define SSC_GETCHAR 21
-#define SSC_PUTCHAR 31
-#define SSC_CONNECT_INTERRUPT 58
-#define SSC_GENERATE_INTERRUPT 59
-#define SSC_SET_PERIODIC_INTERRUPT 60
-#define SSC_GET_RTC 65
-#define SSC_EXIT 66
-#define SSC_LOAD_SYMBOLS 69
-#define SSC_GET_TOD 74
-#define SSC_CTL_TRACE 76
-
-#define SSC_NETDEV_PROBE 100
-#define SSC_NETDEV_SEND 101
-#define SSC_NETDEV_RECV 102
-#define SSC_NETDEV_ATTACH 103
-#define SSC_NETDEV_DETACH 104
-
-/*
- * Simulator system call.
- */
-extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
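
[Editor's note: illustrative usage of the declaration above, mirroring how
the HP simulator console code calls it; this helper is not part of the
header being removed.]

    static inline void hpsim_cons_putc(int c)
    {
            /* SSC_PUTCHAR writes one character to the simulator console */
            ia64_ssc(c, 0, 0, 0, SSC_PUTCHAR);
    }
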
-
-#ifdef XEN
-/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
- * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
-#define SSC_OPEN 50
-#define SSC_CLOSE 51
-#define SSC_READ 52
-#define SSC_WRITE 53
-#define SSC_GET_COMPLETION 54
-#define SSC_WAIT_COMPLETION 55
-
-#define SSC_WRITE_ACCESS 2
-#define SSC_READ_ACCESS 1
-
-struct ssc_disk_req {
- unsigned long addr;
- unsigned long len;
-};
-#endif
-
-#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/iosapic.c
--- a/xen/arch/ia64/linux-xen/iosapic.c Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1288 +0,0 @@
-/*
- * I/O SAPIC support.
- *
- * Copyright (C) 1999 Intel Corp.
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@xxxxxxxxx>
- * Copyright (C) 2000-2002 J.I. Lee <jung-ik.lee@xxxxxxxxx>
- * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
- * David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999,2000 Walt Drummond <drummond@xxxxxxxxxxx>
- *
- * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O APIC code.
- *				In particular, we now have separate handlers for edge
- *				and level triggered interrupts.
- * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@xxxxxxxxx> IRQ vector allocation
- *				PCI to vector mapping, shared PCI interrupts.
- * 00/10/27	D. Mosberger	Document things a bit more to make them more understandable.
- *				Clean up much of the old IOSAPIC cruft.
- * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts and fixes for
- *				ACPI S5(SoftOff) support.
- * 02/01/23	J.I. Lee	iosapic pgm fixes for PCI irq routing from _PRT
- * 02/01/07	E. Focht	<efocht@xxxxxxxxxx> Redirectable interrupt vectors in
- *				iosapic_set_affinity(), initializations for
- *				/proc/irq/#/smp_affinity
- * 02/04/02	P. Diefenbaugh	Cleaned up ACPI PCI IRQ routing.
- * 02/04/18	J.I. Lee	bug fix in iosapic_init_pci_irq
- * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to IOSAPIC mapping
- *				error
- * 02/07/29	T. Kochi	Allocate interrupt vectors dynamically
- * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system interrupt, vector, etc.)
- * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's pci_irq code.
- * 03/02/19	B. Helgaas	Make pcat_compat system-wide, not per-IOSAPIC.
- *				Remove iosapic_address & gsi_base from external interfaces.
- *				Rationalize __init/__devinit attributes.
- * 04/12/04	Ashok Raj	<ashok.raj@xxxxxxxxx> Intel Corporation 2004
- *				Updated to work with irq migration necessary for CPU Hotplug
- */
-/*
- * Here is what the interrupt logic between a PCI device and the kernel looks like:
- *
- * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC, INTD).  The
- *     device is uniquely identified by its bus- and slot-number (the function
- *     number does not matter here because all functions share the same interrupt
- *     lines).
- *
- * (2) The motherboard routes the interrupt line to a pin on an IOSAPIC controller.
- *     Multiple interrupt lines may have to share the same IOSAPIC pin (if they're level
- *     triggered and use the same polarity).  Each interrupt line has a unique Global
- *     System Interrupt (GSI) number which can be calculated as the sum of the controller's
- *     base GSI number and the IOSAPIC pin number to which the line connects.
- *
- * (3) The IOSAPIC uses internal routing table entries (RTEs) to map the IOSAPIC pin
- *     into the IA-64 interrupt vector.  This interrupt vector is then sent to the CPU.
- *
- * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is used as the
- *     architecture-independent interrupt handling mechanism in Linux.  As an
- *     IRQ is a number, we have to have an IA-64 interrupt vector number <-> IRQ number
- *     mapping.  On smaller systems, we use one-to-one mapping between IA-64 vector and
- *     IRQ.  A platform can implement platform_irq_to_vector(irq) and
- *     platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
- *     Please see also include/asm-ia64/hw_irq.h for those APIs.
- *
- * To sum up, there are three levels of mappings involved:
- *
- *	PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
- *
- * Note: The term "IRQ" is loosely used everywhere in the Linux kernel to describe interrupts.
- * Now we use "IRQ" only for Linux IRQ's.  ISA IRQ (isa_irq) is the only exception in this
- * source code.
- */
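
[Editor's note: a minimal sketch of step (2) above, with illustrative names;
the GSI of a line is simply the controller's base GSI plus its pin index.]

    static inline unsigned int pin_to_gsi(unsigned int gsi_base, unsigned int pin)
    {
            return gsi_base + pin;  /* compare iosapic_rte_info.gsi_base below */
    }
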
-#include <linux/config.h>
-
-#include <linux/acpi.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#ifdef XEN
-#include <xen/errno.h>
-#endif
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/string.h>
-#include <linux/bootmem.h>
-
-#include <asm/delay.h>
-#include <asm/hw_irq.h>
-#include <asm/io.h>
-#include <asm/iosapic.h>
-#include <asm/machvec.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-
-#ifdef XEN
-static inline int iosapic_irq_to_vector (int irq)
-{
- return irq;
-}
-
-#undef irq_to_vector
-#define irq_to_vector(irq) iosapic_irq_to_vector(irq)
-#define AUTO_ASSIGN AUTO_ASSIGN_IRQ
-#endif
-
-#undef DEBUG_INTERRUPT_ROUTING
-
-#ifdef DEBUG_INTERRUPT_ROUTING
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-#define NR_PREALLOCATE_RTE_ENTRIES	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
-#define RTE_PREALLOCATED (1)
-
-static DEFINE_SPINLOCK(iosapic_lock);
-
-/* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */
-
-struct iosapic_rte_info {
-	struct list_head rte_list;	/* node in list of RTEs sharing the same vector */
- char __iomem *addr; /* base address of IOSAPIC */
- unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */
- char rte_index; /* IOSAPIC RTE index */
- int refcnt; /* reference counter */
- unsigned int flags; /* flags */
-} ____cacheline_aligned;
-
-static struct iosapic_intr_info {
-	struct list_head rtes;		/* RTEs using this vector (empty => not an IOSAPIC interrupt) */
-	int		count;		/* # of RTEs that share this vector */
-	u32		low32;		/* current value of low word of Redirection table entry */
- unsigned int dest; /* destination CPU physical ID */
- unsigned char dmode : 3; /* delivery mode (see iosapic.h) */
- unsigned char polarity: 1; /* interrupt polarity (see iosapic.h) */
- unsigned char trigger : 1; /* trigger mode (see iosapic.h) */
-} iosapic_intr_info[IA64_NUM_VECTORS];
-
-#ifndef XEN
-static struct iosapic {
- char __iomem *addr; /* base address of IOSAPIC */
- unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */
- unsigned short num_rte; /* number of RTE in this IOSAPIC */
- int rtes_inuse; /* # of RTEs in use on this IOSAPIC */
-#ifdef CONFIG_NUMA
- unsigned short node; /* numa node association via pxm */
-#endif
-} iosapic_lists[NR_IOSAPICS];
-#else
-struct iosapic iosapic_lists[NR_IOSAPICS];
-#endif
-
-static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */
-
-static int iosapic_kmalloc_ok;
-static LIST_HEAD(free_rte_list);
-
-/*
- * Find an IOSAPIC associated with a GSI
- */
-static inline int
-find_iosapic (unsigned int gsi)
-{
- int i;
-
- for (i = 0; i < NR_IOSAPICS; i++) {
-		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) < iosapic_lists[i].num_rte)
- return i;
- }
-
- return -1;
-}
-
-static inline int
-_gsi_to_vector (unsigned int gsi)
-{
- struct iosapic_intr_info *info;
- struct iosapic_rte_info *rte;
-
-	for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info)
- list_for_each_entry(rte, &info->rtes, rte_list)
- if (rte->gsi_base + rte->rte_index == gsi)
- return info - iosapic_intr_info;
- return -1;
-}
-
-/*
- * Translate GSI number to the corresponding IA-64 interrupt vector. If no
- * entry exists, return -1.
- */
-inline int
-gsi_to_vector (unsigned int gsi)
-{
- return _gsi_to_vector(gsi);
-}
-
-int
-gsi_to_irq (unsigned int gsi)
-{
- unsigned long flags;
- int irq;
- /*
-	 * XXX fix me: this assumes an identity mapping between IA-64 vector and Linux irq
- * numbers...
- */
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- irq = _gsi_to_vector(gsi);
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-
- return irq;
-}
-
-static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, unsigned int vec)
-{
- struct iosapic_rte_info *rte;
-
- list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
- if (rte->gsi_base + rte->rte_index == gsi)
- return rte;
- return NULL;
-}
-
-static void
-set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
-{
- unsigned long pol, trigger, dmode;
- u32 low32, high32;
- char __iomem *addr;
- int rte_index;
- char redir;
- struct iosapic_rte_info *rte;
-
- DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
-
- rte = gsi_vector_to_rte(gsi, vector);
- if (!rte)
- return; /* not an IOSAPIC interrupt */
-
- rte_index = rte->rte_index;
- addr = rte->addr;
- pol = iosapic_intr_info[vector].polarity;
- trigger = iosapic_intr_info[vector].trigger;
- dmode = iosapic_intr_info[vector].dmode;
-
- redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
-
-#ifdef CONFIG_SMP
- {
- unsigned int irq;
-
- for (irq = 0; irq < NR_IRQS; ++irq)
- if (irq_to_vector(irq) == vector) {
-				set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
- break;
- }
- }
-#endif
-
- low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
- (trigger << IOSAPIC_TRIGGER_SHIFT) |
- (dmode << IOSAPIC_DELIVERY_SHIFT) |
- ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
- vector);
-
- /* dest contains both id and eid */
- high32 = (dest << IOSAPIC_DEST_SHIFT);
-
- iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
- iosapic_intr_info[vector].low32 = low32;
- iosapic_intr_info[vector].dest = dest;
-}
-
-void
-kexec_disable_iosapic(void)
-{
- struct iosapic_intr_info *info;
- struct iosapic_rte_info *rte;
- u8 vec = 0;
- for (info = iosapic_intr_info; info <
- iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) {
- list_for_each_entry(rte, &info->rtes,
- rte_list) {
- iosapic_write(rte->addr,
- IOSAPIC_RTE_LOW(rte->rte_index),
- IOSAPIC_MASK|vec);
- iosapic_eoi(rte->addr, vec);
- }
- }
-}
-
-static void
-mask_irq (struct irq_desc *desc)
-{
- unsigned long flags;
- char __iomem *addr;
- u32 low32;
- int rte_index;
- ia64_vector vec = irq_to_vector(desc->irq);
- struct iosapic_rte_info *rte;
-
- if (list_empty(&iosapic_intr_info[vec].rtes))
- return; /* not an IOSAPIC interrupt! */
-
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- /* set only the mask bit */
- low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
- addr = rte->addr;
- rte_index = rte->rte_index;
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
- }
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-}
-
-static void
-unmask_irq (struct irq_desc *desc)
-{
- unsigned long flags;
- char __iomem *addr;
- u32 low32;
- int rte_index;
- ia64_vector vec = irq_to_vector(desc->irq);
- struct iosapic_rte_info *rte;
-
- if (list_empty(&iosapic_intr_info[vec].rtes))
- return; /* not an IOSAPIC interrupt! */
-
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
- addr = rte->addr;
- rte_index = rte->rte_index;
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
- }
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-}
-
-
-static void
-iosapic_set_affinity (struct irq_desc *desc, const cpumask_t *mask)
-{
-#ifdef CONFIG_SMP
- unsigned long flags;
- u32 high32, low32;
- int dest, rte_index;
- char __iomem *addr;
- int redir = (desc->irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
- unsigned int irq = desc->irq & ~IA64_IRQ_REDIRECTED;
- ia64_vector vec;
- struct iosapic_rte_info *rte;
-
- vec = irq_to_vector(irq);
-
- if (cpumask_empty(mask))
- return;
-
- dest = cpu_physical_id(cpumask_first(mask));
-
- if (list_empty(&iosapic_intr_info[vec].rtes))
- return; /* not an IOSAPIC interrupt */
-
- set_irq_affinity_info(irq, dest, redir);
-
- /* dest contains both id and eid */
- high32 = dest << IOSAPIC_DEST_SHIFT;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- {
-		low32 = iosapic_intr_info[vec].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
-
-		if (redir)
-			/* change delivery mode to lowest priority */
-			low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
-		else
-			/* change delivery mode to fixed */
-			low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
-
-		iosapic_intr_info[vec].low32 = low32;
-		iosapic_intr_info[vec].dest = dest;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
-			addr = rte->addr;
-			rte_index = rte->rte_index;
-			iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
- }
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-#endif
-}
-
-/*
- * Handlers for level-triggered interrupts.
- */
-
-static unsigned int
-iosapic_startup_level_irq (struct irq_desc *desc)
-{
- unmask_irq(desc);
- return 0;
-}
-
-static void
-iosapic_end_level_irq (struct irq_desc *desc)
-{
- ia64_vector vec = irq_to_vector(desc->irq);
- struct iosapic_rte_info *rte;
-
- move_irq(desc->irq);
- list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
- iosapic_eoi(rte->addr, vec);
-}
-
-#define iosapic_shutdown_level_irq mask_irq
-#define iosapic_enable_level_irq unmask_irq
-#define iosapic_disable_level_irq mask_irq
-#define iosapic_ack_level_irq irq_actor_none
-
-static hw_irq_controller irq_type_iosapic_level = {
- .typename = "IO-SAPIC-level",
- .startup = iosapic_startup_level_irq,
- .shutdown = iosapic_shutdown_level_irq,
- .enable = iosapic_enable_level_irq,
- .disable = iosapic_disable_level_irq,
- .ack = iosapic_ack_level_irq,
- .end = iosapic_end_level_irq,
- .set_affinity = iosapic_set_affinity
-};
-
-/*
- * Handlers for edge-triggered interrupts.
- */
-
-static unsigned int
-iosapic_startup_edge_irq (struct irq_desc *desc)
-{
- unmask_irq(desc);
- /*
- * IOSAPIC simply drops interrupts pended while the
- * corresponding pin was masked, so we can't know if an
- * interrupt is pending already. Let's hope not...
- */
- return 0;
-}
-
-static void
-iosapic_ack_edge_irq (struct irq_desc *desc)
-{
-	move_irq(desc->irq);
- /*
- * Once we have recorded IRQ_PENDING already, we can mask the
- * interrupt for real. This prevents IRQ storms from unhandled
- * devices.
- */
-	if ((desc->status & (IRQ_PENDING|IRQ_DISABLED)) == (IRQ_PENDING|IRQ_DISABLED))
- mask_irq(desc);
-}
-
-#define iosapic_enable_edge_irq unmask_irq
-#define iosapic_disable_edge_irq irq_disable_none
-#define iosapic_end_edge_irq irq_actor_none
-
-static hw_irq_controller irq_type_iosapic_edge = {
- .typename = "IO-SAPIC-edge",
- .startup = iosapic_startup_edge_irq,
- .shutdown = iosapic_disable_edge_irq,
- .enable = iosapic_enable_edge_irq,
- .disable = iosapic_disable_edge_irq,
- .ack = iosapic_ack_edge_irq,
- .end = iosapic_end_edge_irq,
- .set_affinity = iosapic_set_affinity
-};
-
-unsigned int
-iosapic_version (char __iomem *addr)
-{
- /*
-	 * The IOSAPIC Version Register returns a 32-bit structure like:
- * {
- * unsigned int version : 8;
- * unsigned int reserved1 : 8;
- * unsigned int max_redir : 8;
- * unsigned int reserved2 : 8;
- * }
- */
- return iosapic_read(addr, IOSAPIC_VERSION);
-}
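
[Editor's note: following the layout documented in the comment, the two
interesting fields decode as below; helper names are illustrative, and the
max_redir field is exactly what the num_rte computation in iosapic_init()
further down consumes.]

    static inline unsigned int iosapic_ver_version(unsigned int ver)
    {
            return ver & 0xff;              /* implementation version */
    }

    static inline unsigned int iosapic_ver_max_redir(unsigned int ver)
    {
            return (ver >> 16) & 0xff;      /* highest RTE index, counted from 0 */
    }
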
-
-static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long pol)
-{
- int i, vector = -1, min_count = -1;
- struct iosapic_intr_info *info;
-
- /*
- * shared vectors for edge-triggered interrupts are not
- * supported yet
- */
- if (trigger == IOSAPIC_EDGE)
- return -1;
-
- for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) {
- info = &iosapic_intr_info[i];
-		if (info->trigger == trigger && info->polarity == pol &&
-		    (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY)) {
- if (min_count == -1 || info->count < min_count) {
- vector = i;
- min_count = info->count;
- }
- }
- }
-
- return vector;
-}
-
-/*
- * if the given vector is already owned by other,
- * assign a new vector for the other and make the vector available
- */
-static void __init
-iosapic_reassign_vector (int vector)
-{
- int new_vector;
-
- if (!list_empty(&iosapic_intr_info[vector].rtes)) {
- new_vector = assign_irq_vector(AUTO_ASSIGN);
- if (new_vector < 0)
- panic("%s: out of interrupt vectors!\n", __FUNCTION__);
-		printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector);
-		memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
-		       sizeof(struct iosapic_intr_info));
-		INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes);
-		list_move(iosapic_intr_info[vector].rtes.next, &iosapic_intr_info[new_vector].rtes);
-		memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
- iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
- INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
- }
-}
-
-static struct iosapic_rte_info *iosapic_alloc_rte (void)
-{
- int i;
- struct iosapic_rte_info *rte;
- int preallocated = 0;
-
- if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
-#ifdef XEN
-		rte = xmalloc_bytes(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES);
-#else
-		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES);
-#endif
- if (!rte)
- return NULL;
- for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
- list_add(&rte->rte_list, &free_rte_list);
- }
-
- if (!list_empty(&free_rte_list)) {
-		rte = list_entry(free_rte_list.next, struct iosapic_rte_info, rte_list);
- list_del(&rte->rte_list);
- preallocated++;
- } else {
- rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
- if (!rte)
- return NULL;
- }
-
- memset(rte, 0, sizeof(struct iosapic_rte_info));
- if (preallocated)
- rte->flags |= RTE_PREALLOCATED;
-
- return rte;
-}
-
-static void iosapic_free_rte (struct iosapic_rte_info *rte)
-{
- if (rte->flags & RTE_PREALLOCATED)
- list_add_tail(&rte->rte_list, &free_rte_list);
- else
- kfree(rte);
-}
-
-static inline int vector_is_shared (int vector)
-{
- return (iosapic_intr_info[vector].count > 1);
-}
-
-static int
-register_intr (unsigned int gsi, int vector, unsigned char delivery,
- unsigned long polarity, unsigned long trigger)
-{
- irq_desc_t *idesc;
- hw_irq_controller *irq_type;
- int rte_index;
- int index;
- unsigned long gsi_base;
- void __iomem *iosapic_address;
- struct iosapic_rte_info *rte;
-
- index = find_iosapic(gsi);
- if (index < 0) {
-		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi);
- return -ENODEV;
- }
-
- iosapic_address = iosapic_lists[index].addr;
- gsi_base = iosapic_lists[index].gsi_base;
-
- rte = gsi_vector_to_rte(gsi, vector);
- if (!rte) {
- rte = iosapic_alloc_rte();
- if (!rte) {
-			printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__);
- return -ENOMEM;
- }
-
- rte_index = gsi - gsi_base;
- rte->rte_index = rte_index;
- rte->addr = iosapic_address;
- rte->gsi_base = gsi_base;
- rte->refcnt++;
- list_add_tail(&rte->rte_list, &iosapic_intr_info[vector].rtes);
- iosapic_intr_info[vector].count++;
- iosapic_lists[index].rtes_inuse++;
- }
- else if (vector_is_shared(vector)) {
- struct iosapic_intr_info *info = &iosapic_intr_info[vector];
- if (info->trigger != trigger || info->polarity != polarity) {
-			printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__);
- return -EINVAL;
- }
- }
-
- iosapic_intr_info[vector].polarity = polarity;
- iosapic_intr_info[vector].dmode = delivery;
- iosapic_intr_info[vector].trigger = trigger;
-
- if (trigger == IOSAPIC_EDGE)
- irq_type = &irq_type_iosapic_edge;
- else
- irq_type = &irq_type_iosapic_level;
-
- idesc = irq_descp(vector);
- if (idesc->handler != irq_type) {
- if (idesc->handler != &no_irq_type)
-			printk(KERN_WARNING "%s: changing vector %d from %s to %s\n",
-			       __FUNCTION__, vector, idesc->handler->typename, irq_type->typename);
- idesc->handler = irq_type;
- }
- return 0;
-}
-
-static unsigned int
-get_target_cpu (unsigned int gsi, int vector)
-{
-#ifdef CONFIG_SMP
- static int cpu = -1;
-
- /*
- * In case of vector shared by multiple RTEs, all RTEs that
- * share the vector need to use the same destination CPU.
- */
- if (!list_empty(&iosapic_intr_info[vector].rtes))
- return iosapic_intr_info[vector].dest;
-
- /*
- * If the platform supports redirection via XTP, let it
- * distribute interrupts.
- */
- if (smp_int_redirect & SMP_IRQ_REDIRECTION)
- return cpu_physical_id(smp_processor_id());
-
- /*
- * Some interrupts (ACPI SCI, for instance) are registered
- * before the BSP is marked as online.
- */
- if (!cpu_online(smp_processor_id()))
- return cpu_physical_id(smp_processor_id());
-
-#ifdef CONFIG_NUMA
- {
- int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
- cpumask_t cpu_mask;
-
- iosapic_index = find_iosapic(gsi);
- if (iosapic_index < 0 ||
- iosapic_lists[iosapic_index].node == MAX_NUMNODES)
- goto skip_numa_setup;
-
- cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-
- for_each_cpu(numa_cpu, &cpu_mask) {
- if (!cpu_online(numa_cpu))
- cpumask_clear_cpu(numa_cpu, &cpu_mask);
- }
-
- num_cpus = cpumask_weight(&cpu_mask);
-
- if (!num_cpus)
- goto skip_numa_setup;
-
-		/* Use vector assignment to distribute across cpus in node */
- cpu_index = vector % num_cpus;
-
- for (numa_cpu = cpumask_first(&cpu_mask) ; i < cpu_index ; i++)
- numa_cpu = cpumask_next(numa_cpu, &cpu_mask);
-
- if (numa_cpu != NR_CPUS)
- return cpu_physical_id(numa_cpu);
- }
-skip_numa_setup:
-#endif
- /*
- * Otherwise, round-robin interrupt vectors across all the
- * processors. (It'd be nice if we could be smarter in the
- * case of NUMA.)
- */
- do {
- if (++cpu >= NR_CPUS)
- cpu = 0;
- } while (!cpu_online(cpu));
-
- return cpu_physical_id(cpu);
-#else
- return cpu_physical_id(smp_processor_id());
-#endif
-}
-
-/*
- * ACPI can describe IOSAPIC interrupts via static tables and namespace
- * methods. This provides an interface to register those interrupts and
- * program the IOSAPIC RTE.
- */
-int
-iosapic_register_intr (unsigned int gsi,
- unsigned long polarity, unsigned long trigger)
-{
- int vector, mask = 1, err;
- unsigned int dest;
- unsigned long flags;
- struct iosapic_rte_info *rte;
- u32 low32;
-again:
- /*
- * If this GSI has already been registered (i.e., it's a
- * shared interrupt, or we lost a race to register it),
- * don't touch the RTE.
- */
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- vector = gsi_to_vector(gsi);
- if (vector > 0) {
- rte = gsi_vector_to_rte(gsi, vector);
- rte->refcnt++;
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return vector;
- }
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-
- /* If vector is running out, we try to find a sharable vector */
- vector = assign_irq_vector(AUTO_ASSIGN);
- if (vector < 0) {
- vector = iosapic_find_sharable_vector(trigger, polarity);
- if (vector < 0)
- return -ENOSPC;
- }
-
- spin_lock_irqsave(&irq_descp(vector)->lock, flags);
- spin_lock(&iosapic_lock);
- {
- if (gsi_to_vector(gsi) > 0) {
- if (list_empty(&iosapic_intr_info[vector].rtes))
- free_irq_vector(vector);
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
- goto again;
- }
-
- dest = get_target_cpu(gsi, vector);
- err = register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
- polarity, trigger);
- if (err < 0) {
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
- return err;
- }
-
- /*
- * If the vector is shared and already unmasked for
- * other interrupt sources, don't mask it.
- */
- low32 = iosapic_intr_info[vector].low32;
- if (vector_is_shared(vector) && !(low32 & IOSAPIC_MASK))
- mask = 0;
- set_rte(gsi, vector, dest, mask);
- }
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
-
- printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
- gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
- (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
- cpu_logical_id(dest), dest, vector);
-
- return vector;
-}
-
-void
-iosapic_unregister_intr (unsigned int gsi)
-{
- unsigned long flags;
- int irq, vector, index;
- irq_desc_t *idesc;
- u32 low32;
- unsigned long trigger, polarity;
- unsigned int dest;
- struct iosapic_rte_info *rte;
-
- /*
- * If the irq associated with the gsi is not found,
- * iosapic_unregister_intr() is unbalanced. We need to check
- * this again after getting locks.
- */
- irq = gsi_to_irq(gsi);
- if (irq < 0) {
-		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
- WARN_ON(1);
- return;
- }
- vector = irq_to_vector(irq);
-
- idesc = irq_descp(irq);
- spin_lock_irqsave(&idesc->lock, flags);
- spin_lock(&iosapic_lock);
- {
- if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) {
-			printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
- WARN_ON(1);
- goto out;
- }
-
- if (--rte->refcnt > 0)
- goto out;
-
- /* Mask the interrupt */
- low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK;
-		iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32);
-
- /* Remove the rte entry from the list */
- list_del(&rte->rte_list);
- iosapic_intr_info[vector].count--;
- iosapic_free_rte(rte);
- index = find_iosapic(gsi);
- iosapic_lists[index].rtes_inuse--;
- WARN_ON(iosapic_lists[index].rtes_inuse < 0);
-
- trigger = iosapic_intr_info[vector].trigger;
- polarity = iosapic_intr_info[vector].polarity;
- dest = iosapic_intr_info[vector].dest;
-		printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
- gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
- (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
- cpu_logical_id(dest), dest, vector);
-
- if (list_empty(&iosapic_intr_info[vector].rtes)) {
- /* Sanity check */
- BUG_ON(iosapic_intr_info[vector].count);
-
- /* Clear the interrupt controller descriptor */
- idesc->handler = &no_irq_type;
-
- /* Clear the interrupt information */
-			memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
- iosapic_intr_info[vector].low32 |= IOSAPIC_MASK;
- INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
-
- if (idesc->action) {
-				printk(KERN_ERR "interrupt handlers still exist on IRQ %u\n", irq);
- WARN_ON(1);
- }
-
- /* Free the interrupt vector */
- free_irq_vector(vector);
- }
- }
- out:
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&idesc->lock, flags);
-}
-
-/*
- * ACPI calls this when it finds an entry for a platform interrupt.
- * Note that the irq_base and IOSAPIC address must be set in iosapic_init().
- */
-int __init
-iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
- int iosapic_vector, u16 eid, u16 id,
- unsigned long polarity, unsigned long trigger)
-{
- static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
- unsigned char delivery;
- int vector, mask = 0;
- unsigned int dest = ((id << 8) | eid) & 0xffff;
-
- switch (int_type) {
- case ACPI_INTERRUPT_PMI:
- vector = iosapic_vector;
- /*
- * since PMI vector is alloc'd by FW(ACPI) not by kernel,
- * we need to make sure the vector is available
- */
- iosapic_reassign_vector(vector);
- delivery = IOSAPIC_PMI;
- break;
- case ACPI_INTERRUPT_INIT:
- vector = assign_irq_vector(AUTO_ASSIGN);
- if (vector < 0)
- panic("%s: out of interrupt vectors!\n", __FUNCTION__);
- delivery = IOSAPIC_INIT;
- break;
- case ACPI_INTERRUPT_CPEI:
- vector = IA64_CPE_VECTOR;
- delivery = IOSAPIC_LOWEST_PRIORITY;
- mask = 1;
- break;
- default:
-		printk(KERN_ERR "iosapic_register_platform_irq(): invalid int type 0x%x\n", int_type);
- return -1;
- }
-
- register_intr(gsi, vector, delivery, polarity, trigger);
-
-	printk(KERN_INFO "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
- int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
- int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
- (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
- cpu_logical_id(dest), dest, vector);
-
- set_rte(gsi, vector, dest, mask);
- return vector;
-}
-
-
-/*
- * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
- * Note that the gsi_base and IOSAPIC address must be set in iosapic_init().
- */
-void __init
-iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
- unsigned long polarity,
- unsigned long trigger)
-{
- int vector;
- unsigned int dest = cpu_physical_id(smp_processor_id());
-
- vector = isa_irq_to_vector(isa_irq);
-
- register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
-
- DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
- isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
- polarity == IOSAPIC_POL_HIGH ? "high" : "low",
- cpu_logical_id(dest), dest, vector);
-
- set_rte(gsi, vector, dest, 1);
-}
-
-void __init
-iosapic_system_init (int system_pcat_compat)
-{
- int vector;
-
- for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) {
- iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
-		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);	/* mark as unused */
- }
-
- pcat_compat = system_pcat_compat;
- if (pcat_compat) {
-		/*
-		 * Disable the compatibility mode interrupts (8259 style), needs IN/OUT support
-		 * enabled.
-		 */
-		printk(KERN_INFO "%s: Disabling PC-AT compatible 8259 interrupts\n", __FUNCTION__);
- outb(0xff, 0xA1);
- outb(0xff, 0x21);
- }
-}
-
-static inline int
-iosapic_alloc (void)
-{
- int index;
-
- for (index = 0; index < NR_IOSAPICS; index++)
- if (!iosapic_lists[index].addr)
- return index;
-
- printk(KERN_WARNING "%s: failed to allocate iosapic\n", __FUNCTION__);
- return -1;
-}
-
-static inline void
-iosapic_free (int index)
-{
- memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
-}
-
-static inline int
-iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
-{
- int index;
- unsigned int gsi_end, base, end;
-
- /* check gsi range */
- gsi_end = gsi_base + ((ver >> 16) & 0xff);
- for (index = 0; index < NR_IOSAPICS; index++) {
- if (!iosapic_lists[index].addr)
- continue;
-
- base = iosapic_lists[index].gsi_base;
- end = base + iosapic_lists[index].num_rte - 1;
-
- if (gsi_base < base && gsi_end < base)
- continue;/* OK */
-
- if (gsi_base > end && gsi_end > end)
- continue; /* OK */
-
- return -EBUSY;
- }
- return 0;
-}
-
-int __devinit
-#ifndef XEN
-iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
-#else
-iosapic_init (unsigned long phys_addr, unsigned int gsi_base, unsigned int id)
-#endif
-{
- int num_rte, err, index;
- unsigned int isa_irq, ver;
- char __iomem *addr;
- unsigned long flags;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- addr = ioremap(phys_addr, 0);
- ver = iosapic_version(addr);
-
- if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
- iounmap(addr);
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return err;
- }
-
- /*
- * The MAX_REDIR register holds the highest input pin
- * number (starting from 0).
- * We add 1 so that we can use it for number of pins (= RTEs)
- */
- num_rte = ((ver >> 16) & 0xff) + 1;
-
- index = iosapic_alloc();
- iosapic_lists[index].addr = addr;
- iosapic_lists[index].gsi_base = gsi_base;
- iosapic_lists[index].num_rte = num_rte;
-#ifdef XEN
- iosapic_lists[index].id = id;
-#endif
-#ifdef CONFIG_NUMA
- iosapic_lists[index].node = MAX_NUMNODES;
-#endif
- }
- spin_unlock_irqrestore(&iosapic_lock, flags);
-
- if ((gsi_base == 0) && pcat_compat) {
- /*
-		 * Map the legacy ISA devices into the IOSAPIC data.  Some of these may
-		 * get reprogrammed later on with data from the ACPI Interrupt Source
- * Override table.
- */
- for (isa_irq = 0; isa_irq < 16; ++isa_irq)
-			iosapic_override_isa_irq(isa_irq, isa_irq, IOSAPIC_POL_HIGH, IOSAPIC_EDGE);
- }
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG
-int
-iosapic_remove (unsigned int gsi_base)
-{
- int index, err = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- {
- index = find_iosapic(gsi_base);
- if (index < 0) {
- printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
- __FUNCTION__, gsi_base);
- goto out;
- }
-
- if (iosapic_lists[index].rtes_inuse) {
- err = -EBUSY;
-			printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
- __FUNCTION__, gsi_base);
- goto out;
- }
-
- iounmap(iosapic_lists[index].addr);
- iosapic_free(index);
- }
- out:
- spin_unlock_irqrestore(&iosapic_lock, flags);
- return err;
-}
-#endif /* CONFIG_HOTPLUG */
-
-#ifdef CONFIG_NUMA
-void __devinit
-map_iosapic_to_node(unsigned int gsi_base, int node)
-{
- int index;
-
- index = find_iosapic(gsi_base);
- if (index < 0) {
- printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
- __FUNCTION__, gsi_base);
- return;
- }
- iosapic_lists[index].node = node;
- return;
-}
-#endif
-
-#ifndef XEN
-static int __init iosapic_enable_kmalloc (void)
-{
- iosapic_kmalloc_ok = 1;
- return 0;
-}
-core_initcall (iosapic_enable_kmalloc);
-#endif
-
-#ifdef XEN
-/* nop for now */
-void set_irq_affinity_info(unsigned int irq, int hwid, int redir) {}
-
-static int iosapic_physbase_to_id(unsigned long physbase)
-{
- int i;
- unsigned long addr = physbase | __IA64_UNCACHED_OFFSET;
-
- for (i = 0; i < NR_IOSAPICS; i++) {
- if ((unsigned long)(iosapic_lists[i].addr) == addr)
- return i;
- }
-
- return -1;
-}
-
-int iosapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval)
-{
- int id;
- unsigned long flags;
-
- if ((id = (iosapic_physbase_to_id(physbase))) < 0)
- return id;
-
- spin_lock_irqsave(&iosapic_lock, flags);
- *pval = iosapic_read(iosapic_lists[id].addr, reg);
- spin_unlock_irqrestore(&iosapic_lock, flags);
-
- return 0;
-}
-
-int iosapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
-{
- unsigned int id, gsi, vec, xen_vec, dest, high32;
- char rte_index;
- struct iosapic *ios;
- struct iosapic_intr_info *info;
- struct rte_entry rte;
- unsigned long flags;
-
- if ((id = (iosapic_physbase_to_id(physbase))) < 0)
- return -EINVAL;
- ios = &iosapic_lists[id];
-
- /* Only handle first half of RTE update */
- if ((reg < 0x10) || (reg & 1))
- return 0;
-
- rte.val = val;
- rte_index = IOSAPIC_RTEINDEX(reg);
- vec = rte.lo.vector;
-#if 0
- /* Take PMI/NMI/INIT/EXTINT handled by xen */
- if (rte.delivery_mode > IOSAPIC_LOWEST_PRIORITY) {
- printk("Attempt to write IOSAPIC dest mode owned by xen!\n");
- printk("IOSAPIC/PIN = (%d/%d), lo = 0x%x\n",
- id, rte_index, val);
- return -EINVAL;
- }
-#endif
-
- /* Sanity check. Vector should be allocated before this update */
- if ((rte_index > ios->num_rte) ||
- ((vec > IA64_FIRST_DEVICE_VECTOR) &&
- (vec < IA64_LAST_DEVICE_VECTOR) &&
- (!test_bit(vec - IA64_FIRST_DEVICE_VECTOR, ia64_vector_mask))))
- return -EINVAL;
-
- gsi = ios->gsi_base + rte_index;
- xen_vec = gsi_to_vector(gsi);
- if (xen_vec >= 0 && test_bit(xen_vec, ia64_xen_vector)) {
- printk("WARN: GSI %d in use by Xen.\n", gsi);
- return -EINVAL;
- }
- info = &iosapic_intr_info[vec];
- spin_lock_irqsave(&irq_descp(vec)->lock, flags);
- spin_lock(&iosapic_lock);
- if (!gsi_vector_to_rte(gsi, vec)) {
- register_intr(gsi, vec, IOSAPIC_LOWEST_PRIORITY,
- rte.lo.polarity, rte.lo.trigger);
- } else if (vector_is_shared(vec)) {
- if ((info->trigger != rte.lo.trigger) ||
- (info->polarity != rte.lo.polarity)) {
- printk("WARN: can't override shared interrupt vec\n");
-			printk("IOSAPIC/PIN = (%d/%d), ori = 0x%x, new = 0x%x\n",
-			       id, rte_index, info->low32, rte.val);
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vec)->lock, flags);
- return -EINVAL;
- }
-
- /* If the vector is shared and already unmasked for other
- * interrupt sources, don't mask it.
- *
- * Same check may also apply to single gsi pin, which may
- * be shared by devices belonging to different domain. But
- * let's see how to act later on demand.
- */
- if (!(info->low32 & IOSAPIC_MASK))
- rte.lo.mask = 0;
- }
-
- /* time to update physical RTE */
- dest = cpu_physical_id(smp_processor_id());
- high32 = (dest << IOSAPIC_DEST_SHIFT);
- iosapic_write(iosapic_lists[id].addr, reg + 1, high32);
- iosapic_write(iosapic_lists[id].addr, reg, rte.val);
- info->low32 = rte.val;
- info->dest = dest;
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vec)->lock, flags);
- return 0;
-}
-
-/* for vtd interrupt remapping. xen/drivers/vtd/intremap.c */
-int iosapic_get_nr_iosapics(void)
-{
- int index;
-
- for (index = NR_IOSAPICS - 1; index >= 0; index--) {
- if (iosapic_lists[index].addr)
- break;
- }
-
- return index + 1;
-}
-
-int iosapic_get_nr_pins(int index)
-{
- return iosapic_lists[index].num_rte;
-}
-#endif /* XEN */
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/irq_ia64.c
--- a/xen/arch/ia64/linux-xen/irq_ia64.c Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,350 +0,0 @@
-/*
- * linux/arch/ia64/kernel/irq.c
- *
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * Stephane Eranian <eranian@xxxxxxxxxx>
- * David Mosberger-Tang <davidm@xxxxxxxxxx>
- *
- * 6/10/99: Updated to bring in sync with x86 version to facilitate
- * support for SMP and different interrupt controllers.
- *
- * 09/15/00 Goutham Rao <goutham.rao@xxxxxxxxx> Implemented pci_irq_to_vector
- * PCI to vector allocation routine.
- * 04/14/2004 Ashok Raj <ashok.raj@xxxxxxxxx>
- *						Added CPU Hotplug handling for IPF.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-
-#include <linux/jiffies.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel_stat.h>
-#include <linux/slab.h>
-#include <linux/ptrace.h>
-#include <linux/random.h> /* for rand_initialize_irq() */
-#include <linux/signal.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/threads.h>
-#include <linux/bitops.h>
-
-#include <asm/delay.h>
-#include <asm/intrinsics.h>
-#include <asm/io.h>
-#include <asm/hw_irq.h>
-#include <asm/machvec.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-
-#ifdef XEN
-#include <xen/perfc.h>
-#endif
-
-#ifdef CONFIG_PERFMON
-# include <asm/perfmon.h>
-#endif
-
-#define IRQ_DEBUG 0
-
-/* default base addr of IPI table */
-void __iomem *ipi_base_addr = ((void __iomem *)
-			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
-
-/*
- * Legacy IRQ to IA-64 vector translation table.
- */
-__u8 isa_irq_to_vector_map[16] = {
- /* 8259 IRQ translation, first 16 entries */
- 0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
- 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
-};
-EXPORT_SYMBOL(isa_irq_to_vector_map);
-
-#ifdef XEN
-unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
-#else
-static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
-#endif
-
-int
-assign_irq_vector (int irq)
-{
- int pos, vector;
- again:
- pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
- vector = IA64_FIRST_DEVICE_VECTOR + pos;
- if (vector > IA64_LAST_DEVICE_VECTOR)
- return -ENOSPC;
- if (test_and_set_bit(pos, ia64_vector_mask))
- goto again;
- return vector;
-}
-
-void
-free_irq_vector (int vector)
-{
- int pos;
-
- if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
- return;
-
- pos = vector - IA64_FIRST_DEVICE_VECTOR;
- if (!test_and_clear_bit(pos, ia64_vector_mask))
- printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
-}
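assign_irq_vector()/free_irq_vector() above manage device vectors with a simple bitmap: allocation takes the first clear bit (retrying if the atomic test_and_set_bit races with another CPU), and freeing clears it, warning on a double free. Below is a minimal single-threaded sketch of the same scheme; the vector range constants are assumptions for illustration, and the kernel's atomic bit operations are replaced by plain array accesses.

#include <stdio.h>

#define FIRST_DEVICE_VECTOR	0x30	/* assumed range, for illustration */
#define LAST_DEVICE_VECTOR	0xe7
#define NUM_DEVICE_VECTORS	(LAST_DEVICE_VECTOR - FIRST_DEVICE_VECTOR + 1)

static unsigned char vector_in_use[NUM_DEVICE_VECTORS];

static int alloc_vector(void)
{
	int pos;

	for (pos = 0; pos < NUM_DEVICE_VECTORS; pos++)
		if (!vector_in_use[pos]) {	/* test_and_set_bit upstream */
			vector_in_use[pos] = 1;
			return FIRST_DEVICE_VECTOR + pos;
		}
	return -1;				/* -ENOSPC in the original */
}

static void free_vector(int vector)
{
	if (vector < FIRST_DEVICE_VECTOR || vector > LAST_DEVICE_VECTOR)
		return;
	if (!vector_in_use[vector - FIRST_DEVICE_VECTOR])
		printf("double free of vector 0x%x!\n", vector);
	vector_in_use[vector - FIRST_DEVICE_VECTOR] = 0;
}

int main(void)
{
	int v = alloc_vector();

	printf("allocated vector 0x%x\n", v);
	free_vector(v);
	free_vector(v);		/* triggers the double-free warning */
	return 0;
}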
-
-#ifdef CONFIG_SMP
-# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
-#else
-# define IS_RESCHEDULE(vec) (0)
-#endif
-/*
- * That's where the IVT branches when we get an external
- * interrupt. This branches to the correct hardware IRQ handler via
- * function ptr.
- */
-void
-ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
-{
- unsigned long saved_tpr;
-
-#ifdef XEN
- perfc_incr(irqs);
-#endif
-#if IRQ_DEBUG
-#ifdef XEN
- xen_debug_irq(vector, regs);
-#endif
- {
- unsigned long bsp, sp;
-
- /*
- * Note: if the interrupt happened while executing in
- * the context switch routine (ia64_switch_to), we may
- * get a spurious stack overflow here. This is
- * because the register and the memory stack are not
- * switched atomically.
- */
- bsp = ia64_getreg(_IA64_REG_AR_BSP);
- sp = ia64_getreg(_IA64_REG_SP);
-
- if ((sp - bsp) < 1024) {
- static unsigned char count;
- static long last_time;
-
- if (jiffies - last_time > 5*HZ)
- count = 0;
- if (++count < 5) {
- last_time = jiffies;
- printk("ia64_handle_irq: DANGER: less than "
- "1KB of free stack space!!\n"
- "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
- }
- }
- }
-#endif /* IRQ_DEBUG */
-
- /*
- * Always set TPR to limit maximum interrupt nesting depth to
- * 16 (without this, it would be ~240, which could easily lead
- * to kernel stack overflows).
- */
- irq_enter();
- saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
- ia64_srlz_d();
- while (vector != IA64_SPURIOUS_INT_VECTOR) {
- if (!IS_RESCHEDULE(vector)) {
- ia64_setreg(_IA64_REG_CR_TPR, vector);
- ia64_srlz_d();
-
- __do_IRQ(local_vector_to_irq(vector), regs);
-
- /*
- * Disable interrupts and send EOI:
- */
- local_irq_disable();
- ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
- }
- ia64_eoi();
- vector = ia64_get_ivr();
- }
- /*
- * This must be done *after* the ia64_eoi(). For example, the keyboard softirq
- * handler needs to be able to wait for further keyboard interrupts, which can't
- * come through until ia64_eoi() has been done.
- */
- irq_exit();
-}
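The heart of ia64_handle_irq() is its drain loop: read a vector from cr.IVR, raise cr.TPR to that vector so only higher-priority interrupts can nest, dispatch, issue EOI, and repeat until the spurious vector (0x0f) comes back. A small software model of that loop, with the hardware registers replaced by a canned queue of pending vectors:

#include <stdio.h>

#define SPURIOUS_VECTOR	0x0f

static int pending[] = { 0x31, 0x45, 0x52, SPURIOUS_VECTOR };
static int head;

static int get_ivr(void)	/* stands in for ia64_get_ivr() */
{
	return pending[head++];
}

static void handle_external_interrupt(void)
{
	int vector = get_ivr();

	while (vector != SPURIOUS_VECTOR) {
		/* Upstream: write 'vector' to cr.TPR so only strictly
		 * higher-priority interrupts can nest, dispatch via
		 * __do_IRQ(), restore the saved TPR, then EOI. */
		printf("dispatch vector 0x%x\n", vector);
		vector = get_ivr();	/* after EOI, read the next vector */
	}
}

int main(void)
{
	handle_external_interrupt();
	return 0;
}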
-
-#ifndef XEN
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * This function emulates a interrupt processing when a cpu is about to be
- * brought down.
- */
-void ia64_process_pending_intr(void)
-{
- ia64_vector vector;
- unsigned long saved_tpr;
- extern unsigned int vectors_in_migration[NR_IRQS];
-
- vector = ia64_get_ivr();
-
- irq_enter();
- saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
- ia64_srlz_d();
-
- /*
- * Perform normal interrupt style processing
- */
- while (vector != IA64_SPURIOUS_INT_VECTOR) {
- if (!IS_RESCHEDULE(vector)) {
- ia64_setreg(_IA64_REG_CR_TPR, vector);
- ia64_srlz_d();
-
- /*
- * Now try calling normal ia64_handle_irq as it would have got called
- * from a real intr handler. Try passing null for pt_regs, hopefully
- * it will work. I hope it works!.
- * Probably could shared code.
- */
- vectors_in_migration[local_vector_to_irq(vector)]=0;
- __do_IRQ(local_vector_to_irq(vector), NULL);
-
- /*
- * Disable interrupts and send EOI
- */
- local_irq_disable();
- ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
- }
- ia64_eoi();
- vector = ia64_get_ivr();
- }
- irq_exit();
-}
-#endif
-#endif
-
-
-#ifdef CONFIG_SMP
-extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
-
-static struct irqaction __read_mostly ipi_irqaction = {
- .handler = handle_IPI,
-#ifndef XEN
- .flags = SA_INTERRUPT,
-#endif
- .name = "IPI"
-};
-#endif
-
-static hw_irq_controller irq_type_ia64_lsapic = {
- .typename = "LSAPIC",
- .startup = irq_startup_none,
- .shutdown = irq_shutdown_none,
- .enable = irq_enable_none,
- .disable = irq_disable_none,
- .ack = irq_actor_none,
- .end = irq_actor_none
-};
-
-void
-register_percpu_irq (ia64_vector vec, struct irqaction *action)
-{
- irq_desc_t *desc;
-#ifndef XEN
- unsigned int irq;
-
- for (irq = 0; irq < NR_IRQS; ++irq)
- if (irq_to_vector(irq) == vec) {
- desc = irq_descp(irq);
- desc->status |= IRQ_PER_CPU;
- desc->handler = &irq_type_ia64_lsapic;
- if (action)
- setup_irq(irq, action);
- }
-#else
- desc = irq_descp(vec);
- desc->status |= IRQ_PER_CPU;
- desc->handler = &irq_type_ia64_lsapic;
- if (action)
- setup_vector(vec, action);
-#endif
-}
-
-#ifdef XEN
-int __init request_irq_vector(unsigned int vector,
- void (*handler)(int, void *, struct cpu_user_regs *),
- unsigned long irqflags, const char * devname, void *dev_id)
-{
- struct irqaction * action;
- int retval;
-
- /*
- * Sanity-check: shared interrupts must pass in a real dev-ID,
- * otherwise we'll have trouble later trying to figure out
- * which interrupt is which (messes up the interrupt freeing logic etc).
- * */
- if (vector >= NR_VECTORS)
- return -EINVAL;
- if (!handler)
- return -EINVAL;
-
- action = xmalloc(struct irqaction);
- if (!action)
- return -ENOMEM;
-
- action->handler = handler;
- action->name = devname;
- action->dev_id = dev_id;
-
- retval = setup_vector(vector, action);
- if (retval)
- xfree(action);
-
- return retval;
-}
-#endif
-
-void __init
-init_IRQ (void)
-{
-#ifdef XEN
- BUG_ON(init_irq_data());
-#endif
- register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
-#ifdef CONFIG_SMP
- register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
-#endif
-#ifdef CONFIG_PERFMON
- pfm_init_percpu();
-#endif
- platform_irq_init();
-}
-
-void
-ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
-{
- void __iomem *ipi_addr;
- unsigned long ipi_data;
- unsigned long phys_cpu_id;
-
-#ifdef CONFIG_SMP
- phys_cpu_id = cpu_physical_id(cpu);
-#else
- phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
-#endif
-
- /*
- * cpu number is in 8bit ID and 8bit EID
- */
-
- ipi_data = (delivery_mode << 8) | (vector & 0xff);
- ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
-
- writeq(ipi_data, ipi_addr);
-}
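ia64_send_ipi() encodes everything in two values: the target slot within the processor interrupt block, computed from the 16-bit physical CPU id (8-bit ID plus 8-bit EID) with bit 3 selecting redirection, and a data word carrying delivery mode and vector. A pure-arithmetic sketch of that encoding, with made-up example values and no hardware access:

#include <stdio.h>

static unsigned long ipi_offset(unsigned long phys_cpu_id, int redirect)
{
	/* slot within the processor interrupt block; bit 3 = redirection */
	return (phys_cpu_id << 4) | ((redirect & 1) << 3);
}

static unsigned long ipi_word(int delivery_mode, int vector)
{
	/* data written to the slot: delivery mode above the 8-bit vector */
	return ((unsigned long)delivery_mode << 8) | (vector & 0xff);
}

int main(void)
{
	/* e.g. INT delivery (mode 0) of vector 0xf0 to physical id 0x0102 */
	printf("offset = 0x%lx, data = 0x%lx\n",
	       ipi_offset(0x0102, 0), ipi_word(0, 0xf0));
	return 0;
}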
diff -r 2386288b1bf1 -r 8aa1697d57e4 xen/arch/ia64/linux-xen/mca.c
--- a/xen/arch/ia64/linux-xen/mca.c Mon Apr 02 18:14:31 2012 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1963 +0,0 @@
-/*
- * File: mca.c
- * Purpose: Generic MCA handling layer
- *
- * Updated for latest kernel
- * Copyright (C) 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@xxxxxxxxxx>
- *
- * Copyright (C) 2002 Dell Inc.
- * Copyright (C) Matt Domsch (Matt_Domsch@xxxxxxxx)
- *
- * Copyright (C) 2002 Intel
- * Copyright (C) Jenna Hall (jenna.s.hall@xxxxxxxxx)
- *
- * Copyright (C) 2001 Intel
- * Copyright (C) Fred Lewis (frederick.v.lewis@xxxxxxxxx)
- *
- * Copyright (C) 2000 Intel
- * Copyright (C) Chuck Fleckenstein (cfleck@xxxxxxxxxxxx)
- *
- * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander(vijay@xxxxxxxxxxxx)
- *
- * 03/04/15 D. Mosberger Added INIT backtrace support.
- * 02/03/25 M. Domsch GUID cleanups
- *
- * 02/01/04 J. Hall Aligned MCA stack to 16 bytes, added platform vs. CPU
- * error flag, set SAL default return values, changed
- * error record structure to linked list, added init call
- * to sal_get_state_info_size().
- *
- * 01/01/03 F. Lewis Added setup of CMCI and CPEI IRQs, logging of corrected
- * platform errors, completed code for logging of
- * corrected & uncorrected machine check errors, and
- * updated for conformance with Nov. 2000 revision of the
- * SAL 3.0 spec.
- * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
- * added min save state dump, added INIT handler.
- *
- * 2003-12-08 Keith Owens <kaos@xxxxxxx>
- * smp_call_function() must not be called from interrupt context (can
- * deadlock on tasklist_lock). Use keventd to call smp_call_function().
- *
- * 2004-02-01 Keith Owens <kaos@xxxxxxx>
- * Avoid deadlock when using printk() for MCA and INIT records.
- * Delete all record printing code, moved to salinfo_decode in user space.
- * Mark variables and functions static where possible.
- * Delete dead variables and functions.
- * Reorder to remove the need for forward declarations and to consolidate
- * related code.
- */
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kallsyms.h>
-#include <linux/smp_lock.h>
-#include <linux/bootmem.h>
-#include <linux/acpi.h>
-#include <linux/timer.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/workqueue.h>
-
-#include <asm/delay.h>
-#include <asm/machvec.h>
-#include <asm/meminit.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/sal.h>
-#include <asm/mca.h>
-
-#include <asm/irq.h>
-#include <asm/hw_irq.h>
-
-#ifdef XEN
-#include <xen/symbols.h>
-#include <xen/mm.h>
-#include <xen/console.h>
-#include <xen/event.h>
-#include <xen/softirq.h>
-#include <asm/xenmca.h>
-#include <linux/shutdown.h>
-#endif
-
-#if defined(IA64_MCA_DEBUG_INFO)
-# define IA64_MCA_DEBUG(fmt...) printk(fmt)
-#else
-# define IA64_MCA_DEBUG(fmt...)
-#endif
-
-/* Used by mca_asm.S */
-#ifndef XEN
-ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state;
-#else
-ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state[NR_CPUS];
-DEFINE_PER_CPU(u64, ia64_sal_to_os_handoff_state_addr);
-#endif
-ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state;
-u64 ia64_mca_serialize;
-DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
-DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
-DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
-DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */
-
-unsigned long __per_cpu_mca[NR_CPUS];
-
-/* In mca_asm.S */
-extern void ia64_monarch_init_handler (void);
-extern void ia64_slave_init_handler (void);
-
-static ia64_mc_info_t ia64_mc_info;
-
-#ifdef XEN
-#define jiffies NOW()
-#undef HZ
-#define HZ 1000000000UL
-#endif
-
-#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
-#define MIN_CPE_POLL_INTERVAL (2*60*HZ) /* 2 minutes */
-#define CMC_POLL_INTERVAL (1*60*HZ) /* 1 minute */
-#define CPE_HISTORY_LENGTH 5
-#define CMC_HISTORY_LENGTH 5
-
-#ifndef XEN
-static struct timer_list cpe_poll_timer;
-static struct timer_list cmc_poll_timer;
-#else
-#define mod_timer(timer, expires) set_timer(timer, expires)
-static struct timer cpe_poll_timer;
-static struct timer cmc_poll_timer;
-#endif
-/*
- * This variable tells whether we are currently in polling mode.
- * Start with this in the wrong state so we won't play w/ timers
- * before the system is ready.
- */
-static int cmc_polling_enabled = 1;
-
-/*
- * Clearing this variable prevents CPE polling from getting activated
- * in mca_late_init. Use it if your system doesn't provide a CPEI,
- * but encounters problems retrieving CPE logs. This should only be
- * necessary for debugging.
- */
-static int cpe_poll_enabled = 1;
-
-extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
-
-static int mca_init;
-
-/*
- * IA64_MCA log support
- */
-#define IA64_MAX_LOGS 2 /* Double-buffering for nested MCAs */
-#define IA64_MAX_LOG_TYPES 4 /* MCA, INIT, CMC, CPE */
-
-typedef struct ia64_state_log_s
-{
- spinlock_t isl_lock;
- int isl_index;
- unsigned long isl_count;
- ia64_err_rec_t *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
-} ia64_state_log_t;
-
-static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
-
-#ifndef XEN
-#define IA64_LOG_ALLOCATE(it, size) \
- {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
- (ia64_err_rec_t *)alloc_bootmem(size); \
- ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
- (ia64_err_rec_t *)alloc_bootmem(size);}
-#else
-#define IA64_LOG_ALLOCATE(it, size) \
- do { \
- unsigned int pageorder; \
- struct page_info *page; \
- pageorder = get_order_from_bytes(size); \
- page = alloc_domheap_pages(NULL, pageorder, 0); \
- ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
- page? (ia64_err_rec_t *)page_to_virt(page): NULL; \
- page = alloc_domheap_pages(NULL, pageorder, 0); \
- ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
- page? (ia64_err_rec_t *)page_to_virt(page): NULL; \
- } while(0)
-#endif
-
-#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
-#define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
-#define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
-#define IA64_LOG_NEXT_INDEX(it) ia64_state_log[it].isl_index
-#define IA64_LOG_CURR_INDEX(it) 1 - ia64_state_log[it].isl_index
-#define IA64_LOG_INDEX_INC(it) \
- {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
- ia64_state_log[it].isl_count++;}
-#define IA64_LOG_INDEX_DEC(it) \
- ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
-#define IA64_LOG_NEXT_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
-#define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
-#define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count
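The IA64_LOG_* macros above implement per-type double buffering: two buffers, an index that flips between 0 and 1 via "1 - index", and a running count, so a nested MCA can be captured while the previous record is still being read. A compact standalone model of that scheme:

#include <stdio.h>

struct state_log {
	int		index;		/* "next" buffer; "current" is 1 - index */
	unsigned long	count;
	char		buf[2][64];
};

static char *log_next(struct state_log *l)    { return l->buf[l->index]; }
static char *log_curr(struct state_log *l)    { return l->buf[1 - l->index]; }
static void  log_advance(struct state_log *l) { l->index = 1 - l->index; l->count++; }

int main(void)
{
	struct state_log l = { 0 };

	snprintf(log_next(&l), sizeof l.buf[0], "record A");
	log_advance(&l);		/* "record A" becomes current */
	snprintf(log_next(&l), sizeof l.buf[0], "record B");
	log_advance(&l);
	printf("count=%lu current=%s\n", l.count, log_curr(&l));
	return 0;
}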
-
-#ifdef XEN
-sal_queue_entry_t sal_entry[NR_CPUS][IA64_MAX_LOG_TYPES];
-struct list_head *sal_queue, sal_log_queues[IA64_MAX_LOG_TYPES];
-sal_log_record_header_t *sal_record;
-DEFINE_SPINLOCK(sal_queue_lock);
-#endif
-
-/*
- * ia64_log_init
- * Reset the OS ia64 log buffer
- * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- * Outputs : None
- */
-static void
-ia64_log_init(int sal_info_type)
-{
- u64 max_size = 0;
-
- IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
- IA64_LOG_LOCK_INIT(sal_info_type);
-
- // SAL will tell us the maximum size of any error record of this type
- max_size = ia64_sal_get_state_info_size(sal_info_type);
- if (!max_size)
- /* alloc_bootmem() doesn't like zero-sized allocations! */
- return;
-
- // set up OS data structures to hold error info
- IA64_LOG_ALLOCATE(sal_info_type, max_size);
- memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
- memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
-
-#ifdef XEN
- if (sal_record == NULL) {
- unsigned int pageorder;
- struct page_info *page;
- pageorder = get_order_from_bytes(max_size);
- page = alloc_domheap_pages(NULL, pageorder, 0);
- BUG_ON(page == NULL);
- sal_record = (sal_log_record_header_t *)page_to_virt(page);
- BUG_ON(sal_record == NULL);
- }
-#endif
-}
-
-#ifndef XEN
-/*
- * ia64_log_get
- *
- * Get the current MCA log from SAL and copy it into the OS log buffer.
- *
- * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- * irq_safe whether you can use printk at this point
- * Outputs : size (total record length)
- * *buffer (ptr to error record)
- *
- */
-static u64
-ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
-{
- sal_log_record_header_t *log_buffer;
- u64 total_len = 0;
- int s;
-
- IA64_LOG_LOCK(sal_info_type);
-
- /* Get the process state information */
- log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
-
- total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
-
- if (total_len) {
- IA64_LOG_INDEX_INC(sal_info_type);
- IA64_LOG_UNLOCK(sal_info_type);
- if (irq_safe) {
- IA64_MCA_DEBUG("%s: SAL error record type %d retrieved.
"
- "Record length = %ld\n", __FUNCTION__,
sal_info_type, total_len);
- }
- *buffer = (u8 *) log_buffer;
- return total_len;
- } else {
- IA64_LOG_UNLOCK(sal_info_type);
- return 0;
- }
-}
-
-/*
- * ia64_mca_log_sal_error_record
- *
- * This function retrieves a specified error record type from SAL
- * and wakes up any processes waiting for error records.
- *
- * Inputs : sal_info_type (Type of error record MCA/CMC/CPE/INIT)
- */
-static void
-ia64_mca_log_sal_error_record(int sal_info_type)
-{
- u8 *buffer;
- sal_log_record_header_t *rh;
- u64 size;
- int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
-#ifdef IA64_MCA_DEBUG_INFO
- static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
-#endif
-
- size = ia64_log_get(sal_info_type, &buffer, irq_safe);
- if (!size)
- return;
-
- salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
-
- if (irq_safe)
- IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
- smp_processor_id(),
- sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
-
- /* Clear logs from corrected errors in case there's no user-level logger */
- rh = (sal_log_record_header_t *)buffer;
- if (rh->severity == sal_log_severity_corrected)
- ia64_sal_clear_state_info(sal_info_type);
-}
-#else /* !XEN */
-/*
- * ia64_log_queue
- *
- * Get the current MCA log from SAL and copy it into the OS log buffer.
- *
- * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- * Outputs : size (total record length)
- * *buffer (ptr to error record)
- *
- */
-static u64
-ia64_log_queue(int sal_info_type, int virq)
-{
- sal_log_record_header_t *log_buffer;
- u64 total_len = 0;
- int s;
- sal_queue_entry_t *e;
- unsigned long flags;
-
- IA64_LOG_LOCK(sal_info_type);
-
- /* Get the process state information */
- log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
-
- total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
-
- if (total_len) {
- int queue_type;
- int cpuid = smp_processor_id();
-
- spin_lock_irqsave(&sal_queue_lock, flags);
-
- if (sal_info_type == SAL_INFO_TYPE_MCA && virq == VIRQ_MCA_CMC)
- queue_type = SAL_INFO_TYPE_CMC;
- else
- queue_type = sal_info_type;
-
- /* Skip if sal_entry is already listed in sal_queue */
- list_for_each_entry(e, &sal_queue[queue_type], list) {
- if (e == &sal_entry[cpuid][queue_type])
- goto found;
- }
- e = &sal_entry[cpuid][queue_type];
- memset(e, 0, sizeof(sal_queue_entry_t));
- e->cpuid = cpuid;
- e->sal_info_type = sal_info_type;
- e->vector = IA64_CMC_VECTOR;
- e->virq = virq;
- e->length = total_len;
-
- list_add_tail(&e->list, &sal_queue[queue_type]);
-
- found:
- spin_unlock_irqrestore(&sal_queue_lock, flags);
-
- IA64_LOG_INDEX_INC(sal_info_type);
- IA64_LOG_UNLOCK(sal_info_type);
- if (sal_info_type != SAL_INFO_TYPE_MCA &&
- sal_info_type != SAL_INFO_TYPE_INIT) {
- IA64_MCA_DEBUG("%s: SAL error record type %d retrieved.
"
- "Record length = %ld\n", __FUNCTION__,
- sal_info_type, total_len);
- }
- return total_len;
- } else {
- IA64_LOG_UNLOCK(sal_info_type);
- return 0;
- }
-}
-#endif /* !XEN */
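One detail worth noting in ia64_log_queue() above: each (cpu, record-type) pair owns exactly one statically allocated sal_queue_entry_t, so before queueing, the code scans the list and bails out if that entry is already present. A toy version of that dedup check, with a flag array standing in for the linked list:

#include <stdio.h>

#define NCPUS	2
#define NTYPES	4	/* MCA, INIT, CMC, CPE */

static int queued[NCPUS][NTYPES];	/* 1 = entry already on the queue */

static void queue_record(int cpu, int type)
{
	if (queued[cpu][type]) {	/* already listed: nothing to add */
		printf("cpu%d/type%d already queued\n", cpu, type);
		return;
	}
	queued[cpu][type] = 1;
	printf("cpu%d/type%d queued\n", cpu, type);
}

int main(void)
{
	queue_record(0, 2);
	queue_record(0, 2);	/* second call is a no-op */
	return 0;
}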
-
-/*
- * platform dependent error handling
- */
-#ifndef PLATFORM_MCA_HANDLERS
-
-#ifdef CONFIG_ACPI
-
-#ifdef XEN
-/**
- * Copy from linux/kernel/irq/manage.c
- *
- * disable_irq_nosync - disable an irq without waiting
- * @irq: Interrupt to disable
- *
- * Disable the selected interrupt line. Disables and Enables are
- * nested.
- * Unlike disable_irq(), this function does not ensure existing
- * instances of the IRQ handler have completed before returning.
- *
- * This function may be called from IRQ context.
- */
-void disable_irq_nosync(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- if (irq >= NR_IRQS)
- return;
-
- spin_lock_irqsave(&desc->lock, flags);
- if (!desc->arch.depth++) {
- desc->status |= IRQ_DISABLED;
- desc->handler->disable(desc);
- }
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-/**
- * Copy from linux/kernel/irq/manage.c
- *
- * enable_irq - enable handling of an irq
- * @irq: Interrupt to enable
- *
- * Undoes the effect of one call to disable_irq(). If this
- * matches the last disable, processing of interrupts on this
- * IRQ line is re-enabled.
- *
- * This function may be called from IRQ context.
- */
-void enable_irq(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- if (irq >= NR_IRQS)
- return;
-
- spin_lock_irqsave(&desc->lock, flags);
- switch (desc->arch.depth) {
- case 0:
- WARN_ON(1);
- break;
- case 1: {
- unsigned int status = desc->status & ~IRQ_DISABLED;
-
- desc->status = status;
- if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
- desc->status = status | IRQ_REPLAY;
- hw_resend_irq(desc->handler,irq);
- }
- desc->handler->enable(desc);
- /* fall-through */
- }
- default:
- desc->arch.depth--;
- }
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-#endif /* XEN */
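These two copies from linux/kernel/irq/manage.c implement depth-counted nesting: disables stack, and the line is only physically re-enabled when the last disable has been undone. A minimal model of the depth logic; locking and the pending/replay resend are omitted.

#include <stdio.h>

struct irq_line { int depth; int masked; };

static void irq_disable(struct irq_line *l)
{
	if (!l->depth++)
		l->masked = 1;		/* only the first disable masks it */
}

static void irq_enable(struct irq_line *l)
{
	if (l->depth == 0) {
		printf("WARN: unbalanced enable\n");
		return;
	}
	if (--l->depth == 0)
		l->masked = 0;		/* only the last enable unmasks it */
}

int main(void)
{
	struct irq_line l = { 0, 0 };

	irq_disable(&l);
	irq_disable(&l);
	irq_enable(&l);
	printf("after one enable:  masked=%d\n", l.masked);	/* still 1 */
	irq_enable(&l);
	printf("after two enables: masked=%d\n", l.masked);	/* now 0 */
	return 0;
}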
-
-int cpe_vector = -1;
-
-static irqreturn_t
-ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
-{
- static unsigned long cpe_history[CPE_HISTORY_LENGTH];
- static int index;
- static DEFINE_SPINLOCK(cpe_history_lock);
-
- IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
- __FUNCTION__, cpe_irq, smp_processor_id());
-
- /* SAL spec states this should run w/ interrupts enabled */
- local_irq_enable();
-
-#ifndef XEN
- /* Get the CPE error record and log it */
- ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
-#else
- ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
- /* CPE error does not inform to dom0 but the following codes are
- reserved for future implementation */
-/* send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CPE); */
-#endif
-
- spin_lock(&cpe_history_lock);
- if (!cpe_poll_enabled && cpe_vector >= 0) {
-
- int i, count = 1; /* we know 1 happened now */
- unsigned long now = jiffies;
-
- for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
- if (now - cpe_history[i] <= HZ)
- count++;
- }
-
- IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count,
CPE_HISTORY_LENGTH);
- if (count >= CPE_HISTORY_LENGTH) {
-
- cpe_poll_enabled = 1;
- spin_unlock(&cpe_history_lock);
- disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
-
- /*
- * Corrected errors will still be corrected, but
- * make sure there's a log somewhere that indicates
- * something is generating more than we can handle.
- */
- printk(KERN_WARNING "WARNING: Switching to polling CPE
handler; error records may be lost\n");
-
- mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
-
- /* lock already released, get out now */
- return IRQ_HANDLED;
- } else {
- cpe_history[index++] = now;
- if (index == CPE_HISTORY_LENGTH)
- index = 0;
- }
- }
- spin_unlock(&cpe_history_lock);
- return IRQ_HANDLED;
-}
-
-#endif /* CONFIG_ACPI */
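Both the CPE handler above and the CMC handler later in this file use the same storm detector: keep the timestamps of the last N interrupts in a small ring, and if N of them land within one second, drop back to polled operation. A standalone sketch of that check, with plain integers in place of jiffies:

#include <stdio.h>

#define HISTORY_LEN	5
#define HZ		1000

static unsigned long history[HISTORY_LEN];
static int idx;

/* Returns 1 when the threshold is hit and the caller should poll. */
static int storm_check(unsigned long now)
{
	int i, count = 1;		/* the event that just happened */

	for (i = 0; i < HISTORY_LEN; i++)
		if (now - history[i] <= HZ)
			count++;
	if (count >= HISTORY_LEN)
		return 1;
	history[idx++] = now;
	if (idx == HISTORY_LEN)
		idx = 0;
	return 0;
}

int main(void)
{
	unsigned long t;

	for (t = 10000; t < 10005; t++)	/* five events within one "second" */
		if (storm_check(t))
			printf("switching to polling at t=%lu\n", t);
	return 0;
}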
-
-static void
-show_min_state (pal_min_state_area_t *minstate)
-{
- u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
- u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
-
- printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
- printk("pr\t\t%016lx\n", minstate->pmsa_pr);
- printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n",
minstate->pmsa_br0);
- printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
- printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
- printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
- printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
- printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
- printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
- printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
- printk("b1\t\t%016lx ", minstate->pmsa_br1);
- print_symbol("%s\n", minstate->pmsa_br1);
-
- printk("\nstatic registers r0-r15:\n");
- printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
- 0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
- printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_gr[3], minstate->pmsa_gr[4],
- minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
- printk(" r8-11 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_gr[7], minstate->pmsa_gr[8],
- minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
- printk("r12-15 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_gr[11], minstate->pmsa_gr[12],
- minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
-
- printk("\nbank 0:\n");
- printk("r16-19 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
- minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
- printk("r20-23 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
- minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
- printk("r24-27 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
- minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
- printk("r28-31 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
- minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
-
- printk("\nbank 1:\n");
- printk("r16-19 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
- minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
- printk("r20-23 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
- minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
- printk("r24-27 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
- minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
- printk("r28-31 %016lx %016lx %016lx %016lx\n",
- minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
- minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
-}
-
-static void
-fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
-{
- u64 *dst_banked, *src_banked, bit, shift, nat_bits;
- int i;
-
- /*
- * First, update the pt-regs and switch-stack structures with the contents stored
- * in the min-state area:
- */
- if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
- pt->cr_ipsr = ms->pmsa_xpsr;
- pt->cr_iip = ms->pmsa_xip;
- pt->cr_ifs = ms->pmsa_xfs;
- } else {
- pt->cr_ipsr = ms->pmsa_ipsr;
- pt->cr_iip = ms->pmsa_iip;
- pt->cr_ifs = ms->pmsa_ifs;
- }
- pt->ar_rsc = ms->pmsa_rsc;
- pt->pr = ms->pmsa_pr;
- pt->r1 = ms->pmsa_gr[0];
- pt->r2 = ms->pmsa_gr[1];
- pt->r3 = ms->pmsa_gr[2];
- sw->r4 = ms->pmsa_gr[3];
- sw->r5 = ms->pmsa_gr[4];
- sw->r6 = ms->pmsa_gr[5];
- sw->r7 = ms->pmsa_gr[6];
- pt->r8 = ms->pmsa_gr[7];
- pt->r9 = ms->pmsa_gr[8];
- pt->r10 = ms->pmsa_gr[9];
- pt->r11 = ms->pmsa_gr[10];
- pt->r12 = ms->pmsa_gr[11];
- pt->r13 = ms->pmsa_gr[12];
- pt->r14 = ms->pmsa_gr[13];
- pt->r15 = ms->pmsa_gr[14];
- dst_banked = &pt->r16; /* r16-r31 are contiguous in struct pt_regs */
- src_banked = ms->pmsa_bank1_gr;
- for (i = 0; i < 16; ++i)
- dst_banked[i] = src_banked[i];
- pt->b0 = ms->pmsa_br0;
- sw->b1 = ms->pmsa_br1;
-
- /* construct the NaT bits for the pt-regs structure: */
-# define PUT_NAT_BIT(dst, addr) \
- do { \
- bit = nat_bits & 1; nat_bits >>= 1; \
- shift = ((unsigned long) addr >> 3) & 0x3f; \
- dst = ((dst) & ~(1UL << shift)) | (bit << shift); \
- } while (0)
-
- /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
- shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
- nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
-
- PUT_NAT_BIT(sw->caller_unat, &pt->r1);
- PUT_NAT_BIT(sw->caller_unat, &pt->r2);
- PUT_NAT_BIT(sw->caller_unat, &pt->r3);
- PUT_NAT_BIT(sw->ar_unat, &sw->r4);
- PUT_NAT_BIT(sw->ar_unat, &sw->r5);
- PUT_NAT_BIT(sw->ar_unat, &sw->r6);
- PUT_NAT_BIT(sw->ar_unat, &sw->r7);
- PUT_NAT_BIT(sw->caller_unat, &pt->r8); PUT_NAT_BIT(sw->caller_unat, &pt->r9);
- PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11);
- PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13);
- PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15);
- nat_bits >>= 16; /* skip over bank0 NaT bits */
- PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17);
- PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19);
- PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21);
- PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23);
- PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25);
- PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27);
- PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29);
- PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31);
-}
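The trickiest step in fetch_min_state() is the NaT handling: the min-state area stores one NaT bit per saved register, and the code first rotates the 64-bit NaT word so bit 0 lines up with pmsa_gr[0], then scatters the bits into the UNaT collections via PUT_NAT_BIT. The rotation itself is an ordinary 64-bit rotate right; a sketch, assuming a 64-bit unsigned long and guarding the shift==0 case that the open-coded expression above relies on never hitting:

#include <stdio.h>

static unsigned long rotr64(unsigned long x, unsigned int shift)
{
	shift &= 0x3f;
	return shift ? (x >> shift) | (x << (64 - shift)) : x;
}

int main(void)
{
	unsigned long nat_bits = 0x8000000000000001UL;

	/* e.g. pmsa_gr[] starting 8 bytes into the NaT collection window
	 * gives shift = 1 */
	printf("rotated: 0x%016lx\n", rotr64(nat_bits, 1));
	return 0;
}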
-
-#ifdef XEN
-static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
-static atomic_t num_stopped_cpus = ATOMIC_INIT(0);
-extern void show_stack (struct task_struct *, unsigned long *);
-
-#define CPU_FLUSH_RETRY_MAX 5
-static void
-init_cache_flush (void)
-{
- unsigned long flags;
- int i;
- s64 rval = 0;
- u64 vector, progress = 0;
-
- for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
- local_irq_save(flags);
- rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
- 0, &progress, &vector);
- local_irq_restore(flags);
- if (rval == 0){
- printk("\nPAL cache flush success\n");
- return;
- }
- }
- printk("\nPAL cache flush failed. status=%ld\n",rval);
-}
-
-static void inline
-save_ksp (struct unw_frame_info *info)
-{
- current->arch._thread.ksp = (__u64)(info->sw) - 16;
- wmb();
- init_cache_flush();
-}
-
-static void
-freeze_cpu_osinit (struct unw_frame_info *info, void *arg)
-{
- save_ksp(info);
- atomic_inc(&num_stopped_cpus);
- printk("%s: CPU%d init handler done\n",
- __FUNCTION__, smp_processor_id());
- for (;;)
- local_irq_disable();
-}
-
-/* FIXME */
-static void
-try_crashdump(struct unw_frame_info *info, void *arg)
-{
- save_ksp(info);
- printk("\nINIT dump complete. Please reboot now.\n");
- for (;;)
- local_irq_disable();
-}
-#endif /* XEN */
-
-static void
-init_handler_platform (pal_min_state_area_t *ms,
- struct pt_regs *pt, struct switch_stack *sw)
-{
- struct unw_frame_info info;
-
- /* if a kernel debugger is available call it here else just dump the registers */
-
- /*
- * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be
- * generated via the BMC's command-line interface, but since the console is on the
- * same serial line, the user will need some time to switch out of the BMC before
- * the dump begins.
- */
- printk("Delaying for 5 seconds...\n");
- udelay(5*1000000);
-#ifdef XEN
- fetch_min_state(ms, pt, sw);
- spin_lock(&show_stack_lock);
-#endif
- show_min_state(ms);
-
-#ifdef XEN
- printk("Backtrace of current vcpu (vcpu_id %d of domid %d)\n",
- current->vcpu_id, current->domain->domain_id);
-#else
- printk("Backtrace of current task (pid %d, %s)\n", current->pid,
current->comm);
- fetch_min_state(ms, pt, sw);
-#endif
- unw_init_from_interruption(&info, current, pt, sw);
- ia64_do_show_stack(&info, NULL);
-#ifdef XEN
- spin_unlock(&show_stack_lock);
-
- if (spin_trylock(&init_dump_lock)) {
- struct domain *d;
- struct vcpu *v;
-#ifdef CONFIG_SMP
- int other_cpus = num_online_cpus() - 1;
- int wait = 1000 * other_cpus;
-
- while ((atomic_read(&num_stopped_cpus) != other_cpus) && wait--)
- udelay(1000);
- if (other_cpus && wait < 0)
- printk("timeout %d\n", atomic_read(&num_stopped_cpus));
-#endif
- if (opt_noreboot) {
- /* this route is for dump routine */
- unw_init_running(try_crashdump, pt);
- } else {
- rcu_read_lock(&domlist_read_lock);
- for_each_domain(d) {
- for_each_vcpu(d, v) {
- printk("Backtrace of current vcpu "
- "(vcpu_id %d of domid %d)\n",
- v->vcpu_id, d->domain_id);
- show_stack(v, NULL);
- }
- }
- rcu_read_unlock(&domlist_read_lock);
- }
- }
- unw_init_running(freeze_cpu_osinit, NULL);
-#else /* XEN */
-#ifdef CONFIG_SMP
- /* read_trylock() would be handy... */
- if (!tasklist_lock.write_lock)
- read_lock(&tasklist_lock);
-#endif
- {
- struct task_struct *g, *t;
- do_each_thread (g, t) {
- if (t == current)
- continue;
-
- printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
- show_stack(t, NULL);
- } while_each_thread (g, t);
- }
-#ifdef CONFIG_SMP
- if (!tasklist_lock.write_lock)
- read_unlock(&tasklist_lock);
-#endif
-
- printk("\nINIT dump complete. Please reboot now.\n");
-#endif /* XEN */
- while (1); /* hang city if no debugger */
-}
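When INIT hits, each slave CPU parks in freeze_cpu_osinit() after bumping num_stopped_cpus, while the monarch above gives the others a bounded time to check in (roughly 1000 iterations of udelay(1000) per expected CPU, about a second each) before dumping anyway. A tiny simulation of just the timeout logic, with the slave counter frozen so the timeout fires:

#include <stdio.h>

int main(void)
{
	int other_cpus = 3;
	int stopped = 2;		/* one CPU never checks in */
	long wait = 1000 * other_cpus;	/* each tick is udelay(1000) upstream */

	while (stopped != other_cpus && wait--)
		;			/* the real loop sleeps 1ms per pass */
	if (other_cpus && wait < 0)
		printf("timeout: only %d of %d CPUs stopped\n", stopped, other_cpus);
	return 0;
}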
-
-#ifdef CONFIG_ACPI
-/*
- * ia64_mca_register_cpev
- *
- * Register the corrected platform error vector with SAL.
- *
- * Inputs
- * cpev Corrected Platform Error Vector number
- *
- * Outputs
- * None
- */
-static void
-ia64_mca_register_cpev (int cpev)
-{
- /* Register the CPE interrupt vector with SAL */
- struct ia64_sal_retval isrv;
-
- isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
- if (isrv.status) {
- printk(KERN_ERR "Failed to register Corrected Platform "
- "Error interrupt vector with SAL (status %ld)\n",
isrv.status);
- return;
- }
-
- IA64_MCA_DEBUG("%s: corrected platform error "
- "vector %#x registered\n", __FUNCTION__, cpev);
-}
-#endif /* CONFIG_ACPI */
-
-#endif /* PLATFORM_MCA_HANDLERS */
-
-/*
- * ia64_mca_cmc_vector_setup
- *
- * Setup the corrected machine check vector register in the processor.
- * (The interrupt is masked on boot. ia64_mca_late_init unmask this.)
- * This function is invoked on a per-processor basis.
- *
- * Inputs
- * None
- *
- * Outputs
- * None
- */
-void
-ia64_mca_cmc_vector_setup (void)
-{
- cmcv_reg_t cmcv;
-
- cmcv.cmcv_regval = 0;
- cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */
- cmcv.cmcv_vector = IA64_CMC_VECTOR;
- ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
-
- IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x registered.\n",
- __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
-
- IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
- __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
-}
-
-/*
- * ia64_mca_cmc_vector_disable
- *
- * Mask the corrected machine check vector register in the processor.
- * This function is invoked on a per-processor basis.
- *
- * Inputs
- * dummy(unused)
- *
- * Outputs
- * None
- */
-static void
-ia64_mca_cmc_vector_disable (void *dummy)
-{
- cmcv_reg_t cmcv;
-
- cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
-
- cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
- ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
-
- IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x disabled.\n",
- __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
-}
-
-/*
- * ia64_mca_cmc_vector_enable
- *
- * Unmask the corrected machine check vector register in the processor.
- * This function is invoked on a per-processor basis.
- *
- * Inputs
- * dummy(unused)
- *
- * Outputs
- * None
- */
-static void
-ia64_mca_cmc_vector_enable (void *dummy)
-{
- cmcv_reg_t cmcv;
-
- cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
-
- cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
- ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
-
- IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x enabled.\n",
- __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
-}
-
-#ifndef XEN
-/*
- * ia64_mca_cmc_vector_disable_keventd
- *
- * Called via keventd (smp_call_function() is not safe in interrupt context) to
- * disable the cmc interrupt vector.
- */
-static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
-{
- on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
-}
-
-/*
- * ia64_mca_cmc_vector_enable_keventd
- *
- * Called via keventd (smp_call_function() is not safe in interrupt context) to
- * enable the cmc interrupt vector.
- */
-static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
-{
- on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
-}
-#endif /* !XEN */
-
-/*
- * ia64_mca_wakeup_ipi_wait
- *
- * Wait for the inter-cpu interrupt to be sent by the
- * monarch processor once it is done with handling the
- * MCA.
- *
- * Inputs : None
- * Outputs : None
- */
-static void
-ia64_mca_wakeup_ipi_wait(void)
-{
- int irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
- int irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
- u64 irr = 0;
-
- do {
- switch(irr_num) {
- case 0:
- irr = ia64_getreg(_IA64_REG_CR_IRR0);
- break;
- case 1:
- irr = ia64_getreg(_IA64_REG_CR_IRR1);
- break;
- case 2:
- irr = ia64_getreg(_IA64_REG_CR_IRR2);
- break;
- case 3:
- irr = ia64_getreg(_IA64_REG_CR_IRR3);
- break;
- }
- cpu_relax();
- } while (!(irr & (1UL << irr_bit))) ;
-}
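ia64_mca_wakeup_ipi_wait() polls the wakeup vector's bit directly in the IRRs: with 256 vectors spread over four 64-bit registers, bits 7:6 of the vector pick the register (hence the switch) and bits 5:0 pick the bit within it. A sketch of the indexing, with an array standing in for cr.IRR0-3; the vector value here is made up:

#include <stdio.h>

static unsigned long irr[4];	/* stands in for cr.IRR0..cr.IRR3 */

static int vector_pending(int vector)
{
	int reg = vector >> 6;		/* which 64-bit IRR register */
	int bit = vector & 0x3f;	/* which bit within it */

	return (irr[reg] >> bit) & 1;
}

int main(void)
{
	int wakeup_vector = 0xf2;	/* made-up vector number */

	irr[wakeup_vector >> 6] |= 1UL << (wakeup_vector & 0x3f);
	printf("pending = %d\n", vector_pending(wakeup_vector));
	return 0;
}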
-
-/*
- * ia64_mca_wakeup
- *
- * Send an inter-cpu interrupt to wake-up a particular cpu
- * and mark that cpu to be out of rendez.
- *
- * Inputs : cpuid
- * Outputs : None
- */
-static void
-ia64_mca_wakeup(int cpu)
-{
- platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
- ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
-
-}
-
-/*
- * ia64_mca_wakeup_all
- *
- * Wakeup all the cpus which have rendez'ed previously.
- *
- * Inputs : None
- * Outputs : None
- */
-static void
-ia64_mca_wakeup_all(void)
-{
- int cpu;
-
- /* Clear the Rendez checkin flag for all cpus */
- for(cpu = 0; cpu < NR_CPUS; cpu++) {
- if (!cpu_online(cpu))
- continue;
- if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
- ia64_mca_wakeup(cpu);
- }
-
-}
-
-/*
- * ia64_mca_rendez_interrupt_handler
- *
- * This is handler used to put slave processors into spinloop
- * while the monarch processor does the mca handling and later
- * wake each slave up once the monarch is done.
- *
- * Inputs : None
- * Outputs : None
- */
-static irqreturn_t
-ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
-{
- unsigned long flags;
- int cpu = smp_processor_id();
-
- /* Mask all interrupts */
- local_irq_save(flags);
-
- ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
- /* Register with the SAL monarch that the slave has
- * reached SAL
- */
- ia64_sal_mc_rendez();
-
- /* Wait for the wakeup IPI from the monarch
- * This waiting is done by polling on the wakeup-interrupt
- * vector bit in the processor's IRRs
- */
- ia64_mca_wakeup_ipi_wait();
-
- /* Enable all interrupts */
- local_irq_restore(flags);
- return IRQ_HANDLED;
-}
-
-/*
- * ia64_mca_wakeup_int_handler
- *
- * The interrupt handler for processing the inter-cpu interrupt to the
- * slave cpu which was spinning in the rendez loop.
- * Since this spinning is done by turning off the interrupts and
- * polling on the wakeup-interrupt bit in the IRR, there is
- * nothing useful to be done in the handler.
- *
- * Inputs : wakeup_irq (Wakeup-interrupt bit)
- * arg (Interrupt handler specific argument)
- * ptregs (Exception frame at the time of the interrupt)
- * Outputs : None
- *
- */
-static irqreturn_t
-ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
-{
- return IRQ_HANDLED;
-}
-
-/*
- * ia64_return_to_sal_check
- *
- * This is function called before going back from the OS_MCA handler
- * to the OS_MCA dispatch code which finally takes the control back
- * to the SAL.
- * The main purpose of this routine is to setup the OS_MCA to SAL
- * return state which can be used by the OS_MCA dispatch code
- * just before going back to SAL.
- *
- * Inputs : None
- * Outputs : None
- */
-
-static void
-ia64_return_to_sal_check(int recover)
-{
-#ifdef XEN
- int cpu = smp_processor_id();
-#endif
-
- /* Copy over some relevant stuff from the sal_to_os_mca_handoff
- * so that it can be used at the time of os_mca_to_sal_handoff
- */
-#ifdef XEN
- ia64_os_to_sal_handoff_state.imots_sal_gp =
- ia64_sal_to_os_handoff_state[cpu].imsto_sal_gp;
-
- ia64_os_to_sal_handoff_state.imots_sal_check_ra =
- ia64_sal_to_os_handoff_state[cpu].imsto_sal_check_ra;
-#else
- ia64_os_to_sal_handoff_state.imots_sal_gp =
- ia64_sal_to_os_handoff_state.imsto_sal_gp;
-
- ia64_os_to_sal_handoff_state.imots_sal_check_ra =
- ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
-#endif
-
- if (recover)
- ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
- else
- ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
-
- /* Default = tell SAL to return to same context */
- ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
-
-#ifdef XEN
- ia64_os_to_sal_handoff_state.imots_new_min_state =
- (u64 *)ia64_sal_to_os_handoff_state[cpu].pal_min_state;
-#else
- ia64_os_to_sal_handoff_state.imots_new_min_state =
- (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
-#endif
-
-}
-
-/* Function pointer for extra MCA recovery */
-int (*ia64_mca_ucmc_extension)
- (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
- = NULL;
-
-int
-ia64_reg_MCA_extension(void *fn)
-{
- if (ia64_mca_ucmc_extension)
- return 1;
-
- ia64_mca_ucmc_extension = fn;
- return 0;
-}
-
-void
-ia64_unreg_MCA_extension(void)
-{
- if (ia64_mca_ucmc_extension)
- ia64_mca_ucmc_extension = NULL;
-}
-
-EXPORT_SYMBOL(ia64_reg_MCA_extension);
-EXPORT_SYMBOL(ia64_unreg_MCA_extension);
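The MCA recovery extension above is a single global hook: registration fails (returns 1) if something is already installed, and unregistration just clears the pointer. A reduced sketch of the pattern, with the handoff-state argument types collapsed to a single void* for brevity:

#include <stdio.h>

static int (*mca_extension)(void *rec);

static int reg_extension(int (*fn)(void *))
{
	if (mca_extension)
		return 1;		/* slot already taken */
	mca_extension = fn;
	return 0;
}

static void unreg_extension(void)
{
	mca_extension = NULL;
}

static int my_recover(void *rec)
{
	(void)rec;
	return 1;			/* pretend the error was recovered */
}

int main(void)
{
	printf("first register:  %d\n", reg_extension(my_recover));	/* 0 */
	printf("second register: %d\n", reg_extension(my_recover));	/* 1 */
	unreg_extension();
	return 0;
}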
-
-/*
- * ia64_mca_ucmc_handler
- *
- * This is uncorrectable machine check handler called from OS_MCA
- * dispatch code which is in turn called from SAL_CHECK().
- * This is the place where the core of OS MCA handling is done.
- * Right now the logs are extracted and displayed in a well-defined
- * format. This handler code is supposed to be run only on the
- * monarch processor. Once the monarch is done with MCA handling
- * further MCA logging is enabled by clearing logs.
- * Monarch also has the duty of sending wakeup-IPIs to pull the
- * slave processors out of rendezvous spinloop.
- *
- * Inputs : None
- * Outputs : None
- */
-void
-ia64_mca_ucmc_handler(void)
-{
-#ifdef XEN
- int cpu = smp_processor_id();
- pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
- &ia64_sal_to_os_handoff_state[cpu].proc_state_param;
-#else
- pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
- &ia64_sal_to_os_handoff_state.proc_state_param;
-#endif
- int recover;
-
-#ifndef XEN
- /* Get the MCA error record and log it */
- ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
-#else
- ia64_log_queue(SAL_INFO_TYPE_MCA, VIRQ_MCA_CMC);
- send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
-#endif
-
- /* TLB error is only exist in this SAL error record */
- recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
- /* other error recovery */
-#ifndef XEN
- || (ia64_mca_ucmc_extension
- && ia64_mca_ucmc_extension(
- IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
- &ia64_sal_to_os_handoff_state,
- &ia64_os_to_sal_handoff_state));
-#else
- ;
-#endif
-
-#ifndef XEN
- if (recover) {
- sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
- rh->severity = sal_log_severity_corrected;
- ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
- }
-#endif
- /*
- * Wakeup all the processors which are spinning in the rendezvous
- * loop.
- */
- ia64_mca_wakeup_all();
-
- /* Return to SAL */
- ia64_return_to_sal_check(recover);
-}
-
-#ifndef XEN
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
-#endif
-
-/*
- * ia64_mca_cmc_int_handler
- *
- * This is corrected machine check interrupt handler.
- * Right now the logs are extracted and displayed in a well-defined
- * format.
- *
- * Inputs
- * interrupt number
- * client data arg ptr
- * saved registers ptr
- *
- * Outputs
- * None
- */
-static irqreturn_t
-ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
-{
- static unsigned long cmc_history[CMC_HISTORY_LENGTH];
- static int index;
- static DEFINE_SPINLOCK(cmc_history_lock);
-
- IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
- __FUNCTION__, cmc_irq, smp_processor_id());
-
- /* SAL spec states this should run w/ interrupts enabled */
- local_irq_enable();
-
-#ifndef XEN
- /* Get the CMC error record and log it */
- ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
-#else
- ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
- send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
-#endif
-
- spin_lock(&cmc_history_lock);
- if (!cmc_polling_enabled) {
- int i, count = 1; /* we know 1 happened now */
- unsigned long now = jiffies;
-
- for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
- if (now - cmc_history[i] <= HZ)
- count++;
- }
-
- IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count,
CMC_HISTORY_LENGTH);
- if (count >= CMC_HISTORY_LENGTH) {
-
- cmc_polling_enabled = 1;
- spin_unlock(&cmc_history_lock);
-#ifndef XEN /* XXX FIXME */
- schedule_work(&cmc_disable_work);
-#else
- cpumask_raise_softirq(&cpu_online_map,
- CMC_DISABLE_SOFTIRQ);
-#endif
-
- /*
- * Corrected errors will still be corrected, but
- * make sure there's a log somewhere that indicates
- * something is generating more than we can handle.
- */
- printk(KERN_WARNING "WARNING: Switching to polling CMC
handler; error records may be lost\n");
-
- mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
-
- /* lock already released, get out now */
- return IRQ_HANDLED;
- } else {
- cmc_history[index++] = now;
- if (index == CMC_HISTORY_LENGTH)
- index = 0;
- }
- }
- spin_unlock(&cmc_history_lock);
- return IRQ_HANDLED;
-}
-
-/*
- * ia64_mca_cmc_int_caller
- *
- * Triggered by sw interrupt from CMC polling routine. Calls
- * real interrupt handler and either triggers a sw interrupt
- * on the next cpu or does cleanup at the end.
- *
- * Inputs
- * interrupt number
- * client data arg ptr
- * saved registers ptr
- * Outputs
- * handled
- */
-static irqreturn_t
-ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
-{
- static int start_count = -1;
- unsigned int cpuid;
-
- cpuid = smp_processor_id();
-
- /* If first cpu, update count */
- if (start_count == -1)
- start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
-
-#ifndef XEN
- ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
-#else
- IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
- __FUNCTION__, cmc_irq, smp_processor_id());
- ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
-#endif
-
- for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
-
- if (cpuid < NR_CPUS) {
- platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
- } else {
- /* If no log record, switch out of polling mode */
- if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
-
- printk(KERN_WARNING "Returning to interrupt driven CMC
handler\n");
-#ifndef XEN /* XXX FIXME */
- schedule_work(&cmc_enable_work);
-#else
- cpumask_raise_softirq(&cpu_online_map,
- CMC_ENABLE_SOFTIRQ);
-#endif
- cmc_polling_enabled = 0;
-
- } else {
-
- mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
- }
-
- start_count = -1;
- }
- return IRQ_HANDLED;
-}
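The polling path above is a cascade rather than a broadcast: the timer fires on the first online CPU, each CPU polls its local SAL logs and forwards an IPI to the next online CPU, and the last one compares the record count against the value captured at the start of the sweep to decide whether to return to interrupt-driven mode. A loop-based simulation of that sweep, with the IPIs replaced by direct calls:

#include <stdio.h>

#define NCPUS 4

static int online[NCPUS] = { 1, 0, 1, 1 };	/* cpu1 offline */
static unsigned long log_count;

static void poll_one_cpu(int cpu)
{
	printf("cpu%d: polling local SAL logs\n", cpu);
	/* log_count++ here if a record were retrieved */
}

static void cascade(void)
{
	unsigned long start_count = log_count;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (!online[cpu])
			continue;	/* skipped, as the cpuid scan does */
		poll_one_cpu(cpu);	/* upstream, an IPI lands here */
	}
	if (log_count == start_count)
		printf("no new records: back to interrupt-driven mode\n");
}

int main(void)
{
	cascade();
	return 0;
}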
-
-/*
- * ia64_mca_cmc_poll
- *
- * Poll for Corrected Machine Checks (CMCs)
- *
- * Inputs : dummy(unused)
- * Outputs : None
- *
- */
-static void
-#ifndef XEN
-ia64_mca_cmc_poll (unsigned long dummy)
-#else
-ia64_mca_cmc_poll (void *dummy)
-#endif
-{
- /* Trigger a CMC interrupt cascade */
- platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
-}
-
-/*
- * ia64_mca_cpe_int_caller
- *
- * Triggered by sw interrupt from CPE polling routine. Calls
- * real interrupt handler and either triggers a sw interrupt
- * on the next cpu or does cleanup at the end.
- *
- * Inputs
- * interrupt number
- * client data arg ptr
- * saved registers ptr
- * Outputs
- * handled
- */
-#ifdef CONFIG_ACPI
-
-static irqreturn_t
-ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
-{
- static int start_count = -1;
-#ifdef XEN
- static unsigned long poll_time = MIN_CPE_POLL_INTERVAL;
-#else
- static int poll_time = MIN_CPE_POLL_INTERVAL;
-#endif
- unsigned int cpuid;
-
- cpuid = smp_processor_id();
-
- /* If first cpu, update count */
- if (start_count == -1)
- start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
-
-#ifndef XEN
- ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
-#else
- IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
- __FUNCTION__, cpe_irq, smp_processor_id());
- ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
-#endif
-
- for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
-
- if (cpuid < NR_CPUS) {
- platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
- } else {
- /*
- * If a log was recorded, increase our polling frequency,
- * otherwise, backoff or return to interrupt mode.
- */
- if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
- poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
- } else if (cpe_vector < 0) {
- poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
- } else {
- poll_time = MIN_CPE_POLL_INTERVAL;
-
- printk(KERN_WARNING "Returning to interrupt driven CPE
handler\n");
- enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
- cpe_poll_enabled = 0;
- }
-
- if (cpe_poll_enabled)
- mod_timer(&cpe_poll_timer, jiffies + poll_time);
- start_count = -1;
- }
- return IRQ_HANDLED;
-}
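CPE polling adapts its period: a sweep that found a record halves the interval, an idle sweep doubles it (when no CPEI vector is available), and both directions are clamped to the 2-minute/15-minute bounds defined near the top of the file. A sketch of the interval update, in arbitrary second units:

#include <stdio.h>

#define MIN_POLL_INTERVAL	(2 * 60)	/* seconds; 2 minutes */
#define MAX_POLL_INTERVAL	(15 * 60)	/* 15 minutes */

static unsigned long next_interval(unsigned long cur, int found_record)
{
	if (found_record)
		cur /= 2;		/* poll faster while records arrive */
	else
		cur *= 2;		/* back off when idle */
	if (cur < MIN_POLL_INTERVAL)
		cur = MIN_POLL_INTERVAL;
	if (cur > MAX_POLL_INTERVAL)
		cur = MAX_POLL_INTERVAL;
	return cur;
}

int main(void)
{
	unsigned long t = MIN_POLL_INTERVAL;

	t = next_interval(t, 0);	/* idle: 4 minutes */
	t = next_interval(t, 0);	/* idle: 8 minutes */
	t = next_interval(t, 1);	/* record found: 4 minutes */
	printf("interval = %lu seconds\n", t);
	return 0;
}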
-
-/*
- * ia64_mca_cpe_poll
- *
- * Poll for Corrected Platform Errors (CPEs), trigger interrupt
- * on first cpu, from there it will trickle through all the cpus.
- *
- * Inputs : dummy(unused)
- * Outputs : None
- *
- */
-static void
-#ifndef XEN
-ia64_mca_cpe_poll (unsigned long dummy)
-#else
-ia64_mca_cpe_poll (void *dummy)
-#endif
-{
- /* Trigger a CPE interrupt cascade */
- platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
-}
-
-#endif /* CONFIG_ACPI */
-
-/*
- * C portion of the OS INIT handler
- *
- * Called from ia64_monarch_init_handler
- *
- * Inputs: pointer to pt_regs where processor info was saved.
- *
- * Returns:
- * 0 if SAL must warm boot the System
- * 1 if SAL must return to interrupted context using PAL_MC_RESUME
- *
- */
-void
-ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
-{
- pal_min_state_area_t *ms;
-
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog