[Xen-devel] [PATCH 1/10] linux 2.6.18: assembly annotations

This adds (or moves to the proper place) annotations paralleling those
added in native code in 2.6.18. To make this look less ugly, it required
moving a few things around.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
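(Editorial sketch, not part of the patch: for readers unfamiliar with the macros used throughout the hunks below, this is a minimal illustration of how the CFI wrappers from <asm/dwarf2.h> annotate a hand-written i386 routine so the kernel's DWARF2 unwinder can track the frame. The label cfi_example and the choice of %ebx are invented for the example; ENTRY()/END() are the linkage.h macros used elsewhere in entry.S.)

#include <linux/linkage.h>
#include <asm/dwarf2.h>

ENTRY(cfi_example)
        CFI_STARTPROC
        pushl %ebx
        CFI_ADJUST_CFA_OFFSET 4         # the frame grew by 4 bytes
        CFI_REL_OFFSET ebx, 0           # caller's %ebx now sits at the top of the stack
        # ... work that may fault or be interrupted ...
        popl %ebx
        CFI_ADJUST_CFA_OFFSET -4        # the frame shrank again
        CFI_RESTORE ebx                 # %ebx is live in its register again
        ret
        CFI_ENDPROC
END(cfi_example)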
Index: head-2007-02-27/arch/i386/kernel/entry-xen.S
===================================================================
--- head-2007-02-27.orig/arch/i386/kernel/entry-xen.S	2007-03-05 10:00:18.000000000 +0100
+++ head-2007-02-27/arch/i386/kernel/entry-xen.S	2007-03-01 17:49:29.000000000 +0100
@@ -368,6 +368,7 @@ sysexit_scrit: /**** START OF SYSEXIT CR
 	movl ESI(%esp), %esi
 	sysexit
 14:	__DISABLE_INTERRUPTS
+	TRACE_IRQS_OFF
 sysexit_ecrit:	/**** END OF SYSEXIT CRITICAL REGION ****/
 	push %esp
 	call evtchn_do_upcall
@@ -427,11 +428,13 @@ restore_nocheck:
 restore_nocheck:
 	movl EFLAGS(%esp), %eax
 	testl $(VM_MASK|NMI_MASK), %eax
+	CFI_REMEMBER_STATE
 	jnz hypervisor_iret
 	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
 	GET_VCPU_INFO
 	andb evtchn_upcall_mask(%esi),%al
 	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
+	CFI_REMEMBER_STATE
 	jnz restore_all_enable_events	# != 0 => enable event delivery
 #endif
 	TRACE_IRQS_IRET
@@ -455,8 +458,8 @@ iret_exc:
 	.long 1b,iret_exc
 .previous

-#ifndef CONFIG_XEN
 	CFI_RESTORE_STATE
+#ifndef CONFIG_XEN
 ldt_ss:
 	larl OLDSS(%esp), %eax
 	jnz restore_nocheck
@@ -485,14 +488,36 @@ ldt_ss:
 	.align 4
 	.long 1b,iret_exc
 .previous
-	CFI_ENDPROC
 #else
+	ALIGN
+restore_all_enable_events:
+	TRACE_IRQS_ON
+	__ENABLE_INTERRUPTS
+scrit:	/**** START OF CRITICAL REGION ****/
+	__TEST_PENDING
+	jnz  14f		# process more events if necessary...
+	RESTORE_REGS
+	addl $4, %esp
+	CFI_ADJUST_CFA_OFFSET -4
+1:	iret
+.section __ex_table,"a"
+	.align 4
+	.long 1b,iret_exc
+.previous
+14:	__DISABLE_INTERRUPTS
+	TRACE_IRQS_OFF
+	jmp  11f
+ecrit:  /**** END OF CRITICAL REGION ****/
+
+	CFI_RESTORE_STATE
 hypervisor_iret:
 	andl $~NMI_MASK, EFLAGS(%esp)
 	RESTORE_REGS
 	addl $4, %esp
+	CFI_ADJUST_CFA_OFFSET -4
 	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
 #endif
+	CFI_ENDPROC

 	# perform work that needs to be done immediately before resumption
 	ALIGN
@@ -736,7 +761,9 @@ error_code:
 # critical region we know that the entire frame is present and correct
 # so we can simply throw away the new one.
 ENTRY(hypervisor_callback)
+	RING0_INT_FRAME
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	movl EIP(%esp),%eax
 	cmpl $scrit,%eax
@@ -749,26 +776,13 @@ ENTRY(hypervisor_callback)
 	ja  11f
 	addl $OLDESP,%esp # Remove eflags...ebx from stack frame.
 11:	push %esp
+	CFI_ADJUST_CFA_OFFSET 4
 	call evtchn_do_upcall
 	add  $4,%esp
+	CFI_ADJUST_CFA_OFFSET -4
 	jmp  ret_from_intr
+	CFI_ENDPROC

-	ALIGN
-restore_all_enable_events:
-	__ENABLE_INTERRUPTS
-scrit:	/**** START OF CRITICAL REGION ****/
-	__TEST_PENDING
-	jnz  14f		# process more events if necessary...
-	RESTORE_REGS
-	addl $4, %esp
-1:	iret
-.section __ex_table,"a"
-	.align 4
-	.long 1b,iret_exc
-.previous
-14:	__DISABLE_INTERRUPTS
-	jmp  11b
-ecrit:  /**** END OF CRITICAL REGION ****/
 # [How we do the fixup]. We want to merge the current stack frame with the
 # just-interrupted frame.  How we do this depends on where in the critical
 # region the interrupted handler was executing, and so how many saved
@@ -835,6 +849,7 @@ ENTRY(failsafe_callback)
 	addl $16,%esp		# EAX != 0 => Category 2 (Bad IRET)
 	jmp iret_exc
 5:	addl $16,%esp		# EAX == 0 => Category 1 (Bad segment)
+	RING0_INT_FRAME
 	pushl $0
 	SAVE_ALL
 	jmp ret_from_exception
@@ -860,6 +875,7 @@ ENTRY(failsafe_callback)
 	.long 4b,9b;	\
 .previous
 #endif
+	CFI_ENDPROC

 ENTRY(coprocessor_error)
 	RING0_INT_FRAME
@@ -1187,8 +1203,11 @@ ENDPROC(arch_unwind_init_running)
 #endif

 ENTRY(fixup_4gb_segment)
+	RING0_EC_FRAME
 	pushl $do_fixup_4gb_segment
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC

 .section .rodata,"a"
 	.align 4
Index: head-2007-02-27/arch/i386/kernel/head-xen.S
===================================================================
--- head-2007-02-27.orig/arch/i386/kernel/head-xen.S	2007-03-05 10:00:18.000000000 +0100
+++ head-2007-02-27/arch/i386/kernel/head-xen.S	2007-03-02 15:33:04.000000000 +0100
@@ -9,6 +9,7 @@
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
+#include <asm/dwarf2.h>

 #include <xen/interface/xen.h>
 #include <xen/interface/elfnote.h>
@@ -72,7 +73,9 @@ ENTRY(startup_32)
 #define HYPERCALL_PAGE_OFFSET 0x1000
 .org HYPERCALL_PAGE_OFFSET
 ENTRY(hypercall_page)
+	CFI_STARTPROC
 	.skip 0x1000
+	CFI_ENDPROC

 /*
  * Real beginning of normal "text" segment
Index: head-2007-02-27/arch/x86_64/kernel/entry-xen.S
===================================================================
--- head-2007-02-27.orig/arch/x86_64/kernel/entry-xen.S	2007-03-05 10:00:18.000000000 +0100
+++ head-2007-02-27/arch/x86_64/kernel/entry-xen.S	2007-03-02 16:37:46.000000000 +0100
@@ -116,19 +116,21 @@ NMI_MASK = 0x80000000
 	CFI_ADJUST_CFA_OFFSET	-(6*8)
 	.endm

-	.macro	CFI_DEFAULT_STACK start=1
+	.macro	CFI_DEFAULT_STACK start=1,adj=0
 	.if \start
 	CFI_STARTPROC	simple
-	CFI_DEF_CFA	rsp,SS+8
+	CFI_DEF_CFA	rsp,SS+8-(\adj*ARGOFFSET)
 	.else
-	CFI_DEF_CFA_OFFSET SS+8
+	CFI_DEF_CFA_OFFSET SS+8-(\adj*ARGOFFSET)
 	.endif
+	.if \adj == 0
 	CFI_REL_OFFSET	r15,R15
 	CFI_REL_OFFSET	r14,R14
 	CFI_REL_OFFSET	r13,R13
 	CFI_REL_OFFSET	r12,R12
 	CFI_REL_OFFSET	rbp,RBP
 	CFI_REL_OFFSET	rbx,RBX
+	.endif
 	CFI_REL_OFFSET	r11,R11
 	CFI_REL_OFFSET	r10,R10
 	CFI_REL_OFFSET	r9,R9
@@ -363,8 +365,8 @@ ENTRY(int_ret_from_sys_call)
 	CFI_REL_OFFSET r9,R9-ARGOFFSET
 	CFI_REL_OFFSET r10,R10-ARGOFFSET
 	CFI_REL_OFFSET r11,R11-ARGOFFSET
-	TRACE_IRQS_OFF
 	XEN_BLOCK_EVENTS(%rsi)
+	TRACE_IRQS_OFF
 	testb $3,CS-ARGOFFSET(%rsp)
 	jnz 1f
 	/* Need to set the proper %ss (not NULL) for ring 3 iretq */
@@ -534,7 +536,7 @@ END(stub_rt_sigreturn)

 */
 retint_check:
-	CFI_DEFAULT_STACK
+	CFI_DEFAULT_STACK adj=1
 	movl threadinfo_flags(%rcx),%edx
 	andl %edi,%edx
 	CFI_REMEMBER_STATE
@@ -838,6 +840,7 @@ ENTRY(error_entry)
 	CFI_REL_OFFSET r15,R15
 #if 0
 	cmpl $__KERNEL_CS,CS(%rsp)
+	CFI_REMEMBER_STATE
 	je  error_kernelspace
 #endif
 error_call_handler:
@@ -864,7 +867,7 @@ error_exit:
 	TRACE_IRQS_IRETQ
 	jmp   retint_restore_args

-error_kernelspace:
+#if 0
 /*
  * We need to re-write the logic here because we don't do iretq to
  * to return to user mode. It's still possible that we get trap/fault
@@ -872,7 +875,8 @@ error_kernelspace:
  * for example).
 *
 */
-#if 0
+	CFI_RESTORE_STATE
+error_kernelspace:
 	incl %ebx
 	/* There are two places in the kernel that can potentially fault with
 	   usergs. Handle them here.  The exception handlers after
@@ -888,11 +892,13 @@ error_kernelspace:
 	cmpq $gs_change,RIP(%rsp)
 	je error_swapgs
 	jmp error_sti
-#endif
+#endif
+	CFI_ENDPROC
 END(error_entry)

 ENTRY(hypervisor_callback)
 	zeroentry do_hypervisor_callback
+END(hypervisor_callback)

 /*
  * Copied from arch/xen/i386/kernel/entry.S
@@ -909,48 +915,66 @@ ENTRY(hypervisor_callback)
 # existing activation in its critical region -- if so, we pop the current
 # activation and restart the handler using the previous one.
 ENTRY(do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
+	CFI_STARTPROC
 # Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
 # see the correct pointer to the pt_regs
 	movq %rdi, %rsp            # we don't return, adjust the stack frame
-11:	movq %gs:pda_irqstackptr,%rax
-	incl %gs:pda_irqcount
-	cmovzq %rax,%rsp
-	pushq %rdi
+	CFI_ENDPROC
+	CFI_DEFAULT_STACK
+11:	incl %gs:pda_irqcount
+	movq %rsp,%rbp
+	CFI_DEF_CFA_REGISTER rbp
+	cmovzq %gs:pda_irqstackptr,%rsp
+	pushq %rbp			# backlink for old unwinder
 	call evtchn_do_upcall
 	popq %rsp
+	CFI_DEF_CFA_REGISTER rsp
 	decl %gs:pda_irqcount
 	jmp  error_exit
+	CFI_ENDPROC
+END(do_hypervisor_callback)

 #ifdef CONFIG_X86_LOCAL_APIC
 KPROBE_ENTRY(nmi)
 	zeroentry do_nmi_callback
 ENTRY(do_nmi_callback)
+	CFI_STARTPROC
 	addq $8, %rsp
+	CFI_ENDPROC
+	CFI_DEFAULT_STACK
 	call do_nmi
 	orl  $NMI_MASK,EFLAGS(%rsp)
 	RESTORE_REST
 	XEN_BLOCK_EVENTS(%rsi)
+	TRACE_IRQS_OFF
 	GET_THREAD_INFO(%rcx)
 	jmp  retint_restore_args
+	CFI_ENDPROC
 	.previous .text
+END(nmi)
 #endif

 	ALIGN
 restore_all_enable_events:
+	CFI_DEFAULT_STACK adj=1
+	TRACE_IRQS_ON
 	XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...

 scrit:	/**** START OF CRITICAL REGION ****/
 	XEN_TEST_PENDING(%rsi)
+	CFI_REMEMBER_STATE
 	jnz  14f			# process more events if necessary...
 	XEN_PUT_VCPU_INFO(%rsi)
 	RESTORE_ARGS 0,8,0
 	HYPERVISOR_IRET 0

+	CFI_RESTORE_STATE
 14:	XEN_LOCKED_BLOCK_EVENTS(%rsi)
 	XEN_PUT_VCPU_INFO(%rsi)
 	SAVE_REST
 	movq %rsp,%rdi                  # set the argument again
 	jmp  11b
+	CFI_ENDPROC
 ecrit:  /**** END OF CRITICAL REGION ****/
 # At this point, unlike on x86-32, we don't do the fixup to simplify the
 # code and the stack frame is more complex on x86-64.
@@ -970,8 +994,12 @@ ecrit:  /**** END OF CRITICAL REGION ***
 # We distinguish between categories by comparing each saved segment register
 # with its current contents: any discrepancy means we in category 1.
 ENTRY(failsafe_callback)
+	_frame (RIP-0x30)
+	CFI_REL_OFFSET rcx, 0
+	CFI_REL_OFFSET r11, 8
 	movw %ds,%cx
 	cmpw %cx,0x10(%rsp)
+	CFI_REMEMBER_STATE
 	jne 1f
 	movw %es,%cx
 	cmpw %cx,0x18(%rsp)
@@ -984,17 +1012,26 @@ ENTRY(failsafe_callback)
 	jne 1f
 	/* All segments match their saved values => Category 2 (Bad IRET). */
 	movq (%rsp),%rcx
+	CFI_RESTORE rcx
 	movq 8(%rsp),%r11
+	CFI_RESTORE r11
 	addq $0x30,%rsp
+	CFI_ADJUST_CFA_OFFSET -0x30
 	movq $11,%rdi	/* SIGSEGV */
 	jmp do_exit
+	CFI_RESTORE_STATE
 1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
 	movq (%rsp),%rcx
+	CFI_RESTORE rcx
 	movq 8(%rsp),%r11
+	CFI_RESTORE r11
 	addq $0x30,%rsp
+	CFI_ADJUST_CFA_OFFSET -0x30
 	pushq $0
+	CFI_ADJUST_CFA_OFFSET 8
 	SAVE_ALL
 	jmp error_exit
+	CFI_ENDPROC
 #if 0
 	.section __ex_table,"a"
 	.align 8
@@ -1117,12 +1154,12 @@ END(device_not_available)

 	/* runs on exception stack */
 KPROBE_ENTRY(debug)
-	INTR_FRAME
-/*	pushq $0
+/*	INTR_FRAME
+	pushq $0
 	CFI_ADJUST_CFA_OFFSET 8	*/
 	zeroentry do_debug
-/*	paranoid_exit */
-	CFI_ENDPROC
+/*	paranoidexit
+	CFI_ENDPROC */
 END(debug)
 	.previous .text

@@ -1144,12 +1181,12 @@ END(nmi)
 #endif

 KPROBE_ENTRY(int3)
-	INTR_FRAME
-/*	pushq $0
+/*	INTR_FRAME
+	pushq $0
 	CFI_ADJUST_CFA_OFFSET 8	*/
 	zeroentry do_int3
-/*	jmp paranoid_exit1 */
-	CFI_ENDPROC
+/*	jmp paranoid_exit1
+	CFI_ENDPROC */
 END(int3)
 	.previous .text

@@ -1193,9 +1230,11 @@ END(segment_not_present)

 	/* runs on exception stack */
 ENTRY(stack_segment)
-	XCPT_FRAME
+/*	XCPT_FRAME
+	paranoidentry do_stack_segment */
 	errorentry do_stack_segment
-	CFI_ENDPROC
+/*	jmp paranoid_exit1
+	CFI_ENDPROC */
 END(stack_segment)

 KPROBE_ENTRY(general_protection)
Index: head-2007-02-27/arch/x86_64/kernel/head-xen.S
===================================================================
--- head-2007-02-27.orig/arch/x86_64/kernel/head-xen.S	2007-03-05 10:00:18.000000000 +0100
+++ head-2007-02-27/arch/x86_64/kernel/head-xen.S	2007-03-02 15:47:16.000000000 +0100
@@ -22,7 +22,7 @@
 #include <asm/page.h>
 #include <asm/msr.h>
 #include <asm/cache.h>
-
+#include <asm/dwarf2.h>
 #include <xen/interface/elfnote.h>

 #define _PAGE_PRESENT	0x1
@@ -42,6 +42,7 @@ ENTRY(_start)
 	/* rsi is pointer to startup info structure.
 	   pass it to C */
 	movq %rsi,%rdi
+	pushq $0		# fake return address
 	jmp x86_64_start_kernel

 ENTRY(stext)
@@ -82,7 +83,25 @@ NEXT_PAGE(level2_kernel_pgt)
 	.fill	512,8,0

 NEXT_PAGE(hypercall_page)
-	.fill	512,8,0
+	CFI_STARTPROC
+	.rept 0x1000 / 0x20
+	.skip 1 /* push %rcx */
+	CFI_ADJUST_CFA_OFFSET 8
+	CFI_REL_OFFSET rcx,0
+	.skip 2 /* push %r11 */
+	CFI_ADJUST_CFA_OFFSET 8
+	CFI_REL_OFFSET r11,0
+	.skip 5 /* mov $#,%eax */
+	.skip 2 /* syscall */
+	.skip 2 /* pop %r11 */
+	CFI_ADJUST_CFA_OFFSET -8
+	CFI_RESTORE r11
+	.skip 1 /* pop %rcx */
+	CFI_ADJUST_CFA_OFFSET -8
+	CFI_RESTORE rcx
+	.align 0x20,0 /* ret */
+	.endr
+	CFI_ENDPROC

 #undef NEXT_PAGE
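(Editorial sketch, not part of the patch: the .skip/.align block annotating hypercall_page above stands in for stubs that Xen fills in at boot. Going by the byte counts and the comments in that hunk, each 32-byte stub behaves roughly like the sequence below, shown with the same CFI annotations spelled out next to real instructions; the label and the __HYPERVISOR_example hypercall number are placeholders.)

        .align 0x20
hypercall_stub_example:                 # illustrative only; Xen writes the real bytes
        CFI_STARTPROC
        push %rcx                       # syscall clobbers %rcx...
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rcx,0
        push %r11                       # ...and %r11, so both are saved
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET r11,0
        mov  $__HYPERVISOR_example,%eax # hypercall number (placeholder)
        syscall
        pop  %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_RESTORE r11
        pop  %rcx
        CFI_ADJUST_CFA_OFFSET -8
        CFI_RESTORE rcx
        ret
        CFI_ENDPROC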
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel