[Xen-devel] [PATCH v4 22/27] x86_64: assembly, change all ENTRY+END to SYM_CODE_*
Here, we change all code which is not marked as functions. In other
words, this code has been using END, not ENDPROC. So switch all of this
to the appropriate new markings, SYM_CODE_START and SYM_CODE_END.

Signed-off-by: Jiri Slaby <jslaby@xxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: xen-devel@xxxxxxxxxxxxxxxxxxxx
---
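For orientation: the conversion is purely mechanical. The sketch below
compares the old and new annotations. The ENTRY/END definitions follow
include/linux/linkage.h; the SYM_CODE_* expansions shown are only an
approximation of the macros introduced earlier in this series (see the
linkage.h patches there for the real definitions). The point is that the
new end marker also records a symbol type, so tools such as objtool and
debuggers can tell this non-C-callable code apart from proper functions,
which use SYM_FUNC_START/SYM_FUNC_END instead.

	/* Old macros, as in include/linux/linkage.h (slightly simplified): */
	#define ENTRY(name) \
		.globl name ASM_NL \
		ALIGN ASM_NL \
		name:

	#define END(name) \
		.size name, .-name

	/*
	 * New macros -- sketch only, not the exact definitions from this
	 * series.  To a first approximation they expand the same way,
	 * except that the END counterpart also tags the symbol type:
	 */
	#define SYM_CODE_START(name) \
		.globl name ASM_NL \
		ALIGN ASM_NL \
		name:

	#define SYM_CODE_END(name) \
		.type name STT_NOTYPE ASM_NL \
		.size name, .-name
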
 arch/x86/entry/entry_64.S        | 48 ++++++++++++++++++++--------------------
 arch/x86/entry/entry_64_compat.S |  8 +++----
 arch/x86/kernel/ftrace_64.S      | 16 +++++++-------
 arch/x86/xen/xen-asm_64.S        |  4 ++--
 arch/x86/xen/xen-head.S          |  8 +++----
 5 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index ff4964dac2dc..02e15a18e132 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -43,11 +43,11 @@
 .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
 	UNWIND_HINT_EMPTY
 	swapgs
 	sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_IRETQ
@@ -135,7 +135,7 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
 	UNWIND_HINT_EMPTY
 	/*
 	 * Interrupts are off on entry.
@@ -322,7 +322,7 @@ syscall_return_via_sysret:
 opportunistic_sysret_failed:
 	SWAPGS
 	jmp	restore_c_regs_and_iret
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
 
 SYM_CODE_START_LOCAL(stub_ptregs_64)
 	/*
@@ -352,11 +352,11 @@ SYM_CODE_START_LOCAL(stub_ptregs_64)
 SYM_CODE_END(stub_ptregs_64)
 
 .macro ptregs_stub func
-ENTRY(ptregs_\func)
+SYM_CODE_START(ptregs_\func)
 	UNWIND_HINT_FUNC
 	leaq	\func(%rip), %rax
 	jmp	stub_ptregs_64
-END(ptregs_\func)
+SYM_CODE_END(ptregs_\func)
 .endm
 
 /* Instantiate ptregs_stub for each ptregs-using syscall */
@@ -369,7 +369,7 @@ END(ptregs_\func)
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
 	UNWIND_HINT_FUNC
 	/*
 	 * Save callee-saved registers
@@ -400,7 +400,7 @@ ENTRY(__switch_to_asm)
 	popq	%rbp
 
 	jmp	__switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -409,7 +409,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
 	UNWIND_HINT_EMPTY
 	movq	%rax, %rdi
 	call	schedule_tail			/* rdi: 'prev' task parameter */
@@ -436,14 +436,14 @@ ENTRY(ret_from_fork)
 	 */
 	movq	$0, RAX(%rsp)
 	jmp	2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
 	.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
 	vector=FIRST_EXTERNAL_VECTOR
 	.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
 	UNWIND_HINT_IRET_REGS
@@ -452,7 +452,7 @@ ENTRY(irq_entries_start)
 	.align	8
 	vector=vector+1
 	.endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -737,14 +737,14 @@ SYM_CODE_END(common_interrupt)
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
 	UNWIND_HINT_IRET_REGS
 	ASM_CLAC
 	pushq	$~(\num)
 .Lcommon_\sym:
 	interrupt \do_sym
 	jmp	ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 .endm
 
 /* Make sure APIC interrupt handlers end up in the irqentry section: */
@@ -806,7 +806,7 @@ apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
 #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
-ENTRY(\sym)
+SYM_CODE_START(\sym)
 	UNWIND_HINT_IRET_REGS offset=8
 
 	/* Sanity check */
@@ -895,7 +895,7 @@ ENTRY(\sym)
 
 	jmp	error_exit			/* %ebx: no swapgs flag */
 	.endif
-END(\sym)
+SYM_CODE_END(\sym)
 .endm
 
 idtentry divide_error			do_divide_error			has_error_code=0
@@ -1010,7 +1010,7 @@ SYM_CODE_END(xen_do_hypervisor_callback)
  * We distinguish between categories by comparing each saved segment register
  * with its current contents: any discrepancy means we in category 1.
  */
-ENTRY(xen_failsafe_callback)
+SYM_CODE_START(xen_failsafe_callback)
 	UNWIND_HINT_EMPTY
 	movl	%ds, %ecx
 	cmpw	%cx, 0x10(%rsp)
@@ -1042,7 +1042,7 @@ ENTRY(xen_failsafe_callback)
 	SAVE_EXTRA_REGS
 	ENCODE_FRAME_POINTER
 	jmp	error_exit
-END(xen_failsafe_callback)
+SYM_CODE_END(xen_failsafe_callback)
 
 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 	xen_hvm_callback_vector xen_evtchn_do_upcall
@@ -1225,7 +1225,7 @@ SYM_CODE_END(error_exit)
 
 /* Runs on exception stack */
 /* XXX: broken on Xen PV */
-ENTRY(nmi)
+SYM_CODE_START(nmi)
 	UNWIND_HINT_IRET_REGS
 	/*
 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
@@ -1567,15 +1567,15 @@ nmi_restore:
 	 * mode, so this cannot result in a fault.
 	 */
 	INTERRUPT_RETURN
-END(nmi)
+SYM_CODE_END(nmi)
 
-ENTRY(ignore_sysret)
+SYM_CODE_START(ignore_sysret)
 	UNWIND_HINT_EMPTY
 	mov	$-ENOSYS, %eax
 	sysret
-END(ignore_sysret)
+SYM_CODE_END(ignore_sysret)
 
-ENTRY(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_do_exit)
 	UNWIND_HINT_FUNC
 	/* Prevent any naive code from trying to unwind to our caller. */
 	xorl	%ebp, %ebp
@@ -1585,4 +1585,4 @@ ENTRY(rewind_stack_do_exit)
 	UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
 
 	call	do_exit
-END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_do_exit)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index b0d0584f5a61..4815a8115c90 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -181,7 +181,7 @@ ENDPROC(entry_SYSENTER_compat)
  * esp  user stack
  * 0(%esp) arg6
  */
-ENTRY(entry_SYSCALL_compat)
+SYM_CODE_START(entry_SYSCALL_compat)
 	/* Interrupts are off on entry. */
 	swapgs
 
@@ -261,7 +261,7 @@ sysret32_from_system_call:
 	movq	RSP-ORIG_RAX(%rsp), %rsp
 	swapgs
 	sysretl
-END(entry_SYSCALL_compat)
+SYM_CODE_END(entry_SYSCALL_compat)
 
 /*
  * 32-bit legacy system call entry.
@@ -289,7 +289,7 @@ END(entry_SYSCALL_compat)
  * edi  arg5
  * ebp  arg6
  */
-ENTRY(entry_INT80_compat)
+SYM_CODE_START(entry_INT80_compat)
 	/*
 	 * Interrupts are off on entry.
 	 */
@@ -338,7 +338,7 @@ ENTRY(entry_INT80_compat)
 	TRACE_IRQS_ON
 	SWAPGS
 	jmp	restore_regs_and_iret
-END(entry_INT80_compat)
+SYM_CODE_END(entry_INT80_compat)
 
 ENTRY(stub32_clone)
 	/*
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 1ce977394fbe..3e20eb31cca6 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -149,7 +149,7 @@ SYM_FUNC_START(function_hook)
 	retq
 SYM_FUNC_END(function_hook)
 
-ENTRY(ftrace_caller)
+SYM_CODE_START(ftrace_caller)
 	/* save_mcount_regs fills in first two parameters */
 	save_mcount_regs
 
@@ -183,9 +183,9 @@ SYM_CODE_INNER_LABEL_NOALIGN(ftrace_graph_call, SYM_V_GLOBAL)
 /* This is weak to keep gas from relaxing the jumps */
 WEAK(ftrace_stub)
 	retq
-END(ftrace_caller)
+SYM_CODE_END(ftrace_caller)
 
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
 	/* Save the current flags before any operations that can change them */
 	pushfq
 
@@ -254,12 +254,12 @@ SYM_CODE_INNER_LABEL_NOALIGN(ftrace_regs_caller_end, SYM_V_GLOBAL)
 
 	jmp ftrace_epilogue
 
-END(ftrace_regs_caller)
+SYM_CODE_END(ftrace_regs_caller)
 
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
-ENTRY(function_hook)
+SYM_CODE_START(function_hook)
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
 
@@ -290,11 +290,11 @@ trace:
 	restore_mcount_regs
 
 	jmp fgraph_trace
-END(function_hook)
+SYM_CODE_END(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
+SYM_CODE_START(ftrace_graph_caller)
 	/* Saves rbp into %rdx and fills first parameter */
 	save_mcount_regs
 
@@ -312,7 +312,7 @@ ENTRY(ftrace_graph_caller)
 	restore_mcount_regs
 
 	retq
-END(ftrace_graph_caller)
+SYM_CODE_END(ftrace_graph_caller)
 
 SYM_CODE_START_NOALIGN(return_to_handler)
 	subq  $24, %rsp
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 4afa50c40f12..d18d2118a80a 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -17,11 +17,11 @@
 #include <linux/linkage.h>
 
 .macro xen_pv_trap name
-ENTRY(xen_\name)
+SYM_CODE_START(xen_\name)
 	pop %rcx
 	pop %r11
 	jmp \name
-END(xen_\name)
+SYM_CODE_END(xen_\name)
 .endm
 
 xen_pv_trap divide_error
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 124941d09b2b..83c64ece680d 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -19,7 +19,7 @@
 
 #ifdef CONFIG_XEN_PV
 	__INIT
-ENTRY(startup_xen)
+SYM_CODE_START(startup_xen)
 	UNWIND_HINT_EMPTY
 	cld
 
@@ -35,13 +35,13 @@ ENTRY(startup_xen)
 	mov $init_thread_union+THREAD_SIZE, %_ASM_SP
 
 	jmp xen_start_kernel
-END(startup_xen)
+SYM_CODE_END(startup_xen)
 	__FINIT
 #endif
 
 .pushsection .text
 	.balign PAGE_SIZE
-ENTRY(hypercall_page)
+SYM_CODE_START(hypercall_page)
 	.rept (PAGE_SIZE / 32)
 		UNWIND_HINT_EMPTY
 		.skip 32
@@ -52,7 +52,7 @@ ENTRY(hypercall_page)
 	.type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
 #include <asm/xen-hypercalls.h>
 #undef HYPERCALL
-END(hypercall_page)
+SYM_CODE_END(hypercall_page)
 .popsection
 
 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
-- 
2.14.2