[PATCH v2 1/2] x86: annotate entry points with type and size
Recent gas versions generate minimalistic Dwarf debug info for items
annotated as functions and having their sizes specified [1]. "Borrow"
Arm's END() and (remotely) derive the other annotation infrastructure
from Linux's.

For switch_to_kernel() and restore_all_guest() the so far implicit
alignment (from being first in their respective sections) is made
explicit (i.e. by using FUNC() without a 2nd argument). For
{,compat}create_bounce_frame() and autogen_entrypoints[], on the other
hand, alignment is newly arranged for.

Except for the added alignment padding (and its knock-on effects) there
is no change in generated code/data.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

[1] https://sourceware.org/git?p=binutils-gdb.git;a=commitdiff;h=591cc9fbbfd6d51131c0f1d4a92e7893edcc7a28
---
v2: Full rework.
---
Only two of the assembly files are being converted for now. More could
be done right here or as follow-on in separate patches.

In principle the framework should be usable by other architectures as
well. If we want this, the main questions are going to be:
- What header file name? (I don't really like Linux's linkage.h, so I'd
  prefer e.g. asm-defns.h or asm_defns.h as we already have in x86.)
- How much per-arch customization do we want to permit up front (i.e.
  without knowing how much of it is going to be needed)? Initially I'd
  expect only the default function alignment (and padding) to require
  per-arch definitions.

Note that the FB-label in autogen_stubs() cannot be converted just yet:
Such labels cannot be used with .type. We could further diverge from
Linux's model and avoid setting STT_NOTYPE explicitly (that's the type
labels get by default anyway).

Note that we can't use ALIGN() (in place of SYM_ALIGN()) as long as we
still have ALIGN.

--- a/xen/arch/x86/include/asm/asm_defns.h
+++ b/xen/arch/x86/include/asm/asm_defns.h
@@ -81,6 +81,45 @@ register unsigned long current_stack_poi
 
 #ifdef __ASSEMBLY__
 
+#define SYM_ALIGN(algn...) .balign algn
+
+#define SYM_L_GLOBAL(name) .globl name
+#define SYM_L_WEAK(name)   .weak name
+#define SYM_L_LOCAL(name)  /* nothing */
+
+#define SYM_T_FUNC         STT_FUNC
+#define SYM_T_DATA         STT_OBJECT
+#define SYM_T_NONE         STT_NOTYPE
+
+#define SYM(name, typ, linkage, algn...)          \
+        .type name, SYM_T_ ## typ;                \
+        SYM_L_ ## linkage(name);                  \
+        SYM_ALIGN(algn);                          \
+        name:
+
+#define END(name) .size name, . - name
+
+#define ARG1_(x, y...) (x)
+#define ARG2_(x, y...) ARG1_(y)
+
+#define LAST__(nr) ARG ## nr ## _
+#define LAST_(nr)  LAST__(nr)
+#define LAST(x, y...) LAST_(count_args(x, ## y))(x, ## y)
+
+#define FUNC(name, algn...) \
+        SYM(name, FUNC, GLOBAL, LAST(16, ## algn), 0x90)
+#define LABEL(name, algn...) \
+        SYM(name, NONE, GLOBAL, LAST(16, ## algn), 0x90)
+#define DATA(name, algn...) \
+        SYM(name, DATA, GLOBAL, LAST(0, ## algn), 0xff)
+
+#define FUNC_LOCAL(name, algn...) \
+        SYM(name, FUNC, LOCAL, LAST(16, ## algn), 0x90)
+#define LABEL_LOCAL(name, algn...) \
+        SYM(name, NONE, LOCAL, LAST(16, ## algn), 0x90)
+#define DATA_LOCAL(name, algn...) \
+        SYM(name, DATA, LOCAL, LAST(0, ## algn), 0xff)
+
 #ifdef HAVE_AS_QUOTED_SYM
 #define SUBSECTION_LBL(tag)                        \
     .ifndef .L.tag;                                \
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -8,10 +8,11 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
+#include <xen/lib.h>
 #include <public/xen.h>
 #include <irq_vectors.h>
 
-ENTRY(entry_int82)
+FUNC(entry_int82)
         ENDBR64
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $0
@@ -27,9 +28,10 @@ ENTRY(entry_int82)
 
         mov   %rsp, %rdi
         call  do_entry_int82
+END(entry_int82)
 
 /* %rbx: struct vcpu */
-ENTRY(compat_test_all_events)
+FUNC(compat_test_all_events)
         ASSERT_NOT_IN_ATOMIC
         cli                             # tests must not race interrupts
 /*compat_test_softirqs:*/
@@ -66,24 +68,21 @@ compat_test_guest_events:
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu */
-compat_process_softirqs:
+LABEL_LOCAL(compat_process_softirqs)
         sti
         call  do_softirq
         jmp   compat_test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu, %rdx: struct trap_bounce */
-.Lcompat_process_trapbounce:
+LABEL_LOCAL(.Lcompat_process_trapbounce)
         sti
 .Lcompat_bounce_exception:
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu */
-compat_process_mce:
+LABEL_LOCAL(compat_process_mce)
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
         jnz   .Lcompat_test_guest_nmi
         sti
@@ -97,9 +96,8 @@ compat_process_mce:
         movb  %dl,VCPU_async_exception_mask(%rbx)
         jmp   compat_process_trap
 
-        ALIGN
 /* %rbx: struct vcpu */
-compat_process_nmi:
+LABEL_LOCAL(compat_process_nmi)
         testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
         jnz   compat_test_guest_events
         sti
@@ -116,9 +114,10 @@ compat_process_trap:
         leaq  VCPU_trap_bounce(%rbx),%rdx
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+END(compat_test_all_events)
 
 /* %rbx: struct vcpu, interrupts disabled */
-ENTRY(compat_restore_all_guest)
+FUNC(compat_restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
         mov   $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11d
         and   UREGS_eflags(%rsp),%r11d
@@ -161,9 +160,10 @@ ENTRY(compat_restore_all_guest)
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
+END(compat_restore_all_guest)
 
 /* This mustn't modify registers other than %rax. */
-ENTRY(cr4_pv32_restore)
+FUNC(cr4_pv32_restore)
         push  %rdx
         GET_CPUINFO_FIELD(cr4, dx)
         mov   (%rdx), %rax
@@ -193,8 +193,9 @@ ENTRY(cr4_pv32_restore)
         pop   %rdx
         xor   %eax, %eax
         ret
+END(cr4_pv32_restore)
 
-ENTRY(compat_syscall)
+FUNC(compat_syscall)
         /* Fix up reported %cs/%ss for compat domains. */
         movl  $FLAT_COMPAT_USER_SS, UREGS_ss(%rsp)
         movl  $FLAT_COMPAT_USER_CS, UREGS_cs(%rsp)
@@ -222,8 +223,9 @@ UNLIKELY_END(compat_syscall_gpf)
         movw  %si,TRAPBOUNCE_cs(%rdx)
         movb  %cl,TRAPBOUNCE_flags(%rdx)
         jmp   .Lcompat_bounce_exception
+END(compat_syscall)
 
-ENTRY(compat_sysenter)
+FUNC(compat_sysenter)
         CR4_PV32_RESTORE
         movq  VCPU_trap_ctxt(%rbx),%rcx
         cmpb  $X86_EXC_GP, UREGS_entry_vector(%rsp)
@@ -236,17 +238,19 @@ ENTRY(compat_sysenter)
         movw  %ax,TRAPBOUNCE_cs(%rdx)
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+END(compat_sysenter)
 
-ENTRY(compat_int80_direct_trap)
+FUNC(compat_int80_direct_trap)
         CR4_PV32_RESTORE
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+END(compat_int80_direct_trap)
 
 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
 /*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
 /* %rdx: trap_bounce, %rbx: struct vcpu                                  */
 /* On return only %rbx and %rdx are guaranteed non-clobbered.            */
-compat_create_bounce_frame:
+FUNC_LOCAL(compat_create_bounce_frame)
         ASSERT_INTERRUPTS_ENABLED
         mov   %fs,%edi
         ALTERNATIVE "", stac, X86_FEATURE_XEN_SMAP
@@ -352,3 +356,4 @@ compat_crash_page_fault:
         jmp   .Lft14
 .previous
         _ASM_EXTABLE(.Lft14, .Lfx14)
+END(compat_create_bounce_frame)
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -9,6 +9,7 @@
 #include <asm/asm_defns.h>
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <xen/lib.h>
 #include <public/xen.h>
 #include <irq_vectors.h>
 
@@ -24,7 +25,7 @@
 #ifdef CONFIG_PV
 
 /* %rbx: struct vcpu */
-switch_to_kernel:
+FUNC_LOCAL(switch_to_kernel)
         leaq  VCPU_trap_bounce(%rbx),%rdx
 
         /* TB_eip = 32-bit syscall ? syscall32_addr : syscall_addr */
@@ -89,24 +90,21 @@ test_guest_events:
         call  create_bounce_frame
         jmp   test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu */
-process_softirqs:
+LABEL_LOCAL(process_softirqs)
         sti
         call  do_softirq
         jmp   test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu, %rdx struct trap_bounce */
-.Lprocess_trapbounce:
+LABEL_LOCAL(.Lprocess_trapbounce)
         sti
 .Lbounce_exception:
         call  create_bounce_frame
         jmp   test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu */
-process_mce:
+LABEL_LOCAL(process_mce)
         testb $1 << VCPU_TRAP_MCE, VCPU_async_exception_mask(%rbx)
         jnz   .Ltest_guest_nmi
         sti
@@ -120,9 +118,8 @@ process_mce:
         movb  %dl, VCPU_async_exception_mask(%rbx)
         jmp   process_trap
 
-        ALIGN
 /* %rbx: struct vcpu */
-process_nmi:
+LABEL_LOCAL(process_nmi)
         testb $1 << VCPU_TRAP_NMI, VCPU_async_exception_mask(%rbx)
         jnz   test_guest_events
         sti
@@ -139,11 +136,12 @@ process_trap:
         leaq  VCPU_trap_bounce(%rbx), %rdx
         call  create_bounce_frame
         jmp   test_all_events
+END(switch_to_kernel)
 
         .section .text.entry, "ax", @progbits
 
 /* %rbx: struct vcpu, interrupts disabled */
-restore_all_guest:
+FUNC_LOCAL(restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
 
         /* Stash guest SPEC_CTRL value while we can read struct vcpu. */
@@ -220,8 +218,7 @@ restore_all_guest:
         sysretq
 1:      sysretl
 
-        ALIGN
-.Lrestore_rcx_iret_exit_to_guest:
+LABEL_LOCAL(.Lrestore_rcx_iret_exit_to_guest)
         movq  8(%rsp), %rcx           # RIP
 /* No special register assumptions. */
 iret_exit_to_guest:
@@ -230,6 +227,7 @@ iret_exit_to_guest:
         addq  $8,%rsp
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
+END(restore_all_guest)
 
 /*
  * When entering SYSCALL from kernel mode:
@@ -246,7 +244,7 @@ iret_exit_to_guest:
  * - Guest %rsp stored in %rax
  * - Xen stack loaded, pointing at the %ss slot
  */
-ENTRY(lstar_enter)
+FUNC(lstar_enter)
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
@@ -281,9 +279,10 @@ ENTRY(lstar_enter)
         mov   %rsp, %rdi
         call  pv_hypercall
         jmp   test_all_events
+END(lstar_enter)
 
 /* See lstar_enter for entry register state. */
-ENTRY(cstar_enter)
+FUNC(cstar_enter)
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
@@ -321,8 +320,9 @@ ENTRY(cstar_enter)
         jne   compat_syscall
 #endif
         jmp   switch_to_kernel
+END(cstar_enter)
 
-ENTRY(sysenter_entry)
+FUNC(sysenter_entry)
         ENDBR64
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
@@ -331,7 +331,7 @@ ENTRY(sysenter_entry)
         pushq $FLAT_USER_SS
         pushq $0
         pushfq
-GLOBAL(sysenter_eflags_saved)
+LABEL(sysenter_eflags_saved, 0)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $3 /* ring 3 null cs */
         pushq $0 /* null rip */
@@ -385,8 +385,9 @@ UNLIKELY_END(sysenter_gpf)
         jne   compat_sysenter
 #endif
         jmp   .Lbounce_exception
+END(sysenter_entry)
 
-ENTRY(int80_direct_trap)
+FUNC(int80_direct_trap)
         ENDBR64
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $0
@@ -474,6 +475,7 @@ int80_slow_path:
          */
         GET_STACK_END(14)
         jmp   handle_exception_saved
+END(int80_direct_trap)
 
 /* create_bounce_frame & helpers don't need to be in .text.entry */
         .text
@@ -482,7 +484,7 @@ int80_slow_path:
 /*   { RCX, R11, [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }                   */
 /* %rdx: trap_bounce, %rbx: struct vcpu                                  */
 /* On return only %rbx and %rdx are guaranteed non-clobbered.            */
-create_bounce_frame:
+FUNC_LOCAL(create_bounce_frame)
         ASSERT_INTERRUPTS_ENABLED
         testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
         jnz   1f
@@ -618,6 +620,7 @@ ENTRY(dom_crash_sync_extable)
         xorl  %edi,%edi
         jmp   asm_domain_crash_synchronous /* Does not return */
         .popsection
+END(create_bounce_frame)
 #endif /* CONFIG_PV */
 
 /* --- CODE BELOW THIS LINE (MOSTLY) NOT GUEST RELATED --- */
@@ -626,7 +629,7 @@ ENTRY(dom_crash_sync_extable)
 
 /* No special register assumptions. */
 #ifdef CONFIG_PV
-ENTRY(continue_pv_domain)
+FUNC(continue_pv_domain)
         ENDBR64
         call  check_wakeup_from_wait
 ret_from_intr:
@@ -641,26 +644,28 @@ ret_from_intr:
 #else
         jmp   test_all_events
 #endif
+END(continue_pv_domain)
 #else
-ret_from_intr:
+FUNC(ret_from_intr, 0)
         ASSERT_CONTEXT_IS_XEN
         jmp   restore_all_xen
+END(ret_from_intr)
 #endif
 
         .section .init.text, "ax", @progbits
-ENTRY(early_page_fault)
+FUNC(early_page_fault)
         ENDBR64
         movl  $X86_EXC_PF, 4(%rsp)
         SAVE_ALL
         movq  %rsp, %rdi
         call  do_early_page_fault
         jmp   restore_all_xen
+END(early_page_fault)
 
         .section .text.entry, "ax", @progbits
 
-        ALIGN
 /* No special register assumptions. */
-restore_all_xen:
+FUNC_LOCAL(restore_all_xen)
         /*
          * Check whether we need to switch to the per-CPU page tables, in
         * case we return to late PV exit code (from an NMI or #MC).
@@ -677,8 +682,9 @@ UNLIKELY_END(exit_cr3)
 
         RESTORE_ALL adj=8
         iretq
+END(restore_all_xen)
 
-ENTRY(common_interrupt)
+FUNC(common_interrupt)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         SAVE_ALL
 
@@ -707,12 +713,14 @@ ENTRY(common_interrupt)
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         mov   %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         jmp   ret_from_intr
+END(common_interrupt)
 
-ENTRY(page_fault)
+FUNC(page_fault)
         ENDBR64
         movl  $X86_EXC_PF, 4(%rsp)
+END(page_fault)
 /* No special register assumptions. */
-GLOBAL(handle_exception)
+FUNC(handle_exception, 0)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         SAVE_ALL
 
@@ -882,92 +890,108 @@ FATAL_exception_with_ints_disabled:
         movq  %rsp,%rdi
         call  fatal_trap
         BUG   /* fatal_trap() shouldn't return. */
+END(handle_exception)
 
-ENTRY(divide_error)
+FUNC(divide_error)
         ENDBR64
         pushq $0
         movl  $X86_EXC_DE, 4(%rsp)
         jmp   handle_exception
+END(divide_error)
 
-ENTRY(coprocessor_error)
+FUNC(coprocessor_error)
         ENDBR64
         pushq $0
         movl  $X86_EXC_MF, 4(%rsp)
         jmp   handle_exception
+END(coprocessor_error)
 
-ENTRY(simd_coprocessor_error)
+FUNC(simd_coprocessor_error)
         ENDBR64
         pushq $0
         movl  $X86_EXC_XM, 4(%rsp)
         jmp   handle_exception
+END(simd_coprocessor_error)
 
-ENTRY(device_not_available)
+FUNC(device_not_available)
         ENDBR64
         pushq $0
         movl  $X86_EXC_NM, 4(%rsp)
         jmp   handle_exception
+END(device_not_available)
 
-ENTRY(debug)
+FUNC(debug)
         ENDBR64
         pushq $0
         movl  $X86_EXC_DB, 4(%rsp)
         jmp   handle_ist_exception
+END(debug)
 
-ENTRY(int3)
+FUNC(int3)
         ENDBR64
         pushq $0
         movl  $X86_EXC_BP, 4(%rsp)
         jmp   handle_exception
+END(int3)
 
-ENTRY(overflow)
+FUNC(overflow)
         ENDBR64
         pushq $0
         movl  $X86_EXC_OF, 4(%rsp)
         jmp   handle_exception
+END(overflow)
 
-ENTRY(bounds)
+FUNC(bounds)
         ENDBR64
         pushq $0
         movl  $X86_EXC_BR, 4(%rsp)
         jmp   handle_exception
+END(bounds)
 
-ENTRY(invalid_op)
+FUNC(invalid_op)
         ENDBR64
         pushq $0
        movl  $X86_EXC_UD, 4(%rsp)
         jmp   handle_exception
+END(invalid_op)
 
-ENTRY(invalid_TSS)
+FUNC(invalid_TSS)
         ENDBR64
         movl  $X86_EXC_TS, 4(%rsp)
         jmp   handle_exception
+END(invalid_TSS)
 
-ENTRY(segment_not_present)
+FUNC(segment_not_present)
         ENDBR64
         movl  $X86_EXC_NP, 4(%rsp)
         jmp   handle_exception
+END(segment_not_present)
 
-ENTRY(stack_segment)
+FUNC(stack_segment)
         ENDBR64
         movl  $X86_EXC_SS, 4(%rsp)
         jmp   handle_exception
+END(stack_segment)
 
-ENTRY(general_protection)
+FUNC(general_protection)
         ENDBR64
         movl  $X86_EXC_GP, 4(%rsp)
         jmp   handle_exception
+END(general_protection)
 
-ENTRY(alignment_check)
+FUNC(alignment_check)
         ENDBR64
         movl  $X86_EXC_AC, 4(%rsp)
         jmp   handle_exception
+END(alignment_check)
 
-ENTRY(entry_CP)
+FUNC(entry_CP)
         ENDBR64
         movl  $X86_EXC_CP, 4(%rsp)
         jmp   handle_exception
+END(entry_CP)
 
-ENTRY(double_fault)
+FUNC(double_fault)
         ENDBR64
         movl  $X86_EXC_DF, 4(%rsp)
         /* Set AC to reduce chance of further SMAP faults */
@@ -991,8 +1015,9 @@ ENTRY(double_fault)
         movq  %rsp,%rdi
         call  do_double_fault
         BUG   /* do_double_fault() shouldn't return. */
+END(double_fault)
 
-ENTRY(nmi)
+FUNC(nmi)
         ENDBR64
         pushq $0
         movl  $X86_EXC_NMI, 4(%rsp)
@@ -1120,21 +1145,24 @@ handle_ist_exception:
         ASSERT_CONTEXT_IS_XEN
         jmp   restore_all_xen
 #endif
+END(nmi)
 
-ENTRY(machine_check)
+FUNC(machine_check)
         ENDBR64
         pushq $0
         movl  $X86_EXC_MC, 4(%rsp)
         jmp   handle_ist_exception
+END(machine_check)
 
 /* No op trap handler. Required for kexec crash path. */
-GLOBAL(trap_nop)
+FUNC(trap_nop, 0)
         ENDBR64
         iretq
+END(trap_nop)
 
 /* Table of automatically generated entry points. One per vector. */
         .pushsection .init.rodata, "a", @progbits
-GLOBAL(autogen_entrypoints)
+DATA(autogen_entrypoints, 8)
         /* pop into the .init.rodata section and record an entry point. */
         .macro entrypoint ent
         .pushsection .init.rodata, "a", @progbits
@@ -1143,7 +1171,7 @@ GLOBAL(autogen_entrypoints)
         .endm
         .popsection
 
-autogen_stubs: /* Automatically generated stubs. */
+FUNC_LOCAL(autogen_stubs, 0) /* Automatically generated stubs. */
 
         vec = 0
         .rept X86_NR_VECTORS
@@ -1187,6 +1215,7 @@ autogen_stubs: /* Automatically generate
 
         vec = vec + 1
         .endr
+END(autogen_stubs)
 
         .section .init.rodata, "a", @progbits
-        .size autogen_entrypoints, . - autogen_entrypoints
+END(autogen_entrypoints)
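
For reference, below is a small stand-alone sketch (not part of the patch) of
the LAST()-based default-argument selection that FUNC()/LABEL()/DATA() rely
on. The count_args() defined here is a cut-down, two-argument stand-in for
the macro provided by xen/lib.h (which is why both entry.S files gain that
include); main() and the printf() strings are purely illustrative.

  #include <stdio.h>

  /* Simplified stand-in for xen/lib.h's count_args(): copes with 1 or 2 args. */
  #define count_args_(a, b, nr, ...) nr
  #define count_args(args...) count_args_(args, 2, 1, 0)

  /* As added to asm_defns.h by the patch. */
  #define ARG1_(x, y...) (x)
  #define ARG2_(x, y...) ARG1_(y)

  #define LAST__(nr) ARG ## nr ## _
  #define LAST_(nr)  LAST__(nr)
  #define LAST(x, y...) LAST_(count_args(x, ## y))(x, ## y)

  int main(void)
  {
      /* FUNC(name) hands LAST(16) to SYM_ALIGN()   -> .balign 16, 0x90 */
      printf("FUNC(name)     -> .balign %d, 0x90\n", LAST(16));
      /* FUNC(name, 64) hands LAST(16, 64) instead  -> .balign 64, 0x90 */
      printf("FUNC(name, 64) -> .balign %d, 0x90\n", LAST(16, 64));
      return 0;
  }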