
Re: [PATCH 3/3] x86/entry: Make intra-function symbols properly local



On Mon, Jan 22, 2024 at 06:17:14PM +0000, Andrew Cooper wrote:
> Each of these symbols is local to its main function.  By not having them
> globally visible, livepatch's binary diffing logic can reason about the
> functions properly.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> CC: Wei Liu <wl@xxxxxxx>
> CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> CC: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
> 
> sysenter_eflags_saved() is an open question.  This does need to be globally
> visible, and I think any livepatch modifying sysenter_entry() will need a
> manual adjustment to do_debug() to update the reference there.

Hm, yes, this stuff is indeed problematic.  There's also the usage of
sysenter_entry in do_debug(), which will likewise need adjusting to
account for the position of the payload's replacement text, as there's
no explicit guarantee the payload will be loaded at an address
greater than the current sysenter_entry position.
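
To make that concrete, a rough sketch of the layout involved (from
memory, not verbatim from the sources):

    FUNC(sysenter_entry)
            ...
            pushfq                      /* guest EFLAGS, TF possibly set */
    LABEL(sysenter_eflags_saved)        /* referenced by do_debug(), which
                                         * checks (roughly):
                                         *   sysenter_entry <= %rip
                                         *     <= sysenter_eflags_saved
                                         * so both symbols must bound the
                                         * text actually being executed. */
            ...
    END(sysenter_entry)

If a payload replaces sysenter_entry(), that range check silently stops
covering the live text, no matter where the payload is loaded.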

> ---
>  xen/arch/x86/x86_64/compat/entry.S | 20 ++++++++++----------
>  xen/arch/x86/x86_64/entry.S        | 22 +++++++++++-----------
>  2 files changed, 21 insertions(+), 21 deletions(-)
> 
> diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
> index 4fbd89cea1a9..1e9e0455eaf3 100644
> --- a/xen/arch/x86/x86_64/compat/entry.S
> +++ b/xen/arch/x86/x86_64/compat/entry.S
> @@ -41,7 +41,7 @@ FUNC(compat_test_all_events)
>          shll  $IRQSTAT_shift,%eax
>          leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
>          cmpl  $0,(%rcx,%rax,1)
> -        jne   compat_process_softirqs
> +        jne   .L_compat_process_softirqs
>  
>          /* Inject exception if pending. */
>          lea   VCPU_trap_bounce(%rbx), %rdx
> @@ -49,11 +49,11 @@ FUNC(compat_test_all_events)
>          jnz   .Lcompat_process_trapbounce
>  
>          cmpb  $0, VCPU_mce_pending(%rbx)
> -        jne   compat_process_mce
> +        jne   .L_compat_process_mce
>  .Lcompat_test_guest_nmi:
>          cmpb  $0, VCPU_nmi_pending(%rbx)
> -        jne   compat_process_nmi
> -compat_test_guest_events:
> +        jne   .L_compat_process_nmi
> +.L_compat_test_guest_events:
>          movq  VCPU_vcpu_info(%rbx),%rax
>          movzwl COMPAT_VCPUINFO_upcall_pending(%rax),%eax
>          decl  %eax
> @@ -71,7 +71,7 @@ compat_test_guest_events:
>          jmp   compat_test_all_events
>  
>  /* %rbx: struct vcpu */
> -LABEL_LOCAL(compat_process_softirqs)
> +LABEL_LOCAL(.L_compat_process_softirqs)
>          sti
>          call  do_softirq
>          jmp   compat_test_all_events
> @@ -84,7 +84,7 @@ LABEL_LOCAL(.Lcompat_process_trapbounce)
>          jmp   compat_test_all_events
>  
>  /* %rbx: struct vcpu */
> -LABEL_LOCAL(compat_process_mce)
> +LABEL_LOCAL(.L_compat_process_mce)
>          testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
>          jnz   .Lcompat_test_guest_nmi
>          sti
> @@ -96,12 +96,12 @@ LABEL_LOCAL(compat_process_mce)
>          movb %dl,VCPU_mce_old_mask(%rbx)            # iret hypercall
>          orl  $1 << VCPU_TRAP_MCE,%edx
>          movb %dl,VCPU_async_exception_mask(%rbx)
> -        jmp   compat_process_trap
> +        jmp   .L_compat_process_trap
>  
>  /* %rbx: struct vcpu */
> -LABEL_LOCAL(compat_process_nmi)
> +LABEL_LOCAL(.L_compat_process_nmi)
>          testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
> -        jnz   compat_test_guest_events
> +        jnz   .L_compat_test_guest_events
>          sti
>          movb  $0, VCPU_nmi_pending(%rbx)
>          call  set_guest_nmi_trapbounce
> @@ -112,7 +112,7 @@ LABEL_LOCAL(compat_process_nmi)
>          orl  $1 << VCPU_TRAP_NMI,%edx
>          movb %dl,VCPU_async_exception_mask(%rbx)
>          /* FALLTHROUGH */
> -compat_process_trap:
> +.L_compat_process_trap:
>          leaq  VCPU_trap_bounce(%rbx),%rdx
>          call  compat_create_bounce_frame
>          jmp   compat_test_all_events
> diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
> index fc64ef1fd460..130462ba0e1a 100644
> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -188,7 +188,7 @@ FUNC_LOCAL(restore_all_guest)
>  
>          RESTORE_ALL
>          testw $TRAP_syscall,4(%rsp)
> -        jz    iret_exit_to_guest
> +        jz    .L_iret_exit_to_guest
>  
>          movq  24(%rsp),%r11           # RFLAGS
>          andq  $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11
> @@ -220,7 +220,7 @@ FUNC_LOCAL(restore_all_guest)
>  LABEL_LOCAL(.Lrestore_rcx_iret_exit_to_guest)
>          movq  8(%rsp), %rcx           # RIP
>  /* No special register assumptions. */
> -iret_exit_to_guest:
> +.L_iret_exit_to_guest:
>          andl  $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), 24(%rsp)
>          orl   $X86_EFLAGS_IF,24(%rsp)
>          addq  $8,%rsp
> @@ -432,10 +432,10 @@ UNLIKELY_END(msi_check)
>          cmove %rdi, %rdx
>  
>          test  %rdx, %rdx
> -        jz    int80_slow_path
> +        jz    .L_int80_slow_path
>  #else
>          test  %rdi, %rdi
> -        jz    int80_slow_path
> +        jz    .L_int80_slow_path
>  #endif
>  
>          /* Construct trap_bounce from trap_ctxt[0x80]. */
> @@ -457,7 +457,7 @@ UNLIKELY_END(msi_check)
>          call  create_bounce_frame
>          jmp   test_all_events
>  
> -int80_slow_path:
> +.L_int80_slow_path:
>          /*
>           * Setup entry vector and error code as if this was a GPF caused by an
>           * IDT entry with DPL==0.
> @@ -472,7 +472,7 @@ int80_slow_path:
>           * need to set up %r14 here, while %r15 is required to still be zero.
>           */
>          GET_STACK_END(14)
> -        jmp   handle_exception_saved
> +        jmp   .L_handle_exception_saved

This one is IMO problematic, as you are jumping from the function
entry_int80 into handle_exception.  Any livepatch that modifies
handle_exception will quite likely break that jump, as the jump won't
be relocated to point at the new position of the local label inside
the new handle_exception text.
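
To spell the failure mode out, using the labels as named by this patch
(heavily stripped down):

    FUNC(entry_int80)
            ...
            jmp   .L_handle_exception_saved /* offset fixed at build time */
    END(entry_int80)

    FUNC(handle_exception)
            ...
    LABEL_LOCAL(.L_handle_exception_saved)
            ...
    END(handle_exception)

A payload replacing handle_exception() only installs a trampoline at the
function's entry point, so the unpatched entry_int80() carries on
jumping into the middle of the stale text.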

handle_exception needs further work in order to be livepatched, and
making handle_exception_saved local makes it look like it's safe to
patch, while it's not.

We need to split handle_exception_saved into a separate function, so
that it has its own section, for as long as it's referenced from other
functions.
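
Something along these lines, i.e. untested and only to illustrate the
shape; note the current fall-through from handle_exception would have
to become an explicit jump:

    FUNC(handle_exception_saved)
            /* body of what's currently the local label */
            ...
    END(handle_exception_saved)

    FUNC(handle_exception)
            ...
            jmp   handle_exception_saved
    END(handle_exception)

With its own entry point and section, patching it uses the normal
entry-point trampoline, and references from entry_int80() and
handle_exception() need no fixing up.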

Thanks, Roger.
