
Re: [PATCH 5/5] x86: don't build unused entry code when !PV32


  • To: Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
  • From: Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Date: Mon, 28 Dec 2020 16:30:04 +0100
  • Cc: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Wei Liu <wl@xxxxxxx>
  • Delivery-date: Mon, 28 Dec 2020 15:30:33 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On Wed, Nov 25, 2020 at 09:51:33AM +0100, Jan Beulich wrote:
> Except for the initial part of cstar_enter, compat/entry.S is all dead
> code in this case. Further, along the lines of the PV conditionals we
> already have in entry.S, make code PV32-conditional there too (to a
> fair part because this code actually references compat/entry.S).
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> TBD: I'm on the fence of whether (in a separate patch) to also make
>      conditional struct pv_domain's is_32bit field.
> 
> --- a/xen/arch/x86/x86_64/asm-offsets.c
> +++ b/xen/arch/x86/x86_64/asm-offsets.c
> @@ -9,7 +9,7 @@
>  #include <xen/perfc.h>
>  #endif
>  #include <xen/sched.h>
> -#ifdef CONFIG_PV
> +#ifdef CONFIG_PV32
>  #include <compat/xen.h>
>  #endif
>  #include <asm/hardirq.h>
> @@ -102,19 +102,21 @@ void __dummy__(void)
>      BLANK();
>  #endif
>  
> -#ifdef CONFIG_PV
> +#ifdef CONFIG_PV32
>      OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.pv.is_32bit);
>      BLANK();
>  
> -    OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
> -    OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
> -    BLANK();
> -
>      OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, evtchn_upcall_pending);
>      OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask);
>      BLANK();
>  #endif
>  
> +#ifdef CONFIG_PV
> +    OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
> +    OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
> +    BLANK();
> +#endif
> +
>      OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
>      OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
>      OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
> --- a/xen/arch/x86/x86_64/compat/entry.S
> +++ b/xen/arch/x86/x86_64/compat/entry.S
> @@ -29,8 +29,6 @@ ENTRY(entry_int82)
>          mov   %rsp, %rdi
>          call  do_entry_int82
>  
> -#endif /* CONFIG_PV32 */
> -
>  /* %rbx: struct vcpu */
>  ENTRY(compat_test_all_events)
>          ASSERT_NOT_IN_ATOMIC
> @@ -197,6 +195,8 @@ ENTRY(cr4_pv32_restore)
>          xor   %eax, %eax
>          ret
>  
> +#endif /* CONFIG_PV32 */

I've also wondered about this: it feels weird to add CONFIG_PV32 gates
to compat/entry.S, since that file is supposed to be used only when
there's support for 32-bit PV guests.

Wouldn't this file only get built when such support is enabled?
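
I.e. something along the lines of the following, assuming the compat
entry code can be built as a standalone object (untested sketch; the
still-needed initial part of cstar_enter would then have to find a new
home):

    # xen/arch/x86/x86_64/compat/Makefile (hypothetical)
    # Only assemble the compat entry code when 32-bit PV support is
    # enabled, instead of #ifdef-ing inside the file itself.
    obj-bin-$(CONFIG_PV32) += entry.o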

> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -328,8 +328,10 @@ UNLIKELY_END(sysenter_gpf)
>          movq  VCPU_domain(%rbx),%rdi
>          movq  %rax,TRAPBOUNCE_eip(%rdx)
>          movb  %cl,TRAPBOUNCE_flags(%rdx)
> +#ifdef CONFIG_PV32
>          cmpb  $0, DOMAIN_is_32bit_pv(%rdi)
>          jne   compat_sysenter
> +#endif
>          jmp   .Lbounce_exception
>  
>  ENTRY(int80_direct_trap)
> @@ -370,6 +372,7 @@ UNLIKELY_END(msi_check)
>          mov    0x80 * TRAPINFO_sizeof + TRAPINFO_eip(%rsi), %rdi
>          movzwl 0x80 * TRAPINFO_sizeof + TRAPINFO_cs (%rsi), %ecx
>  
> +#ifdef CONFIG_PV32
>          mov   %ecx, %edx
>          and   $~3, %edx
>  
> @@ -378,6 +381,10 @@ UNLIKELY_END(msi_check)
>  
>          test  %rdx, %rdx
>          jz    int80_slow_path
> +#else
> +        test  %rdi, %rdi
> +        jz    int80_slow_path
> +#endif
>  
>          /* Construct trap_bounce from trap_ctxt[0x80]. */
>          lea   VCPU_trap_bounce(%rbx), %rdx
> @@ -390,8 +397,10 @@ UNLIKELY_END(msi_check)
>          lea   (, %rcx, TBF_INTERRUPT), %ecx
>          mov   %cl, TRAPBOUNCE_flags(%rdx)
>  
> +#ifdef CONFIG_PV32
>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>          jne   compat_int80_direct_trap
> +#endif
>  
>          call  create_bounce_frame
>          jmp   test_all_events
> @@ -541,12 +550,16 @@ ENTRY(dom_crash_sync_extable)
>          GET_STACK_END(ax)
>          leaq  STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
>          # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
> +#ifdef CONFIG_PV32
>          movq  STACK_CPUINFO_FIELD(current_vcpu)(%rax), %rax
>          movq  VCPU_domain(%rax),%rax
>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>          sete  %al
>          leal  (%rax,%rax,2),%eax
>          orb   %al,UREGS_cs(%rsp)
> +#else
> +        orb   $3, UREGS_cs(%rsp)
> +#endif
>          xorl  %edi,%edi
>          jmp   asm_domain_crash_synchronous /* Does not return */
>          .popsection
> @@ -562,11 +575,15 @@ ENTRY(ret_from_intr)
>          GET_CURRENT(bx)
>          testb $3, UREGS_cs(%rsp)
>          jz    restore_all_xen
> +#ifdef CONFIG_PV32
>          movq  VCPU_domain(%rbx), %rax
>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>          je    test_all_events
>          jmp   compat_test_all_events
>  #else
> +        jmp   test_all_events
> +#endif
> +#else
>          ASSERT_CONTEXT_IS_XEN
>          jmp   restore_all_xen
>  #endif
> @@ -652,7 +669,7 @@ handle_exception_saved:
>          testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
>          jz    exception_with_ints_disabled
>  
> -#ifdef CONFIG_PV
> +#if defined(CONFIG_PV32)
>          ALTERNATIVE_2 "jmp .Lcr4_pv32_done", \
>              __stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMEP, \
>              __stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMAP
> @@ -692,7 +709,7 @@ handle_exception_saved:
>          test  $~(PFEC_write_access|PFEC_insn_fetch),%eax
>          jz    compat_test_all_events
>  .Lcr4_pv32_done:
> -#else
> +#elif !defined(CONFIG_PV)
>          ASSERT_CONTEXT_IS_XEN
>  #endif /* CONFIG_PV */
>          sti
> @@ -711,9 +728,11 @@ handle_exception_saved:
>  #ifdef CONFIG_PV
>          testb $3,UREGS_cs(%rsp)
>          jz    restore_all_xen
> +#ifdef CONFIG_PV32
>          movq  VCPU_domain(%rbx),%rax
>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>          jne   compat_test_all_events
> +#endif
>          jmp   test_all_events
>  #else
>          ASSERT_CONTEXT_IS_XEN
> @@ -947,11 +966,16 @@ handle_ist_exception:
>          je    1f
>          movl  $EVENT_CHECK_VECTOR,%edi
>          call  send_IPI_self
> -1:      movq  VCPU_domain(%rbx),%rax
> +1:
> +#ifdef CONFIG_PV32
> +        movq  VCPU_domain(%rbx),%rax
>          cmpb  $0,DOMAIN_is_32bit_pv(%rax)
>          je    restore_all_guest
>          jmp   compat_restore_all_guest
>  #else
> +        jmp   restore_all_guest
> +#endif
> +#else
>          ASSERT_CONTEXT_IS_XEN
>          jmp   restore_all_xen
>  #endif

I would like to have Andrew's opinion on this one (as you and he tend
to modify more asm code than I do). There are quite a lot of additions
to the assembly code, and IMO they make it more complex, which I think
we should try to avoid, as assembly is already hard enough.
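
If some of this does go in, the repeated is_32bit_pv checks could maybe
be hidden behind a small helper macro, for example (untested, made-up
name):

    /*
     * Hypothetical helper: jump to \target if the domain in \dom is
     * 32-bit PV; expands to nothing when !CONFIG_PV32.
     */
    .macro JMP_IF_32BIT_PV dom:req, target:req
    #ifdef CONFIG_PV32
            cmpb  $0, DOMAIN_is_32bit_pv(\dom)
            jne   \target
    #endif
    .endm

That way the int80 path above would become a single
JMP_IF_32BIT_PV %rax, compat_int80_direct_trap, keeping most of the
#ifdef-ery out of the code paths themselves.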

Thanks, Roger.