[Xen-changelog] [xen stable-4.8] x86/XPTI: reduce .text.entry



commit 372583c2dd4363446cb5788a865138b9aca54767
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Apr 18 16:49:21 2018 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Apr 18 16:49:21 2018 +0200

    x86/XPTI: reduce .text.entry
    
    This exposes fewer code pieces and at the same time reduces the range
    covered from slightly above 3 pages to a little below 2 pages.
    
    The code being moved is unchanged, except for the removal of trailing
    blanks, the insertion of blanks between operands, and the dropping of
    a pointless q suffix from "retq".
    
    A few more small pieces could be moved, but it seems better to me to
    leave them where they are, so as not to make the code paths overly
    hard to follow.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    master commit: 454efb2a31b64b98e3dd55c083ce41b87375faa6
    master date: 2018-04-05 15:48:23 +0100
---
 xen/arch/x86/x86_64/compat/entry.S |   9 +-
 xen/arch/x86/x86_64/entry.S        | 209 ++++++++++++++++++-------------------
 2 files changed, 107 insertions(+), 111 deletions(-)
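
The mechanism the patch relies on is plain section switching: the assembler
emits instructions into whichever section was selected last, so a block of
code is "moved" out of the exposed range simply by placing it ahead of the
.section .text.entry directive (or behind a .text directive).  A minimal
illustrative sketch, not part of the patch, with hypothetical labels
example_entry_stub and example_slow_path:

        .section .text.entry, "ax", @progbits

example_entry_stub:
        /* Runs before the switch to Xen's full page tables, so it must
           live in the always-mapped entry section: save registers,
           switch %cr3, then leave the exposed range as soon as possible. */
        jmp   example_slow_path

        .text

example_slow_path:
        /* Reached only after the page-table switch; nothing here needs
           to be visible in the restricted XPTI mappings. */
        ret

This is the pattern of the hunks below: test_all_events, ret_from_intr and
enable_nmis move from .text.entry into .text, while the entry stubs
themselves stay put.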

diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 812f404771..6828f40f48 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -12,8 +12,6 @@
 #include <public/xen.h>
 #include <irq_vectors.h>
 
-        .section .text.entry, "ax", @progbits
-
 ENTRY(compat_hypercall)
         ASM_CLAC
         pushq $0
@@ -205,6 +203,8 @@ ENTRY(compat_post_handle_exception)
         movb  $0,TRAPBOUNCE_flags(%rdx)
         jmp   compat_test_all_events
 
+        .section .text.entry, "ax", @progbits
+
 /* See lstar_enter for entry register state. */
 ENTRY(cstar_enter)
         /* sti could live here when we don't switch page tables below. */
@@ -259,6 +259,8 @@ UNLIKELY_END(compat_syscall_gpf)
         movb  %cl,TRAPBOUNCE_flags(%rdx)
         jmp   .Lcompat_bounce_exception
 
+        .text
+
 ENTRY(compat_sysenter)
         CR4_PV32_RESTORE
         movq  VCPU_trap_ctxt(%rbx),%rcx
@@ -278,9 +280,6 @@ ENTRY(compat_int80_direct_trap)
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 
-        /* compat_create_bounce_frame & helpers don't need to be in .text.entry */
-        .text
-
 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
 /*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
 /* %rdx: trap_bounce, %rbx: struct vcpu                                  */
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index ad921c4df7..9510b37ef6 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -13,8 +13,6 @@
 #include <public/xen.h>
 #include <irq_vectors.h>
 
-        .section .text.entry, "ax", @progbits
-
 /* %rbx: struct vcpu */
 ENTRY(switch_to_kernel)
         leaq  VCPU_trap_bounce(%rbx),%rdx
@@ -33,8 +31,111 @@ ENTRY(switch_to_kernel)
         movb  %cl,TRAPBOUNCE_flags(%rdx)
         call  create_bounce_frame
         andl  $~X86_EFLAGS_DF,UREGS_eflags(%rsp)
+/* %rbx: struct vcpu */
+test_all_events:
+        ASSERT_NOT_IN_ATOMIC
+        cli                             # tests must not race interrupts
+/*test_softirqs:*/
+        movl  VCPU_processor(%rbx), %eax
+        shll  $IRQSTAT_shift, %eax
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip), %rcx
+        cmpl  $0, (%rcx, %rax, 1)
+        jne   process_softirqs
+        cmpb  $0, VCPU_mce_pending(%rbx)
+        jne   process_mce
+.Ltest_guest_nmi:
+        cmpb  $0, VCPU_nmi_pending(%rbx)
+        jne   process_nmi
+test_guest_events:
+        movq  VCPU_vcpu_info(%rbx), %rax
+        movzwl VCPUINFO_upcall_pending(%rax), %eax
+        decl  %eax
+        cmpl  $0xfe, %eax
+        ja    restore_all_guest
+/*process_guest_events:*/
+        sti
+        leaq  VCPU_trap_bounce(%rbx), %rdx
+        movq  VCPU_event_addr(%rbx), %rax
+        movq  %rax, TRAPBOUNCE_eip(%rdx)
+        movb  $TBF_INTERRUPT, TRAPBOUNCE_flags(%rdx)
+        call  create_bounce_frame
         jmp   test_all_events
 
+        ALIGN
+/* %rbx: struct vcpu */
+process_softirqs:
+        sti
+        call do_softirq
+        jmp  test_all_events
+
+        ALIGN
+/* %rbx: struct vcpu */
+process_mce:
+        testb $1 << VCPU_TRAP_MCE, VCPU_async_exception_mask(%rbx)
+        jnz  .Ltest_guest_nmi
+        sti
+        movb $0, VCPU_mce_pending(%rbx)
+        call set_guest_machinecheck_trapbounce
+        test %eax, %eax
+        jz   test_all_events
+        movzbl VCPU_async_exception_mask(%rbx), %edx # save mask for the
+        movb %dl, VCPU_mce_old_mask(%rbx)            # iret hypercall
+        orl  $1 << VCPU_TRAP_MCE, %edx
+        movb %dl, VCPU_async_exception_mask(%rbx)
+        jmp  process_trap
+
+        ALIGN
+/* %rbx: struct vcpu */
+process_nmi:
+        testb $1 << VCPU_TRAP_NMI, VCPU_async_exception_mask(%rbx)
+        jnz  test_guest_events
+        sti
+        movb $0, VCPU_nmi_pending(%rbx)
+        call set_guest_nmi_trapbounce
+        test %eax, %eax
+        jz   test_all_events
+        movzbl VCPU_async_exception_mask(%rbx), %edx # save mask for the
+        movb %dl, VCPU_nmi_old_mask(%rbx)            # iret hypercall
+        orl  $1 << VCPU_TRAP_NMI, %edx
+        movb %dl, VCPU_async_exception_mask(%rbx)
+        /* FALLTHROUGH */
+process_trap:
+        leaq VCPU_trap_bounce(%rbx), %rdx
+        call create_bounce_frame
+        jmp  test_all_events
+
+/* No special register assumptions. */
+ENTRY(ret_from_intr)
+        GET_CURRENT(bx)
+        testb $3, UREGS_cs(%rsp)
+        jz    restore_all_xen
+        movq  VCPU_domain(%rbx), %rax
+        cmpb  $0, DOMAIN_is_32bit_pv(%rax)
+        je    test_all_events
+        jmp   compat_test_all_events
+
+/* Enable NMIs.  No special register assumptions. Only %rax is not preserved. */
+ENTRY(enable_nmis)
+        movq  %rsp, %rax /* Grab RSP before pushing */
+
+        /* Set up stack frame */
+        pushq $0               /* SS */
+        pushq %rax             /* RSP */
+        pushfq                 /* RFLAGS */
+        pushq $__HYPERVISOR_CS /* CS */
+        leaq  1f(%rip),%rax
+        pushq %rax             /* RIP */
+
+/* No op trap handler.  Required for kexec crash path. */
+GLOBAL(trap_nop)
+        iretq /* Disable the hardware NMI latch */
+1:
+        retq
+       .type enable_nmis, @function
+       .size enable_nmis, .-enable_nmis
+
+        .section .text.entry, "ax", @progbits
+
 /* %rbx: struct vcpu, interrupts disabled */
 restore_all_guest:
         ASSERT_INTERRUPTS_DISABLED
@@ -188,80 +289,8 @@ ENTRY(lstar_enter)
 
         mov   %rsp, %rdi
         call  pv_hypercall
-
-/* %rbx: struct vcpu */
-test_all_events:
-        ASSERT_NOT_IN_ATOMIC
-        cli                             # tests must not race interrupts
-/*test_softirqs:*/  
-        movl  VCPU_processor(%rbx),%eax
-        shll  $IRQSTAT_shift,%eax
-        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
-        cmpl  $0,(%rcx,%rax,1)
-        jne   process_softirqs
-        testb $1,VCPU_mce_pending(%rbx)
-        jnz   process_mce
-.Ltest_guest_nmi:
-        testb $1,VCPU_nmi_pending(%rbx)
-        jnz   process_nmi
-test_guest_events:
-        movq  VCPU_vcpu_info(%rbx),%rax
-        movzwl VCPUINFO_upcall_pending(%rax),%eax
-        decl  %eax
-        cmpl  $0xfe,%eax
-        ja    restore_all_guest
-/*process_guest_events:*/
-        sti
-        leaq  VCPU_trap_bounce(%rbx),%rdx
-        movq  VCPU_event_addr(%rbx),%rax
-        movq  %rax,TRAPBOUNCE_eip(%rdx)
-        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
-        call  create_bounce_frame
         jmp   test_all_events
 
-        ALIGN
-/* %rbx: struct vcpu */
-process_softirqs:
-        sti       
-        call do_softirq
-        jmp  test_all_events
-
-        ALIGN
-/* %rbx: struct vcpu */
-process_mce:
-        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
-        jnz  .Ltest_guest_nmi
-        sti
-        movb $0,VCPU_mce_pending(%rbx)
-        call set_guest_machinecheck_trapbounce
-        test %eax,%eax
-        jz   test_all_events
-        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
-        movb %dl,VCPU_mce_old_mask(%rbx)            # iret hypercall
-        orl  $1 << VCPU_TRAP_MCE,%edx
-        movb %dl,VCPU_async_exception_mask(%rbx)
-        jmp  process_trap
-
-        ALIGN
-/* %rbx: struct vcpu */
-process_nmi:
-        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
-        jnz  test_guest_events
-        sti
-        movb $0,VCPU_nmi_pending(%rbx)
-        call set_guest_nmi_trapbounce
-        test %eax,%eax
-        jz   test_all_events
-        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
-        movb %dl,VCPU_nmi_old_mask(%rbx)            # iret hypercall
-        orl  $1 << VCPU_TRAP_NMI,%edx
-        movb %dl,VCPU_async_exception_mask(%rbx)
-        /* FALLTHROUGH */
-process_trap:
-        leaq VCPU_trap_bounce(%rbx),%rdx
-        call create_bounce_frame
-        jmp  test_all_events
-
 ENTRY(sysenter_entry)
         /* sti could live here when we don't switch page tables below. */
         pushq $FLAT_USER_SS
@@ -530,16 +559,6 @@ ENTRY(common_interrupt)
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         jmp ret_from_intr
 
-/* No special register assumptions. */
-ENTRY(ret_from_intr)
-        GET_CURRENT(bx)
-        testb $3,UREGS_cs(%rsp)
-        jz    restore_all_xen
-        movq  VCPU_domain(%rbx),%rax
-        testb $1,DOMAIN_is_32bit_pv(%rax)
-        jz    test_all_events
-        jmp   compat_test_all_events
-
 ENTRY(page_fault)
         movl  $TRAP_page_fault,4(%rsp)
 /* No special register assumptions. */
@@ -838,28 +857,6 @@ ENTRY(machine_check)
         movl  $TRAP_machine_check,4(%rsp)
         jmp   handle_ist_exception
 
-/* Enable NMIs.  No special register assumptions. Only %rax is not preserved. */
-ENTRY(enable_nmis)
-        movq  %rsp, %rax /* Grab RSP before pushing */
-
-        /* Set up stack frame */
-        pushq $0               /* SS */
-        pushq %rax             /* RSP */
-        pushfq                 /* RFLAGS */
-        pushq $__HYPERVISOR_CS /* CS */
-        leaq  1f(%rip),%rax
-        pushq %rax             /* RIP */
-
-        iretq /* Disable the hardware NMI latch */
-1:
-        retq
-
-/* No op trap handler.  Required for kexec crash path. */
-GLOBAL(trap_nop)
-        iretq
-
-
-
         .pushsection .rodata, "a", @progbits
 ENTRY(exception_table)
         .quad do_trap
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.8
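
Background for the "slightly above 3 pages" / "a little below 2" figures in
the description: the linker groups everything assembled into .text.entry
into one contiguous, page-aligned block, and the restricted XPTI page
tables keep only a small set of ranges mapped while guest page tables are
active, this block among them.  A rough sketch of the relevant xen.lds.S
fragment (symbol names vary between Xen versions and are only illustrative
here):

        . = ALIGN(PAGE_SIZE);
        _stextentry = .;        /* start of the always-mapped entry text */
        *(.text.entry)
        . = ALIGN(PAGE_SIZE);
        _etextentry = .;        /* end of the always-mapped entry text   */

Shrinking the contents of .text.entry therefore directly shrinks the number
of pages between the two ALIGN points, which is what the commit message
measures.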
