[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen master] x86/XPTI: reduce .text.entry



commit 454efb2a31b64b98e3dd55c083ce41b87375faa6
Author:     Jan Beulich <JBeulich@xxxxxxxx>
AuthorDate: Mon Mar 19 07:40:12 2018 -0600
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Apr 5 15:48:23 2018 +0100

    x86/XPTI: reduce .text.entry
    
    This exposes less code pieces and at the same time reduces the range
    covered from slightly above 3 pages to a little below 2 of them.
    
    The code being moved is unchanged, except for the removal of trailing
    blanks, insertion of blanks between operands, and a pointless q suffix
    from "retq".
    
    A few more small pieces could be moved, but it seems better to me to
    leave them where they are to not make it overly hard to follow code
    paths.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/x86_64/compat/entry.S |   9 +-
 xen/arch/x86/x86_64/entry.S        | 167 ++++++++++++++++++-------------------
 2 files changed, 87 insertions(+), 89 deletions(-)

diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index f52bffc3f4..6c7fcf95b3 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -13,8 +13,6 @@
 #include <public/xen.h>
 #include <irq_vectors.h>
 
-        .section .text.entry, "ax", @progbits
-
 ENTRY(entry_int82)
         ASM_CLAC
         pushq $0
@@ -192,6 +190,8 @@ ENTRY(compat_post_handle_exception)
         movb  $0,TRAPBOUNCE_flags(%rdx)
         jmp   compat_test_all_events
 
+        .section .text.entry, "ax", @progbits
+
 /* See lstar_enter for entry register state. */
 ENTRY(cstar_enter)
         /* sti could live here when we don't switch page tables below. */
@@ -246,6 +246,8 @@ UNLIKELY_END(compat_syscall_gpf)
         movb  %cl,TRAPBOUNCE_flags(%rdx)
         jmp   .Lcompat_bounce_exception
 
+        .text
+
 ENTRY(compat_sysenter)
         CR4_PV32_RESTORE
         movq  VCPU_trap_ctxt(%rbx),%rcx
@@ -265,9 +267,6 @@ ENTRY(compat_int80_direct_trap)
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 
-        /* compat_create_bounce_frame & helpers don't need to be in .text.entry */
-        .text
-
 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
 /*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
 /* %rdx: trap_bounce, %rbx: struct vcpu                                  */
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index e3d18daba8..45d9842d09 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -14,8 +14,6 @@
 #include <public/xen.h>
 #include <irq_vectors.h>
 
-        .section .text.entry, "ax", @progbits
-
 /* %rbx: struct vcpu */
 ENTRY(switch_to_kernel)
         leaq  VCPU_trap_bounce(%rbx),%rdx
@@ -34,8 +32,91 @@ ENTRY(switch_to_kernel)
         movb  %cl,TRAPBOUNCE_flags(%rdx)
         call  create_bounce_frame
         andl  $~X86_EFLAGS_DF,UREGS_eflags(%rsp)
+/* %rbx: struct vcpu */
+test_all_events:
+        ASSERT_NOT_IN_ATOMIC
+        cli                             # tests must not race interrupts
+/*test_softirqs:*/
+        movl  VCPU_processor(%rbx), %eax
+        shll  $IRQSTAT_shift, %eax
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip), %rcx
+        cmpl  $0, (%rcx, %rax, 1)
+        jne   process_softirqs
+        cmpb  $0, VCPU_mce_pending(%rbx)
+        jne   process_mce
+.Ltest_guest_nmi:
+        cmpb  $0, VCPU_nmi_pending(%rbx)
+        jne   process_nmi
+test_guest_events:
+        movq  VCPU_vcpu_info(%rbx), %rax
+        movzwl VCPUINFO_upcall_pending(%rax), %eax
+        decl  %eax
+        cmpl  $0xfe, %eax
+        ja    restore_all_guest
+/*process_guest_events:*/
+        sti
+        leaq  VCPU_trap_bounce(%rbx), %rdx
+        movq  VCPU_event_addr(%rbx), %rax
+        movq  %rax, TRAPBOUNCE_eip(%rdx)
+        movb  $TBF_INTERRUPT, TRAPBOUNCE_flags(%rdx)
+        call  create_bounce_frame
         jmp   test_all_events
 
+        ALIGN
+/* %rbx: struct vcpu */
+process_softirqs:
+        sti
+        call do_softirq
+        jmp  test_all_events
+
+        ALIGN
+/* %rbx: struct vcpu */
+process_mce:
+        testb $1 << VCPU_TRAP_MCE, VCPU_async_exception_mask(%rbx)
+        jnz  .Ltest_guest_nmi
+        sti
+        movb $0, VCPU_mce_pending(%rbx)
+        call set_guest_machinecheck_trapbounce
+        test %eax, %eax
+        jz   test_all_events
+        movzbl VCPU_async_exception_mask(%rbx), %edx # save mask for the
+        movb %dl, VCPU_mce_old_mask(%rbx)            # iret hypercall
+        orl  $1 << VCPU_TRAP_MCE, %edx
+        movb %dl, VCPU_async_exception_mask(%rbx)
+        jmp  process_trap
+
+        ALIGN
+/* %rbx: struct vcpu */
+process_nmi:
+        testb $1 << VCPU_TRAP_NMI, VCPU_async_exception_mask(%rbx)
+        jnz  test_guest_events
+        sti
+        movb $0, VCPU_nmi_pending(%rbx)
+        call set_guest_nmi_trapbounce
+        test %eax, %eax
+        jz   test_all_events
+        movzbl VCPU_async_exception_mask(%rbx), %edx # save mask for the
+        movb %dl, VCPU_nmi_old_mask(%rbx)            # iret hypercall
+        orl  $1 << VCPU_TRAP_NMI, %edx
+        movb %dl, VCPU_async_exception_mask(%rbx)
+        /* FALLTHROUGH */
+process_trap:
+        leaq VCPU_trap_bounce(%rbx), %rdx
+        call create_bounce_frame
+        jmp  test_all_events
+
+/* No special register assumptions. */
+ENTRY(ret_from_intr)
+        GET_CURRENT(bx)
+        testb $3, UREGS_cs(%rsp)
+        jz    restore_all_xen
+        movq  VCPU_domain(%rbx), %rax
+        cmpb  $0, DOMAIN_is_32bit_pv(%rax)
+        je    test_all_events
+        jmp   compat_test_all_events
+
+        .section .text.entry, "ax", @progbits
+
 /* %rbx: struct vcpu, interrupts disabled */
 restore_all_guest:
         ASSERT_INTERRUPTS_DISABLED
@@ -190,80 +271,8 @@ ENTRY(lstar_enter)
 
         mov   %rsp, %rdi
         call  pv_hypercall
-
-/* %rbx: struct vcpu */
-test_all_events:
-        ASSERT_NOT_IN_ATOMIC
-        cli                             # tests must not race interrupts
-/*test_softirqs:*/  
-        movl  VCPU_processor(%rbx),%eax
-        shll  $IRQSTAT_shift,%eax
-        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
-        cmpl  $0,(%rcx,%rax,1)
-        jne   process_softirqs
-        cmpb  $0, VCPU_mce_pending(%rbx)
-        jne   process_mce
-.Ltest_guest_nmi:
-        cmpb  $0, VCPU_nmi_pending(%rbx)
-        jne   process_nmi
-test_guest_events:
-        movq  VCPU_vcpu_info(%rbx),%rax
-        movzwl VCPUINFO_upcall_pending(%rax),%eax
-        decl  %eax
-        cmpl  $0xfe,%eax
-        ja    restore_all_guest
-/*process_guest_events:*/
-        sti
-        leaq  VCPU_trap_bounce(%rbx),%rdx
-        movq  VCPU_event_addr(%rbx),%rax
-        movq  %rax,TRAPBOUNCE_eip(%rdx)
-        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
-        call  create_bounce_frame
         jmp   test_all_events
 
-        ALIGN
-/* %rbx: struct vcpu */
-process_softirqs:
-        sti       
-        call do_softirq
-        jmp  test_all_events
-
-        ALIGN
-/* %rbx: struct vcpu */
-process_mce:
-        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
-        jnz  .Ltest_guest_nmi
-        sti
-        movb $0,VCPU_mce_pending(%rbx)
-        call set_guest_machinecheck_trapbounce
-        test %eax,%eax
-        jz   test_all_events
-        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
-        movb %dl,VCPU_mce_old_mask(%rbx)            # iret hypercall
-        orl  $1 << VCPU_TRAP_MCE,%edx
-        movb %dl,VCPU_async_exception_mask(%rbx)
-        jmp  process_trap
-
-        ALIGN
-/* %rbx: struct vcpu */
-process_nmi:
-        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
-        jnz  test_guest_events
-        sti
-        movb $0,VCPU_nmi_pending(%rbx)
-        call set_guest_nmi_trapbounce
-        test %eax,%eax
-        jz   test_all_events
-        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
-        movb %dl,VCPU_nmi_old_mask(%rbx)            # iret hypercall
-        orl  $1 << VCPU_TRAP_NMI,%edx
-        movb %dl,VCPU_async_exception_mask(%rbx)
-        /* FALLTHROUGH */
-process_trap:
-        leaq VCPU_trap_bounce(%rbx),%rdx
-        call create_bounce_frame
-        jmp  test_all_events
-
 ENTRY(sysenter_entry)
         /* sti could live here when we don't switch page tables below. */
         pushq $FLAT_USER_SS
@@ -562,16 +571,6 @@ ENTRY(common_interrupt)
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         jmp ret_from_intr
 
-/* No special register assumptions. */
-ENTRY(ret_from_intr)
-        GET_CURRENT(bx)
-        testb $3,UREGS_cs(%rsp)
-        jz    restore_all_xen
-        movq  VCPU_domain(%rbx),%rax
-        cmpb  $0, DOMAIN_is_32bit_pv(%rax)
-        je    test_all_events
-        jmp   compat_test_all_events
-
 ENTRY(page_fault)
         movl  $TRAP_page_fault,4(%rsp)
 /* No special register assumptions. */
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.