[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[xen master] x86/entry: Avoid register spilling in cr4_pv32_restore()



commit b24b540de613dad0f432bb606ea1eb935a8a89cf
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Feb 10 21:20:42 2023 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Jan 25 17:46:57 2024 +0000

    x86/entry: Avoid register spilling in cr4_pv32_restore()
    
    cr4_pv32_restore() needs two registers.  Right now, it spills %rdx and
    clobbers %rax.
    
    However, %rcx is free to use at all callsites.  Annotate CR4_PV32_RESTORE
    with our usual clobber comments, and swap %rdx for %rcx in the non-fatal
    paths.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/x86_64/compat/entry.S | 17 +++++++----------
 xen/arch/x86/x86_64/entry.S        |  8 ++++----
 2 files changed, 11 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 49811a56e9..d4f0e48040 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -23,7 +23,7 @@ FUNC(entry_int82)
 
         sti
 
-        CR4_PV32_RESTORE
+        CR4_PV32_RESTORE /* Clob: ac */
 
         GET_CURRENT(bx)
 
@@ -163,17 +163,15 @@ FUNC(compat_restore_all_guest)
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
 END(compat_restore_all_guest)
 
-/* This mustn't modify registers other than %rax. */
+/* Callers can cope with both %rax and %rcx being clobbered. */
 FUNC(cr4_pv32_restore)
-        push  %rdx
-        GET_CPUINFO_FIELD(cr4, dx)
-        mov   (%rdx), %rax
+        GET_CPUINFO_FIELD(cr4, cx)
+        mov   (%rcx), %rax
         test  $XEN_CR4_PV32_BITS, %eax
         jnz   0f
         or    cr4_pv32_mask(%rip), %rax
         mov   %rax, %cr4
-        mov   %rax, (%rdx)
-        pop   %rdx
+        mov   %rax, (%rcx)
         ret
 0:
 #ifndef NDEBUG
@@ -191,7 +189,6 @@ FUNC(cr4_pv32_restore)
         BUG
 1:
 #endif
-        pop   %rdx
         xor   %eax, %eax
         ret
 END(cr4_pv32_restore)
@@ -227,7 +224,7 @@ UNLIKELY_END(compat_syscall_gpf)
 END(compat_syscall)
 
 FUNC(compat_sysenter)
-        CR4_PV32_RESTORE
+        CR4_PV32_RESTORE /* Clob: ac */
         movq  VCPU_trap_ctxt(%rbx),%rcx
         cmpb  $X86_EXC_GP, UREGS_entry_vector(%rsp)
         movzwl VCPU_sysenter_sel(%rbx),%eax
@@ -242,7 +239,7 @@ FUNC(compat_sysenter)
 END(compat_sysenter)
 
 FUNC(compat_int80_direct_trap)
-        CR4_PV32_RESTORE
+        CR4_PV32_RESTORE /* Clob: ac */
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 END(compat_int80_direct_trap)
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 5dab3e4832..44f25def86 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -309,7 +309,7 @@ FUNC(cstar_enter)
 .Lcstar_cr3_okay:
         sti
 
-        CR4_PV32_RESTORE
+        CR4_PV32_RESTORE /* Clob: ac */
 
         movq  STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
 
@@ -712,7 +712,7 @@ FUNC(common_interrupt)
         cmovnz %r12d, %ebx
 .Lintr_cr3_okay:
 
-        CR4_PV32_RESTORE
+        CR4_PV32_RESTORE /* Clob: ac */
         movq %rsp,%rdi
         callq do_IRQ
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
@@ -762,7 +762,7 @@ handle_exception_saved:
         jz    .Lcr4_pv32_done
         cmpb  $0,DOMAIN_is_32bit_pv(%rax)
         je    .Lcr4_pv32_done
-        call  cr4_pv32_restore
+        call  cr4_pv32_restore /* Clob: ac */
         /*
          * An NMI or #MC may occur between clearing CR4.SMEP / CR4.SMAP in
          * compat_restore_all_guest and it actually returning to guest
@@ -1046,7 +1046,7 @@ FUNC(handle_ist_exception)
 .List_cr3_okay:
 
 #ifdef CONFIG_PV
-        CR4_PV32_RESTORE
+        CR4_PV32_RESTORE /* Clob: ac */
         testb $3,UREGS_cs(%rsp)
         jz    1f
         /*
--
generated by git-patchbot for /home/xen/git/xen.git#master



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.