
[Xen-changelog] [xen master] SVM: streamline entry.S code

commit 110b2d6e8aa2a35d8f9d8efc3f3bfda3c49a3855
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Sep 9 10:24:21 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Sep 9 10:24:21 2013 +0200

    SVM: streamline entry.S code
    
    - fix a bogus "test" with zero immediate
    - move stuff easily/better done in C into C code
    - re-arrange code paths so that no redundant GET_CURRENT() would remain
      on the fast paths
    - move long latency operations earlier
    - slightly defer disabling global interrupts on the VM entry path
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
---
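Note on the "bogus test" fix: "test" with a zero immediate ANDs its
operand with zero, so the result is always zero, ZF is always set, and
the "nz" branch it guarded could never be taken.  The replacement keeps
a register zeroed and compares the byte against it, as in the entry.S
hunk below:

        testb $0,VCPU_nsvm_hap_enabled(%rbx)  # x & 0 == 0: ZF=1, "nz" never taken

        xor  %ecx,%ecx                        # %ecx/%cl == 0 from here on
        cmp  %cl,VCPU_nsvm_hap_enabled(%rbx)  # ZF set iff the byte is zero
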
 xen/arch/x86/hvm/svm/entry.S    |   61 +++++++++++++++-----------------------
 xen/arch/x86/hvm/svm/svm.c      |    2 +
 xen/include/asm-x86/asm_defns.h |   10 +++++-
 3 files changed, 34 insertions(+), 39 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index 1969629..3ee4247 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -32,28 +32,34 @@
 #define CLGI   .byte 0x0F,0x01,0xDD
 
 ENTRY(svm_asm_do_resume)
+        GET_CURRENT(%rbx)
+.Lsvm_do_resume:
         call svm_intr_assist
         mov  %rsp,%rdi
         call nsvm_vcpu_switch
         ASSERT_NOT_IN_ATOMIC
 
-        GET_CURRENT(%rbx)
-        CLGI
-
         mov  VCPU_processor(%rbx),%eax
-        shl  $IRQSTAT_shift,%eax
         lea  irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
-        cmpl $0,(%rdx,%rax,1)
+        xor  %ecx,%ecx
+        shl  $IRQSTAT_shift,%eax
+        CLGI
+        cmp  %ecx,(%rdx,%rax,1)
         jne  .Lsvm_process_softirqs
 
-        testb $0, VCPU_nsvm_hap_enabled(%rbx)
-UNLIKELY_START(nz, nsvm_hap)
-        mov  VCPU_nhvm_p2m(%rbx),%rax
-        test %rax,%rax
+        cmp  %cl,VCPU_nsvm_hap_enabled(%rbx)
+UNLIKELY_START(ne, nsvm_hap)
+        cmp  %rcx,VCPU_nhvm_p2m(%rbx)
         sete %al
-        andb VCPU_nhvm_guestmode(%rbx),%al
-        jnz  .Lsvm_nsvm_no_p2m
-UNLIKELY_END(nsvm_hap)
+        test VCPU_nhvm_guestmode(%rbx),%al
+        UNLIKELY_DONE(z, nsvm_hap)
+        /*
+         * Someone shot down our nested p2m table; go round again
+         * and nsvm_vcpu_switch() will fix it for us.
+         */
+        STGI
+        jmp  .Lsvm_do_resume
+__UNLIKELY_END(nsvm_hap)
 
         call svm_asid_handle_vmrun
 
@@ -72,13 +78,12 @@ UNLIKELY_END(svm_trace)
         mov  UREGS_eflags(%rsp),%rax
         mov  %rax,VMCB_rflags(%rcx)
 
-        mov  VCPU_svm_vmcb_pa(%rbx),%rax
-
         pop  %r15
         pop  %r14
         pop  %r13
         pop  %r12
         pop  %rbp
+        mov  VCPU_svm_vmcb_pa(%rbx),%rax
         pop  %rbx
         pop  %r11
         pop  %r10
@@ -92,25 +97,26 @@ UNLIKELY_END(svm_trace)
 
         VMRUN
 
+        GET_CURRENT(%rax)
         push %rdi
         push %rsi
         push %rdx
         push %rcx
+        mov  VCPU_svm_vmcb(%rax),%rcx
         push %rax
         push %r8
         push %r9
         push %r10
         push %r11
         push %rbx
+        mov  %rax,%rbx
         push %rbp
         push %r12
         push %r13
         push %r14
         push %r15
 
-        GET_CURRENT(%rbx)
         movb $0,VCPU_svm_vmcb_in_sync(%rbx)
-        mov  VCPU_svm_vmcb(%rbx),%rcx
         mov  VMCB_rax(%rcx),%rax
         mov  %rax,UREGS_rax(%rsp)
         mov  VMCB_rip(%rcx),%rax
@@ -120,33 +126,14 @@ UNLIKELY_END(svm_trace)
         mov  VMCB_rflags(%rcx),%rax
         mov  %rax,UREGS_eflags(%rsp)
 
-#ifndef NDEBUG
-        mov  $0xbeef,%ax
-        mov  %ax,UREGS_error_code(%rsp)
-        mov  %ax,UREGS_entry_vector(%rsp)
-        mov  %ax,UREGS_saved_upcall_mask(%rsp)
-        mov  %ax,UREGS_cs(%rsp)
-        mov  %ax,UREGS_ds(%rsp)
-        mov  %ax,UREGS_es(%rsp)
-        mov  %ax,UREGS_fs(%rsp)
-        mov  %ax,UREGS_gs(%rsp)
-        mov  %ax,UREGS_ss(%rsp)
-#endif
-
         STGI
 .globl svm_stgi_label
 svm_stgi_label:
         mov  %rsp,%rdi
         call svm_vmexit_handler
-        jmp  svm_asm_do_resume
+        jmp  .Lsvm_do_resume
 
 .Lsvm_process_softirqs:
         STGI
         call do_softirq
-        jmp  svm_asm_do_resume
-
-.Lsvm_nsvm_no_p2m:
-        /* Someone shot down our nested p2m table; go round again
-         * and nsvm_vcpu_switch() will fix it for us. */
-        STGI
-        jmp  svm_asm_do_resume
+        jmp  .Lsvm_do_resume
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index f74265a..695b53a 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2069,6 +2069,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     vintr_t intr;
     bool_t vcpu_guestmode = 0;
 
+    hvm_invalidate_regs_fields(regs);
+
     if ( paging_mode_hap(v->domain) )
         v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] =
             vmcb_get_cr3(vmcb);
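
The hvm_invalidate_regs_fields() call replaces the !NDEBUG poisoning
block removed from entry.S above: these fields are not written by the
#VMEXIT register save sequence, so debug builds fill them with a
recognizable pattern to catch accidental use.  A minimal sketch of such
a helper, mirroring the removed assembly (field names assumed from the
UREGS_* offsets; the 8-bit saved_upcall_mask cannot hold 0xbeef):

    static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
    {
    #ifndef NDEBUG
        regs->error_code = 0xbeef;       /* 16-bit fields take the full pattern */
        regs->entry_vector = 0xbeef;
        regs->saved_upcall_mask = 0xbf;  /* truncated for the 8-bit field */
        regs->cs = 0xbeef;
        regs->ds = 0xbeef;
        regs->es = 0xbeef;
        regs->fs = 0xbeef;
        regs->gs = 0xbeef;
        regs->ss = 0xbeef;
    #endif
    }
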
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index 8fc1a2c..25032d5 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -39,11 +39,17 @@ void ret_from_intr(void);
         .subsection 1;            \
         .Lunlikely.tag:
 
-#define UNLIKELY_END(tag)         \
-        jmp .Llikely.tag;         \
+#define UNLIKELY_DONE(cond, tag)  \
+        j##cond .Llikely.tag
+
+#define __UNLIKELY_END(tag)       \
         .subsection 0;            \
         .Llikely.tag:
 
+#define UNLIKELY_END(tag)         \
+        UNLIKELY_DONE(mp, tag);   \
+        __UNLIKELY_END(tag)
+
 #define STACK_CPUINFO_FIELD(field) (STACK_SIZE-CPUINFO_sizeof+CPUINFO_##field)
 #define GET_STACK_BASE(reg)                       \
         movq $~(STACK_SIZE-1),reg;                \
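
The asm_defns.h change splits UNLIKELY_END() so an out-of-line block can
rejoin the likely path early on a condition.  Token pasting keeps the old
behaviour as a special case: j##cond with cond=mp yields the unconditional
"jmp".  For a tag "tag" the macros expand roughly as:

    UNLIKELY_DONE(z, tag)   ->  jz .Llikely.tag
    __UNLIKELY_END(tag)     ->  .subsection 0; .Llikely.tag:
    UNLIKELY_END(tag)       ->  jmp .Llikely.tag; .subsection 0; .Llikely.tag:

The nsvm_hap block in entry.S above uses the split form: UNLIKELY_DONE(z,
nsvm_hap) returns to the fast path when there is no nested-p2m work to do,
while the STGI/jmp retry sequence stays out of line until
__UNLIKELY_END(nsvm_hap).
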
--
generated by git-patchbot for /home/xen/git/xen.git#master
