
[Xen-changelog] [xen-unstable] svm: Rationalise register synchronisation to be similar to our vmx



# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1178837344 -3600
# Node ID 60240a72e2b2676a6af652a869746dd6552eed2a
# Parent  e19ddfa781c5f9c8c4e30b63baf12d3cdefc4e0e
svm: Rationalise register synchronisation to be similar to our vmx
handling.
 1. Do not copy all VMCB register state in cpu_user_regs on every
 vmexit.
 2. Save/restore RAX inside asm stub (in particular, before STGI on
 vmexit).
 3. Simplify store/load_cpu_guest_regs() hook functions to synchronise
 precisely the same state as VMX.

By my measurements this reduces the round-trip latency for a null
hypercall by around 150 cycles. This is about 3% of the ~5000-cycle
total on my AMD X2 system. Not a great win, but a nice extra on top of
the code rationalisation.
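
For reference, after this change the two hook functions boil down to the
following simplified C sketch. The struct definitions are stand-ins for the
real Xen types, the function names are shortened, and the tail of the load
hook (CS and RIP) is inferred from the store side and from the stated goal of
matching VMX; the authoritative code is in the diff below.

    #include <stdint.h>

    /* Illustrative stand-ins for the Xen structures involved. */
    struct seg           { uint16_t sel; };
    struct vmcb_struct   { struct seg cs, ss; uint64_t rip, rsp, rflags; };
    struct cpu_user_regs { uint16_t cs, ss; uint64_t eip, esp, eflags; };

    /* The hooks now synchronise exactly the VMX set: SS, SP, FLAGS, CS, IP. */
    static void store_guest_regs(const struct vmcb_struct *vmcb,
                                 struct cpu_user_regs *regs)
    {
        regs->ss     = vmcb->ss.sel;
        regs->esp    = vmcb->rsp;
        regs->eflags = vmcb->rflags;
        regs->cs     = vmcb->cs.sel;
        regs->eip    = vmcb->rip;
    }

    static void load_guest_regs(struct vmcb_struct *vmcb,
                                const struct cpu_user_regs *regs)
    {
        vmcb->ss.sel = regs->ss;
        vmcb->rsp    = regs->esp;
        vmcb->rflags = regs->eflags | 2UL; /* bit 1 of RFLAGS must be set */
        vmcb->cs.sel = regs->cs;           /* inferred: mirrors the store hook */
        vmcb->rip    = regs->eip;
    }

Everything else (the other GPRs via the exit stub pushes, and RAX via
VMCB_rax) is handled in the assembly below, so no VMCB accessor runs on the
hot path.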
---
 xen/arch/x86/hvm/svm/svm.c          |   29 --------
 xen/arch/x86/hvm/svm/x86_32/exits.S |   98 ++++++++--------------------
 xen/arch/x86/hvm/svm/x86_64/exits.S |  123 ++++++++++++------------------------
 3 files changed, 73 insertions(+), 177 deletions(-)

diff -r e19ddfa781c5 -r 60240a72e2b2 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu May 10 22:54:43 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu May 10 23:49:04 2007 +0100
@@ -110,15 +110,11 @@ static void svm_store_cpu_guest_regs(
 
     if ( regs != NULL )
     {
-        regs->eip    = vmcb->rip;
+        regs->ss     = vmcb->ss.sel;
         regs->esp    = vmcb->rsp;
         regs->eflags = vmcb->rflags;
         regs->cs     = vmcb->cs.sel;
-        regs->ds     = vmcb->ds.sel;
-        regs->es     = vmcb->es.sel;
-        regs->ss     = vmcb->ss.sel;
-        regs->gs     = vmcb->gs.sel;
-        regs->fs     = vmcb->fs.sel;
+        regs->eip    = vmcb->rip;
     }
 
     if ( crs != NULL )
@@ -752,28 +748,10 @@ static void svm_init_hypercall_page(stru
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
 
-static void save_svm_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *ctxt)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    ctxt->eax = vmcb->rax;
-    ctxt->ss = vmcb->ss.sel;
-    ctxt->esp = vmcb->rsp;
-    ctxt->eflags = vmcb->rflags;
-    ctxt->cs = vmcb->cs.sel;
-    ctxt->eip = vmcb->rip;
-    
-    ctxt->gs = vmcb->gs.sel;
-    ctxt->fs = vmcb->fs.sel;
-    ctxt->es = vmcb->es.sel;
-    ctxt->ds = vmcb->ds.sel;
-}
-
 static void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    
-    vmcb->rax      = regs->eax;
+
     vmcb->ss.sel   = regs->ss;
     vmcb->rsp      = regs->esp;   
     vmcb->rflags   = regs->eflags | 2UL;
@@ -2242,7 +2220,6 @@ asmlinkage void svm_vmexit_handler(struc
     int inst_len, rc;
 
     exit_reason = vmcb->exitcode;
-    save_svm_cpu_user_regs(v, regs);
 
     HVMTRACE_2D(VMEXIT, v, vmcb->rip, exit_reason);
 
diff -r e19ddfa781c5 -r 60240a72e2b2 xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S       Thu May 10 22:54:43 2007 +0100
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S       Thu May 10 23:49:04 2007 +0100
@@ -16,6 +16,7 @@
  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  */
+
 #include <xen/config.h>
 #include <xen/errno.h>
 #include <xen/softirq.h>
@@ -25,60 +26,23 @@
 #include <public/xen.h>
 
 #define GET_CURRENT(reg)         \
-        movl $STACK_SIZE-4, reg; \
-        orl  %esp, reg;          \
+        movl $STACK_SIZE-4,reg;  \
+        orl  %esp,reg;           \
         andl $~3,reg;            \
         movl (reg),reg;
 
-/*
- * At VMExit time the processor saves the guest selectors, esp, eip, 
- * and eflags. Therefore we don't save them, but simply decrement 
- * the kernel stack pointer to make it consistent with the stack frame 
- * at usual interruption time. The eflags of the host is not saved by AMD-V, 
- * and we set it to the fixed value.
- *
- * We also need the room, especially because orig_eax field is used 
- * by do_IRQ(). Compared the cpu_user_regs, we skip pushing for the following:
- *   (10) u32 gs;                 
- *   (9)  u32 fs;
- *   (8)  u32 ds;
- *   (7)  u32 es;
- *               <- get_stack_bottom() (= HOST_ESP)
- *   (6)  u32 ss;
- *   (5)  u32 esp;
- *   (4)  u32 eflags;
- *   (3)  u32 cs;
- *   (2)  u32 eip;
- * (2/1)  u16 entry_vector;
- * (1/1)  u16 error_code;
- * However, get_stack_bottom() actually returns 20 bytes before the real
- * bottom of the stack to allow space for:
- * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
- */
-
 #define HVM_MONITOR_EFLAGS 0x202 /* IF on */
-#define NR_SKIPPED_REGS    6     /* See the above explanation */
-#define HVM_SAVE_ALL_NOSEGREGS \
-        pushl $HVM_MONITOR_EFLAGS; \
-        popf; \
-        subl $(NR_SKIPPED_REGS*4), %esp; \
-        pushl %eax; \
-        pushl %ebp; \
-        pushl %edi; \
-        pushl %esi; \
-        pushl %edx; \
-        pushl %ecx; \
+#define NR_SKIPPED_REGS    7     /* Skip SS thru EAX */
+#define HVM_SAVE_ALL_NOSEGREGS                  \
+        pushl $HVM_MONITOR_EFLAGS;              \
+        popf;                                   \
+        subl $(NR_SKIPPED_REGS*4),%esp;         \
+        pushl %ebp;                             \
+        pushl %edi;                             \
+        pushl %esi;                             \
+        pushl %edx;                             \
+        pushl %ecx;                             \
         pushl %ebx;
-
-#define HVM_RESTORE_ALL_NOSEGREGS   \
-        popl %ebx;  \
-        popl %ecx;  \
-        popl %edx;  \
-        popl %esi;  \
-        popl %edi;  \
-        popl %ebp;  \
-        popl %eax;  \
-        addl $(NR_SKIPPED_REGS*4), %esp
 
 #define VMRUN  .byte 0x0F,0x01,0xD8
 #define VMLOAD .byte 0x0F,0x01,0xDA
@@ -88,12 +52,10 @@
 
 ENTRY(svm_asm_do_resume)
         GET_CURRENT(%ebx)
-        xorl %ecx,%ecx
-        notl %ecx
         cli                             # tests must not race interrupts
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
-        test %ecx,irq_stat(%eax,1)
+        testl $~0,irq_stat(%eax,1)
         jnz  svm_process_softirqs
         call svm_intr_assist
         call svm_load_cr2
@@ -101,39 +63,35 @@ ENTRY(svm_asm_do_resume)
         CLGI                
         sti
         GET_CURRENT(%ebx)
-        movl VCPU_svm_vmcb(%ebx), %ecx
-        movl 24(%esp), %eax
-        movl %eax, VMCB_rax(%ecx)
-        movl VCPU_processor(%ebx), %eax
-        movl root_vmcb_pa(,%eax,8), %eax
+        movl VCPU_svm_vmcb(%ebx),%ecx
+        movl UREGS_eax(%esp),%eax
+        movl %eax,VMCB_rax(%ecx)
+        movl VCPU_processor(%ebx),%eax
+        movl root_vmcb_pa(,%eax,8),%eax
         VMSAVE
 
-        movl VCPU_svm_vmcb_pa(%ebx), %eax
+        movl VCPU_svm_vmcb_pa(%ebx),%eax
         popl %ebx
         popl %ecx
         popl %edx
         popl %esi
         popl %edi
         popl %ebp
-
-        /* 
-         * Skip %eax, we need to have vmcb address in there.
-         * Don't worry, EAX is restored through the VMRUN instruction.
-         */
-        addl $4, %esp       
-        addl $(NR_SKIPPED_REGS*4), %esp
+        addl $(NR_SKIPPED_REGS*4),%esp
         VMLOAD
         VMRUN
         VMSAVE
-        /* eax is the only register we're allowed to touch here... */
 
-        GET_CURRENT(%eax)
+        HVM_SAVE_ALL_NOSEGREGS
 
-        movl VCPU_processor(%eax), %eax
-        movl root_vmcb_pa(,%eax,8), %eax
+        GET_CURRENT(%ebx)
+        movl VCPU_svm_vmcb(%ebx),%ecx
+        movl VMCB_rax(%ecx),%eax
+        movl %eax,UREGS_eax(%esp)
+        movl VCPU_processor(%ebx),%eax
+        movl root_vmcb_pa(,%eax,8),%eax
         VMLOAD
 
-        HVM_SAVE_ALL_NOSEGREGS
         STGI
 .globl svm_stgi_label;
 svm_stgi_label:
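
The new x86_32 count of seven skipped slots follows from the frame layout
described in the comment removed above, plus the EAX slot which the stub no
longer pushes either (it is filled from VMCB_rax after VMSAVE instead). A
rough sketch of that skipped region, with a hypothetical name and ignoring
the exact padding of Xen's real cpu_user_regs:

    #include <stdint.h>

    /* Illustrative only: the 7 x 4-byte cpu_user_regs slots covered by
     * "subl $(NR_SKIPPED_REGS*4),%esp" on x86_32, lowest address first. */
    struct skipped_slots_x86_32 {
        uint32_t eax;              /* written back from VMCB_rax after VMSAVE  */
        uint16_t error_code;       /* shares one 4-byte slot with entry_vector */
        uint16_t entry_vector;
        uint32_t eip;              /* eip/cs/eflags/esp/ss are saved by the    */
        uint32_t cs;               /*   processor into the VMCB across VMRUN,  */
        uint32_t eflags;           /*   so the stub never pushes or pops them  */
        uint32_t esp;
        uint32_t ss;
    };                             /* sizeof == 7*4 == NR_SKIPPED_REGS*4       */
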
diff -r e19ddfa781c5 -r 60240a72e2b2 xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S       Thu May 10 22:54:43 2007 +0100
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S       Thu May 10 23:49:04 2007 +0100
@@ -16,6 +16,7 @@
  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  */
+
 #include <xen/config.h>
 #include <xen/errno.h>
 #include <xen/softirq.h>
@@ -25,72 +26,32 @@
 #include <public/xen.h>
 
 #define GET_CURRENT(reg)         \
-        movq $STACK_SIZE-8, reg; \
-        orq  %rsp, reg;          \
+        movq $STACK_SIZE-8,reg;  \
+        orq  %rsp,reg;           \
         andq $~7,reg;            \
         movq (reg),reg;
 
-/*
- * At VMExit time the processor saves the guest selectors, rsp, rip, 
- * and rflags. Therefore we don't save them, but simply decrement 
- * the kernel stack pointer to make it consistent with the stack frame 
- * at usual interruption time. The rflags of the host is not saved by AMD-V, 
- * and we set it to the fixed value.
- *
- * We also need the room, especially because orig_eax field is used 
- * by do_IRQ(). Compared the cpu_user_regs, we skip pushing for the following:
- *   (10) u64 gs;                 
- *   (9)  u64 fs;
- *   (8)  u64 ds;
- *   (7)  u64 es;
- *               <- get_stack_bottom() (= HOST_ESP)
- *   (6)  u64 ss;
- *   (5)  u64 rsp;
- *   (4)  u64 rflags;
- *   (3)  u64 cs;
- *   (2)  u64 rip;
- * (2/1)  u32 entry_vector;
- * (1/1)  u32 error_code;
- */
 #define HVM_MONITOR_RFLAGS 0x202 /* IF on */
-#define NR_SKIPPED_REGS    6     /* See the above explanation */
-#define HVM_SAVE_ALL_NOSEGREGS \
-        pushq $HVM_MONITOR_RFLAGS; \
-        popfq; \
-        subq $(NR_SKIPPED_REGS*8), %rsp; \
-        pushq %rdi; \
-        pushq %rsi; \
-        pushq %rdx; \
-        pushq %rcx; \
-        pushq %rax; \
-        pushq %r8;  \
-        pushq %r9;  \
-        pushq %r10; \
-        pushq %r11; \
-        pushq %rbx; \
-        pushq %rbp; \
-        pushq %r12; \
-        pushq %r13; \
-        pushq %r14; \
-        pushq %r15; \
-
-#define HVM_RESTORE_ALL_NOSEGREGS \
-        popq %r15; \
-        popq %r14; \
-        popq %r13; \
-        popq %r12; \
-        popq %rbp; \
-        popq %rbx; \
-        popq %r11; \
-        popq %r10; \
-        popq %r9;  \
-        popq %r8;  \
-        popq %rax; \
-        popq %rcx; \
-        popq %rdx; \
-        popq %rsi; \
-        popq %rdi; \
-        addq $(NR_SKIPPED_REGS*8), %rsp; \
+#define NR_SKIPPED_REGS    6     /* Skip SS thru error_code */
+#define HVM_SAVE_ALL_NOSEGREGS                  \
+        pushq $HVM_MONITOR_RFLAGS;              \
+        popfq;                                  \
+        subq $(NR_SKIPPED_REGS*8),%rsp;         \
+        pushq %rdi;                             \
+        pushq %rsi;                             \
+        pushq %rdx;                             \
+        pushq %rcx;                             \
+        pushq %rax;                             \
+        pushq %r8;                              \
+        pushq %r9;                              \
+        pushq %r10;                             \
+        pushq %r11;                             \
+        pushq %rbx;                             \
+        pushq %rbp;                             \
+        pushq %r12;                             \
+        pushq %r13;                             \
+        pushq %r14;                             \
+        pushq %r15;
 
 #define VMRUN  .byte 0x0F,0x01,0xD8
 #define VMLOAD .byte 0x0F,0x01,0xDA
@@ -102,9 +63,9 @@ ENTRY(svm_asm_do_resume)
         GET_CURRENT(%rbx)
         cli                             # tests must not race interrupts
         movl VCPU_processor(%rbx),%eax
-        shl  $IRQSTAT_shift, %rax
-        leaq irq_stat(%rip), %rdx
-        testl $~0, (%rdx, %rax, 1)
+        shl  $IRQSTAT_shift,%rax
+        leaq irq_stat(%rip),%rdx
+        testl $~0,(%rdx,%rax,1)
         jnz  svm_process_softirqs
         call svm_intr_assist
         call svm_load_cr2
@@ -112,15 +73,15 @@ ENTRY(svm_asm_do_resume)
         CLGI                
         sti
         GET_CURRENT(%rbx)
-        movq VCPU_svm_vmcb(%rbx), %rcx
-        movq UREGS_rax(%rsp), %rax
-        movq %rax, VMCB_rax(%rcx)
-        leaq root_vmcb_pa(%rip), %rax
-        movl VCPU_processor(%rbx), %ecx
-        movq (%rax,%rcx,8), %rax
+        movq VCPU_svm_vmcb(%rbx),%rcx
+        movq UREGS_rax(%rsp),%rax
+        movq %rax,VMCB_rax(%rcx)
+        leaq root_vmcb_pa(%rip),%rax
+        movl VCPU_processor(%rbx),%ecx
+        movq (%rax,%rcx,8),%rax
         VMSAVE
 
-        movq VCPU_svm_vmcb_pa(%rbx), %rax
+        movq VCPU_svm_vmcb_pa(%rbx),%rax
         popq %r15
         popq %r14
         popq %r13
@@ -131,26 +92,26 @@ ENTRY(svm_asm_do_resume)
         popq %r10
         popq %r9
         popq %r8
-        /*
-         * Skip %rax, we need to have vmcb address in there.
-         * Don't worry, RAX is restored through the VMRUN instruction.
-         */
-        addq $8, %rsp
+        addq $8,%rsp /* Skip %rax: restored by VMRUN. */
         popq %rcx
         popq %rdx
         popq %rsi
         popq %rdi
-        addq $(NR_SKIPPED_REGS*8), %rsp
+        addq $(NR_SKIPPED_REGS*8),%rsp
 
         VMLOAD
         VMRUN
         VMSAVE
+
         HVM_SAVE_ALL_NOSEGREGS
 
         GET_CURRENT(%rbx)
-        leaq root_vmcb_pa(%rip), %rax
-        movl VCPU_processor(%rbx), %ecx
-        movq (%rax,%rcx,8), %rax
+        movq VCPU_svm_vmcb(%rbx),%rcx
+        movq VMCB_rax(%rcx),%rax
+        movq %rax,UREGS_rax(%rsp)
+        leaq root_vmcb_pa(%rip),%rax
+        movl VCPU_processor(%rbx),%ecx
+        movq (%rax,%rcx,8),%rax
         VMLOAD
 
         STGI
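
In C terms, the RAX handling that both stubs now perform around VMRUN looks
roughly like the sketch below (illustrative pseudocode with stand-in types
and a hypothetical function name; the real work is the assembly above, with
the copy back into the frame done before STGI so nothing after interrupts are
re-enabled can see a stale guest RAX):

    #include <stdint.h>

    /* Stand-ins for the fields involved; only the RAX handling is shown. */
    struct vmcb_struct   { uint64_t rax; /* ... */ };
    struct cpu_user_regs { uint64_t rax; /* ... */ };

    static void svm_rax_roundtrip(struct vmcb_struct *vmcb,
                                  struct cpu_user_regs *regs)
    {
        /* Entry path: movq UREGS_rax(%rsp),%rax; movq %rax,VMCB_rax(%rcx) */
        vmcb->rax = regs->rax;

        /* VMSAVE(host); VMLOAD(guest); VMRUN; VMSAVE(guest) -- guest runs here */

        /* Exit path, before STGI:
         * movq VMCB_rax(%rcx),%rax; movq %rax,UREGS_rax(%rsp) */
        regs->rax = vmcb->rax;
    }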
