
[Xen-changelog] [xen master] SVM: introduce a VM entry helper



commit 3abe241190af31760c506a9f32bf25e958ea060c
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon May 7 09:12:16 2018 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon May 7 09:12:16 2018 +0200

    SVM: introduce a VM entry helper
    
    Neither the register values copying nor the trace entry generation needs
    doing in assembly. The VMLOAD invocation can also be further deferred
    (and centralized). Therefore replace the svm_asid_handle_vmrun()
    invocation with a call to the new helper.
    
    Similarly move the VM exit side register value copying into
    svm_vmexit_handler().
    
    Now that we always make it out to guest context after VMLOAD,
    svm_sync_vmcb() no longer overrides vmcb_needs_vmsave, making it
    unnecessary for svm_vmexit_handler() to set the field early.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
    Release-acked-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/hvm/svm/entry.S       | 29 ++-----------------------
 xen/arch/x86/hvm/svm/svm.c         | 44 +++++++++++++++++++++++---------------
 xen/arch/x86/x86_64/asm-offsets.c  |  6 ------
 xen/include/asm-x86/hvm/svm/asid.h |  1 +
 4 files changed, 30 insertions(+), 50 deletions(-)
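
(Editorial note: for readers skimming the change, below is a minimal,
self-contained C sketch of the register-copy pattern this patch moves out of
entry.S and into C.  The names guest_regs and vmcb_like are simplified
stand-ins, not Xen's real struct cpu_user_regs / struct vmcb_struct; the code
actually added is svm_vmenter_helper() and the matching copy in
svm_vmexit_handler(), in the diff that follows.)

    #include <stdint.h>

    #define X86_EFLAGS_MBS 0x02u  /* RFLAGS bit 1, architecturally "must be set" */

    /* Simplified stand-ins for struct cpu_user_regs / struct vmcb_struct. */
    struct guest_regs { uint64_t rax, rip, rsp, rflags; };
    struct vmcb_like  { uint64_t rax, rip, rsp, rflags; };

    /* VM entry: propagate the saved register frame into the VMCB, forcing
     * the must-be-set RFLAGS bit (what svm_vmenter_helper() does below). */
    static void vmenter_copy(struct vmcb_like *vmcb, const struct guest_regs *regs)
    {
        vmcb->rax    = regs->rax;
        vmcb->rip    = regs->rip;
        vmcb->rsp    = regs->rsp;
        vmcb->rflags = regs->rflags | X86_EFLAGS_MBS;
    }

    /* VM exit: the same copy runs the other way (what svm_vmexit_handler()
     * now does before invalidating the remaining register fields). */
    static void vmexit_copy(struct guest_regs *regs, const struct vmcb_like *vmcb)
    {
        regs->rax    = vmcb->rax;
        regs->rip    = vmcb->rip;
        regs->rsp    = vmcb->rsp;
        regs->rflags = vmcb->rflags;
    }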

diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index 7d68648c7d..0fa5501477 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -61,23 +61,8 @@ UNLIKELY_START(ne, nsvm_hap)
         jmp  .Lsvm_do_resume
 __UNLIKELY_END(nsvm_hap)
 
-        call svm_asid_handle_vmrun
-
-        cmpb $0,tb_init_done(%rip)
-UNLIKELY_START(nz, svm_trace)
-        call svm_trace_vmentry
-UNLIKELY_END(svm_trace)
-
-        mov  VCPU_svm_vmcb(%rbx),%rcx
-        mov  UREGS_rax(%rsp),%rax
-        mov  %rax,VMCB_rax(%rcx)
-        mov  UREGS_rip(%rsp),%rax
-        mov  %rax,VMCB_rip(%rcx)
-        mov  UREGS_rsp(%rsp),%rax
-        mov  %rax,VMCB_rsp(%rcx)
-        mov  UREGS_eflags(%rsp),%rax
-        or   $X86_EFLAGS_MBS,%rax
-        mov  %rax,VMCB_rflags(%rcx)
+        mov  %rsp, %rdi
+        call svm_vmenter_helper
 
         mov VCPU_arch_msr(%rbx), %rax
         mov VCPUMSR_spec_ctrl_raw(%rax), %eax
@@ -111,16 +96,6 @@ UNLIKELY_END(svm_trace)
         SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
-        mov  VCPU_svm_vmcb(%rbx),%rcx
-        mov  VMCB_rax(%rcx),%rax
-        mov  %rax,UREGS_rax(%rsp)
-        mov  VMCB_rip(%rcx),%rax
-        mov  %rax,UREGS_rip(%rsp)
-        mov  VMCB_rsp(%rcx),%rax
-        mov  %rax,UREGS_rsp(%rsp)
-        mov  VMCB_rflags(%rcx),%rax
-        mov  %rax,UREGS_eflags(%rsp)
-
         STGI
 GLOBAL(svm_stgi_label)
         mov  %rsp,%rdi
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 798f0bc4cf..673a38c574 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -687,10 +687,9 @@ static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
     if ( new_state == vmcb_needs_vmsave )
     {
         if ( arch_svm->vmcb_sync_state == vmcb_needs_vmload )
-        {
             svm_vmload(arch_svm->vmcb);
-            arch_svm->vmcb_sync_state = vmcb_in_sync;
-        }
+
+        arch_svm->vmcb_sync_state = new_state;
     }
     else
     {
@@ -1171,11 +1170,29 @@ static void noreturn svm_do_resume(struct vcpu *v)
 
     hvm_do_resume(v);
 
-    svm_sync_vmcb(v, vmcb_needs_vmsave);
-
     reset_stack_and_jump(svm_asm_do_resume);
 }
 
+void svm_vmenter_helper(const struct cpu_user_regs *regs)
+{
+    struct vcpu *curr = current;
+    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+
+    svm_asid_handle_vmrun();
+
+    if ( unlikely(tb_init_done) )
+        HVMTRACE_ND(VMENTRY,
+                    nestedhvm_vcpu_in_guestmode(curr) ? TRC_HVM_NESTEDFLAG : 0,
+                    1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
+
+    svm_sync_vmcb(curr, vmcb_needs_vmsave);
+
+    vmcb->rax = regs->rax;
+    vmcb->rip = regs->rip;
+    vmcb->rsp = regs->rsp;
+    vmcb->rflags = regs->rflags | X86_EFLAGS_MBS;
+}
+
 static void svm_guest_osvw_init(struct vcpu *vcpu)
 {
     if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
@@ -2621,7 +2638,11 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     bool_t vcpu_guestmode = 0;
     struct vlapic *vlapic = vcpu_vlapic(v);
 
-    v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
+    regs->rax = vmcb->rax;
+    regs->rip = vmcb->rip;
+    regs->rsp = vmcb->rsp;
+    regs->rflags = vmcb->rflags;
+
     hvm_invalidate_regs_fields(regs);
 
     if ( paging_mode_hap(v->domain) )
@@ -3108,8 +3129,6 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     }
 
   out:
-    svm_sync_vmcb(v, vmcb_needs_vmsave);
-
     if ( vcpu_guestmode || vlapic_hw_disabled(vlapic) )
         return;
 
@@ -3118,17 +3137,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     intr.fields.tpr =
         (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
     vmcb_set_vintr(vmcb, intr);
-    ASSERT(v->arch.hvm_svm.vmcb_sync_state != vmcb_needs_vmload);
 }
 
-void svm_trace_vmentry(void)
-{
-    struct vcpu *curr = current;
-    HVMTRACE_ND(VMENTRY,
-                nestedhvm_vcpu_in_guestmode(curr) ? TRC_HVM_NESTEDFLAG : 0,
-                1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
-}
-  
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index eb7e77619a..06028febc1 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -119,12 +119,6 @@ void __dummy__(void)
     OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
     BLANK();
 
-    OFFSET(VMCB_rax, struct vmcb_struct, rax);
-    OFFSET(VMCB_rip, struct vmcb_struct, rip);
-    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
-    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
-    BLANK();
-
     OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
     OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
     BLANK();
diff --git a/xen/include/asm-x86/hvm/svm/asid.h b/xen/include/asm-x86/hvm/svm/asid.h
index 154f4da5fb..d3a144cb6b 100644
--- a/xen/include/asm-x86/hvm/svm/asid.h
+++ b/xen/include/asm-x86/hvm/svm/asid.h
@@ -23,6 +23,7 @@
 #include <asm/processor.h>
 
 void svm_asid_init(const struct cpuinfo_x86 *c);
+void svm_asid_handle_vmrun(void);
 
 static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
 {
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog