
[Xen-changelog] [xen master] SVM: re-work VMCB sync-ing



commit cb6ff207f7e0bbfe2d9ab3cb1a0866962cf17169
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon May 7 09:11:15 2018 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon May 7 09:11:15 2018 +0200

    SVM: re-work VMCB sync-ing
    
    The main problem addressed here is that the field so far named
    "vmcb_in_sync" started out with the wrong value: it should have been
    true rather than false, to prevent performing a VMSAVE without ever
    having VMLOADed the vCPU's state. Go a step further, though, and
    make the synced state a tristate: CPU and memory may be in sync, or
    an update may be required in either direction. Rename the field and
    introduce an enum.
    
    Callers of svm_sync_vmcb() now indicate the intended new state, and
    all syncing goes through that function. There is a slight "anomaly"
    when requesting VMLOAD: we could store vmcb_needs_vmsave in those
    cases, as the callers request, but the VMCB really is in sync at
    that point, and hence there is no need to VMSAVE in case we don't
    make it out to guest context.
    
    With that, there is no need to VMLOAD the state potentially multiple
    times; it only needs to be loaded once, before VM entry.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
    Release-acked-by: Juergen Gross <jgross@xxxxxxxx>
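
To make the new protocol easier to follow outside the full tree, below is a
minimal, self-contained C sketch of the tristate model and of the shape of
the reworked svm_sync_vmcb(). It is not Xen code: vmsave()/vmload() are
logging stand-ins for the real svm_vmsave()/svm_vmload() helpers, and
struct vcpu_sync is a made-up stand-in for arch_svm_struct.

#include <stdio.h>

/* Tristate mirroring the enum this patch adds to vmcb.h. */
enum vmcb_sync_state {
    vmcb_in_sync,        /* CPU registers and in-memory VMCB agree        */
    vmcb_needs_vmsave,   /* CPU registers are newer; VMSAVE before reading */
    vmcb_needs_vmload,   /* in-memory VMCB is newer; VMLOAD before VMRUN   */
};

struct vcpu_sync { enum vmcb_sync_state sync_state; };

/* Stand-ins for svm_vmsave()/svm_vmload(); here they only log. */
static void vmsave(void) { puts("VMSAVE"); }
static void vmload(void) { puts("VMLOAD"); }

/*
 * Same shape as the reworked svm_sync_vmcb(): the caller names the state
 * it wants, and at most one VMSAVE or VMLOAD is issued to get there.
 */
static void sync_vmcb(struct vcpu_sync *v, enum vmcb_sync_state new_state)
{
    if ( new_state == vmcb_needs_vmsave )
    {
        /* Heading to guest context: flush any pending VMCB edits. */
        if ( v->sync_state == vmcb_needs_vmload )
        {
            vmload();
            /* The "anomaly" from the description: store vmcb_in_sync, so
             * no VMSAVE is needed if we never make it out to the guest. */
            v->sync_state = vmcb_in_sync;
        }
    }
    else
    {
        /* Reading (vmcb_in_sync) or writing (vmcb_needs_vmload) the VMCB. */
        if ( v->sync_state == vmcb_needs_vmsave )
            vmsave();

        if ( v->sync_state != vmcb_needs_vmload )
            v->sync_state = new_state;
    }
}

int main(void)
{
    struct vcpu_sync v = { vmcb_needs_vmload }; /* as construct_vmcb() now sets */

    sync_vmcb(&v, vmcb_needs_vmsave); /* VM entry: issues a single VMLOAD       */
    v.sync_state = vmcb_needs_vmsave; /* a VMExit leaves live state in the CPU  */
    sync_vmcb(&v, vmcb_in_sync);      /* e.g. reading fs/gs/tr: issues VMSAVE   */
    sync_vmcb(&v, vmcb_needs_vmload); /* e.g. writing MSR_GS_BASE: VMCB already
                                         saved, just mark it dirty              */
    sync_vmcb(&v, vmcb_needs_vmsave); /* next VM entry: issues VMLOAD           */

    return 0;
}

Compiled and run, the sketch prints VMLOAD, VMSAVE, VMLOAD: one VMLOAD per
VM entry and one VMSAVE the first time state is read after a VMExit, which
is the point of the rework.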
---
 xen/arch/x86/hvm/svm/entry.S       |  1 -
 xen/arch/x86/hvm/svm/svm.c         | 60 ++++++++++++++++++++------------------
 xen/arch/x86/hvm/svm/vmcb.c        |  2 ++
 xen/arch/x86/x86_64/asm-offsets.c  |  1 -
 xen/include/asm-x86/hvm/svm/vmcb.h | 18 +++++++++++-
 5 files changed, 50 insertions(+), 32 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index bf092fe071..7d68648c7d 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -112,7 +112,6 @@ UNLIKELY_END(svm_trace)
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         mov  VCPU_svm_vmcb(%rbx),%rcx
-        movb $0,VCPU_svm_vmcb_in_sync(%rbx)
         mov  VMCB_rax(%rcx),%rax
         mov  %rax,UREGS_rax(%rsp)
         mov  VMCB_rip(%rcx),%rax
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 88938e6ae6..798f0bc4cf 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -680,16 +680,26 @@ static void svm_cpuid_policy_changed(struct vcpu *v)
                       cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
 }
 
-static void svm_sync_vmcb(struct vcpu *v)
+static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
 {
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
 
-    if ( arch_svm->vmcb_in_sync )
-        return;
-
-    arch_svm->vmcb_in_sync = 1;
+    if ( new_state == vmcb_needs_vmsave )
+    {
+        if ( arch_svm->vmcb_sync_state == vmcb_needs_vmload )
+        {
+            svm_vmload(arch_svm->vmcb);
+            arch_svm->vmcb_sync_state = vmcb_in_sync;
+        }
+    }
+    else
+    {
+        if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
+            svm_vmsave(arch_svm->vmcb);
 
-    svm_vmsave(arch_svm->vmcb);
+        if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
+            arch_svm->vmcb_sync_state = new_state;
+    }
 }
 
 static unsigned int svm_get_cpl(struct vcpu *v)
@@ -707,7 +717,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
     switch ( seg )
     {
     case x86_seg_fs ... x86_seg_gs:
-        svm_sync_vmcb(v);
+        svm_sync_vmcb(v, vmcb_in_sync);
 
         /* Fallthrough. */
     case x86_seg_es ... x86_seg_ds:
@@ -718,7 +728,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
 
     case x86_seg_tr:
-        svm_sync_vmcb(v);
+        svm_sync_vmcb(v, vmcb_in_sync);
         *reg = vmcb->tr;
         break;
 
@@ -731,7 +741,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
 
     case x86_seg_ldtr:
-        svm_sync_vmcb(v);
+        svm_sync_vmcb(v, vmcb_in_sync);
         *reg = vmcb->ldtr;
         break;
 
@@ -746,7 +756,6 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    bool sync = false;
 
     ASSERT((v == current) || !vcpu_runnable(v));
 
@@ -768,7 +777,8 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
     case x86_seg_gs:
     case x86_seg_tr:
     case x86_seg_ldtr:
-        sync = (v == current);
+        if ( v == current )
+            svm_sync_vmcb(v, vmcb_needs_vmload);
         break;
 
     default:
@@ -777,9 +787,6 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
         return;
     }
 
-    if ( sync )
-        svm_sync_vmcb(v);
-
     switch ( seg )
     {
     case x86_seg_ss:
@@ -813,9 +820,6 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
         ASSERT_UNREACHABLE();
         break;
     }
-
-    if ( sync )
-        svm_vmload(vmcb);
 }
 
 static unsigned long svm_get_shadow_gs_base(struct vcpu *v)
@@ -1086,7 +1090,7 @@ static void svm_ctxt_switch_from(struct vcpu *v)
     svm_lwp_save(v);
     svm_tsc_ratio_save(v);
 
-    svm_sync_vmcb(v);
+    svm_sync_vmcb(v, vmcb_needs_vmload);
     svm_vmload_pa(per_cpu(host_vmcb, cpu));
 
     /* Resume use of ISTs now that the host TR is reinstated. */
@@ -1114,7 +1118,6 @@ static void svm_ctxt_switch_to(struct vcpu *v)
     svm_restore_dr(v);
 
     svm_vmsave_pa(per_cpu(host_vmcb, cpu));
-    svm_vmload(vmcb);
     vmcb->cleanbits.bytes = 0;
     svm_lwp_load(v);
     svm_tsc_ratio_load(v);
@@ -1168,6 +1171,8 @@ static void noreturn svm_do_resume(struct vcpu *v)
 
     hvm_do_resume(v);
 
+    svm_sync_vmcb(v, vmcb_needs_vmsave);
+
     reset_stack_and_jump(svm_asm_do_resume);
 }
 
@@ -1895,7 +1900,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     case MSR_FS_BASE:
     case MSR_GS_BASE:
     case MSR_SHADOW_GS_BASE:
-        svm_sync_vmcb(v);
+        svm_sync_vmcb(v, vmcb_in_sync);
         break;
     }
 
@@ -2067,7 +2072,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     int ret, result = X86EMUL_OKAY;
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    bool sync = false;
 
     switch ( msr )
     {
@@ -2081,13 +2085,10 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     case MSR_FS_BASE:
     case MSR_GS_BASE:
     case MSR_SHADOW_GS_BASE:
-        sync = true;
+        svm_sync_vmcb(v, vmcb_needs_vmload);
         break;
     }
 
-    if ( sync )
-        svm_sync_vmcb(v);
-
     switch ( msr )
     {
     case MSR_IA32_SYSENTER_ESP:
@@ -2261,9 +2262,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         break;
     }
 
-    if ( sync )
-        svm_vmload(vmcb);
-
     return result;
 
  gpf:
@@ -2413,7 +2411,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
     put_page(page);
 
     /* State in L1 VMCB is stale now */
-    v->arch.hvm_svm.vmcb_in_sync = 0;
+    v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
 
     __update_guest_eip(regs, inst_len);
 }
@@ -2623,6 +2621,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     bool_t vcpu_guestmode = 0;
     struct vlapic *vlapic = vcpu_vlapic(v);
 
+    v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
     hvm_invalidate_regs_fields(regs);
 
     if ( paging_mode_hap(v->domain) )
@@ -3109,6 +3108,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     }
 
   out:
+    svm_sync_vmcb(v, vmcb_needs_vmsave);
+
     if ( vcpu_guestmode || vlapic_hw_disabled(vlapic) )
         return;
 
@@ -3117,6 +3118,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     intr.fields.tpr =
         (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
     vmcb_set_vintr(vmcb, intr);
+    ASSERT(v->arch.hvm_svm.vmcb_sync_state != vmcb_needs_vmload);
 }
 
 void svm_trace_vmentry(void)
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index ae60d8dc1c..f2d0f4c0c2 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -84,6 +84,8 @@ static int construct_vmcb(struct vcpu *v)
                              CR_INTERCEPT_CR8_READ |
                              CR_INTERCEPT_CR8_WRITE);
 
+    arch_svm->vmcb_sync_state = vmcb_needs_vmload;
+
     /* I/O and MSR permission bitmaps. */
     arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
     if ( arch_svm->msrpm == NULL )
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 7ad024cf37..eb7e77619a 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -102,7 +102,6 @@ void __dummy__(void)
 
     OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
     OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
-    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
     BLANK();
 
     OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index de07429dff..6add818e5c 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -495,12 +495,28 @@ struct vmcb_struct {
 struct svm_domain {
 };
 
+/*
+ * VMRUN doesn't switch fs/gs/tr/ldtr and SHADOWGS/SYSCALL/SYSENTER state.
+ * Therefore, guest state is in the hardware registers when servicing a
+ * VMExit.
+ *
+ * Immediately after a VMExit, the vmcb is stale, and needs to be brought
+ * into sync by VMSAVE.  If state in the vmcb is modified, a VMLOAD is
+ * needed before the following VMRUN.
+ */
+enum vmcb_sync_state {
+    vmcb_in_sync,
+    vmcb_needs_vmsave,    /* VMCB out of sync (VMSAVE needed)? */
+    vmcb_needs_vmload     /* VMCB dirty (VMLOAD needed)? */
+};
+
 struct arch_svm_struct {
     struct vmcb_struct *vmcb;
     u64    vmcb_pa;
     unsigned long *msrpm;
     int    launch_core;
-    bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
+
+    uint8_t vmcb_sync_state; /* enum vmcb_sync_state */
 
     /* VMCB has a cached instruction from #PF/#NPF Decode Assist? */
     uint8_t cached_insn_len; /* Zero if no cached instruction. */
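
The comment added to vmcb.h above is terse about what "fs/gs/tr/ldtr and
SHADOWGS/SYSCALL/SYSENTER state" covers. The listing below is a rough
reference for the register set that VMSAVE/VMLOAD move, drawn from the AMD
architecture manuals rather than from this patch; it is plain C only so it
can be compiled and printed, and the grouping is illustrative.

#include <stdio.h>

/*
 * Guest state moved by VMSAVE/VMLOAD, i.e. the state that VMRUN/#VMEXIT
 * leave in the hardware registers and that vmcb_sync_state tracks.
 * Rough listing per the AMD APM; grouping is only for readability.
 */
static const char *const vmsave_vmload_state[] = {
    "FS, GS          (selectors plus hidden base/limit/attributes)",
    "TR, LDTR        (selectors plus hidden state)",
    "KERNEL_GS_BASE  (Xen's MSR_SHADOW_GS_BASE)",
    "STAR, LSTAR, CSTAR, SFMASK (SYSCALL MSRs)",
    "SYSENTER_CS, SYSENTER_ESP, SYSENTER_EIP",
};

int main(void)
{
    unsigned int i;

    /*
     * Reading any of this out of the VMCB after a VMExit needs a prior
     * VMSAVE (sync to vmcb_in_sync); writing it into the VMCB needs a
     * VMLOAD before the next VMRUN (sync to vmcb_needs_vmload).
     */
    for ( i = 0; i < sizeof(vmsave_vmload_state) /
                     sizeof(vmsave_vmload_state[0]); i++ )
        puts(vmsave_vmload_state[i]);

    return 0;
}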
--
generated by git-patchbot for /home/xen/git/xen.git#master
