
[Xen-devel] [PATCH v3 2/3] x86/svm: add EFER SVME support for VGIF/VLOAD



Only enable virtual VMLOAD/VMSAVE and VGIF if the guest EFER.SVME is set,
toggling them on EFER writes rather than enabling them unconditionally at
VMCB construction time.  When EFER.SVME is clear, the features are disabled
and the VMLOAD/VMSAVE and STGI/CLGI intercepts are restored.

Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Brian Woods <brian.woods@xxxxxxx>
---
 xen/arch/x86/hvm/svm/nestedsvm.c        | 66 +++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c              |  6 +++
 xen/arch/x86/hvm/svm/vmcb.c             | 17 ---------
 xen/include/asm-x86/hvm/svm/nestedsvm.h |  1 +
 4 files changed, 73 insertions(+), 17 deletions(-)
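
For reference, a minimal standalone sketch (not Xen code) of the toggle logic
the new svm_nested_features_on_efer_update() implements.  The struct, flag
names, and main() harness below are illustrative only, and the
cpu_has_svm_vloadsave/cpu_has_svm_vgif and paging_mode_hap() checks are
omitted for brevity:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFER_SVME (1u << 12)            /* SVM enable bit in EFER */

struct vmcb_model {
    bool vloadsave_enable;              /* virtual VMLOAD/VMSAVE enabled */
    bool vgif_enable;                   /* virtual GIF enabled */
    bool vgif;                          /* current virtual GIF value */
};

/* Saved nested GIF while VGIF is off (models svm->ns_gif). */
static bool ns_gif;

static void features_on_efer_update(struct vmcb_model *vmcb, uint64_t efer)
{
    if ( efer & EFER_SVME )
    {
        vmcb->vloadsave_enable = true;  /* stop intercepting VMLOAD/VMSAVE */
        vmcb->vgif = ns_gif;            /* restore the saved nested GIF */
        vmcb->vgif_enable = true;       /* stop intercepting STGI/CLGI */
    }
    else
    {
        vmcb->vloadsave_enable = false; /* intercept VMLOAD/VMSAVE again */
        ns_gif = vmcb->vgif;            /* stash the nested GIF */
        vmcb->vgif_enable = false;      /* intercept STGI/CLGI again */
    }
}

int main(void)
{
    struct vmcb_model vmcb = { 0 };

    features_on_efer_update(&vmcb, EFER_SVME);  /* guest sets EFER.SVME */
    printf("SVME set:   vloadsave=%d vgif_enable=%d\n",
           vmcb.vloadsave_enable, vmcb.vgif_enable);

    features_on_efer_update(&vmcb, 0);          /* guest clears EFER.SVME */
    printf("SVME clear: vloadsave=%d vgif_enable=%d\n",
           vmcb.vloadsave_enable, vmcb.vgif_enable);

    return 0;
}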

diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 1f7b0d3e88..9295e583d7 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1659,3 +1659,69 @@ void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
 
     __update_guest_eip(regs, inst_len);
 }
+
+/*
+ * This runs on an EFER change to see whether nested features need to be
+ * turned on or off.
+ */
+void svm_nested_features_on_efer_update(struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct nestedsvm *svm = &vcpu_nestedsvm(v);
+    u32 general2_intercepts;
+    vintr_t vintr;
+
+    /*
+     * State is needed for transferring the nested GIF status, so only write
+     * it when the hvm_vcpu EFER.SVME changes.
+     */
+    if ( v->arch.hvm_vcpu.guest_efer & EFER_SVME )
+    {
+        if ( !vmcb->virt_ext.fields.vloadsave_enable &&
+             paging_mode_hap(v->domain) &&
+             cpu_has_svm_vloadsave )
+        {
+            vmcb->virt_ext.fields.vloadsave_enable = 1;
+            general2_intercepts  = vmcb_get_general2_intercepts(vmcb);
+            general2_intercepts &= ~(GENERAL2_INTERCEPT_VMLOAD |
+                                     GENERAL2_INTERCEPT_VMSAVE);
+            vmcb_set_general2_intercepts(vmcb, general2_intercepts);
+        }
+
+        if ( !vmcb->_vintr.fields.vgif_enable &&
+             cpu_has_svm_vgif )
+        {
+            vintr = vmcb_get_vintr(vmcb);
+            vintr.fields.vgif = svm->ns_gif;
+            vintr.fields.vgif_enable = 1;
+            vmcb_set_vintr(vmcb, vintr);
+            general2_intercepts  = vmcb_get_general2_intercepts(vmcb);
+            general2_intercepts &= ~(GENERAL2_INTERCEPT_STGI |
+                                     GENERAL2_INTERCEPT_CLGI);
+            vmcb_set_general2_intercepts(vmcb, general2_intercepts);
+        }
+    }
+    else
+    {
+        if ( vmcb->virt_ext.fields.vloadsave_enable )
+        {
+            vmcb->virt_ext.fields.vloadsave_enable = 0;
+            general2_intercepts  = vmcb_get_general2_intercepts(vmcb);
+            general2_intercepts |= (GENERAL2_INTERCEPT_VMLOAD |
+                                    GENERAL2_INTERCEPT_VMSAVE);
+            vmcb_set_general2_intercepts(vmcb, general2_intercepts);
+        }
+
+        if ( vmcb->_vintr.fields.vgif_enable )
+        {
+            vintr = vmcb_get_vintr(vmcb);
+            svm->ns_gif = vintr.fields.vgif;
+            vintr.fields.vgif_enable = 0;
+            vmcb_set_vintr(vmcb, vintr);
+            general2_intercepts  = vmcb_get_general2_intercepts(vmcb);
+            general2_intercepts |= (GENERAL2_INTERCEPT_STGI |
+                                    GENERAL2_INTERCEPT_CLGI);
+            vmcb_set_general2_intercepts(vmcb, general2_intercepts);
+        }
+    }
+}
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c48fdfaa5d..be08a5aa5e 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -611,6 +611,12 @@ static void svm_update_guest_efer(struct vcpu *v)
     if ( lma )
         new_efer |= EFER_LME;
     vmcb_set_efer(vmcb, new_efer);
+
+    if ( !nestedhvm_enabled(v->domain) )
+        ASSERT(!(v->arch.hvm_vcpu.guest_efer & EFER_SVME));
+
+    if ( nestedhvm_enabled(v->domain) )
+        svm_nested_features_on_efer_update(v);
 }
 
 static void svm_cpuid_policy_changed(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 0e6cba5b7b..997e7597e0 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -200,29 +200,12 @@ static int construct_vmcb(struct vcpu *v)
 
         /* PAT is under complete control of SVM when using nested paging. */
         svm_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
-
-        /* Use virtual VMLOAD/VMSAVE if available. */
-        if ( cpu_has_svm_vloadsave )
-        {
-            vmcb->virt_ext.fields.vloadsave_enable = 1;
-            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMLOAD;
-            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMSAVE;
-        }
     }
     else
     {
         vmcb->_exception_intercepts |= (1U << TRAP_page_fault);
     }
 
-    /* if available, enable and configure virtual gif */
-    if ( cpu_has_svm_vgif )
-    {
-        vmcb->_vintr.fields.vgif = 1;
-        vmcb->_vintr.fields.vgif_enable = 1;
-        vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_STGI;
-        vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_CLGI;
-    }
-
     if ( cpu_has_pause_filter )
     {
         vmcb->_pause_filter_count = SVM_PAUSEFILTER_INIT;
diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h
index a619b6131b..abcf2e7c9c 100644
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
@@ -104,6 +104,7 @@ nestedsvm_vmexit_n2n1(struct vcpu *v, struct cpu_user_regs *regs);
 enum nestedhvm_vmexits
 nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs,
     uint64_t exitcode);
+void svm_nested_features_on_efer_update(struct vcpu *v);
 
 /* Interface methods */
 void nsvm_vcpu_destroy(struct vcpu *v);
-- 
2.11.0

