
[Xen-devel] [PATCH 1/2] VMX: correct feature checks for MPX and XSAVES



The VMCS fields for MPX and XSAVES aren't tied to the respective base
CPU feature flags, but to VMX-specific capability bits.

Note that while the VMCS GUEST_BNDCFGS field exists if either of the
two MPX-related VMX controls (VM_EXIT_CLEAR_BNDCFGS / VM_ENTRY_LOAD_BNDCFGS)
is available, MPX continues to get exposed to guests only with both
controls present.

Also add the so far missing handling of
- GUEST_BNDCFGS in construct_vmcs()
- MSR_IA32_BNDCFGS in vmx_msr_{read,write}_intercept()
and mirror the extra correctness checks done during guest MSR writes
into vmx_load_msr() (a standalone sketch of these checks follows the
patch below).

Reported-by: "Rockosov, Dmitry" <dmitry.rockosov@xxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Tested-by: "Rockosov, Dmitry" <dmitry.rockosov@xxxxxxxxx>
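
[Editorial note, not part of the patch.] The cpu_has_vmx_mpx macro
introduced below keys off vmx_vmexit_control and vmx_vmentry_control,
which Xen latches from the VMX capability MSRs in vmx_init_vmcs_config().
A minimal standalone sketch of that discovery step, assuming the
SDM-defined bit positions (VM-exit control bit 23, VM-entry control
bit 16) and purely hypothetical sample MSR readings:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VM_EXIT_CLEAR_BNDCFGS   0x00800000u  /* VM-exit control bit 23 */
#define VM_ENTRY_LOAD_BNDCFGS   0x00010000u  /* VM-entry control bit 16 */

/* High half of a VMX capability MSR: the "allowed-1" control settings. */
static bool ctl_allowed(uint64_t cap, uint32_t ctl)
{
    return ((uint32_t)(cap >> 32) & ctl) == ctl;
}

int main(void)
{
    /* Hypothetical raw readings of the exit/entry controls capability MSRs. */
    uint64_t exit_caps  = 0x00ffffff00036dffULL;
    uint64_t entry_caps = 0x0001ffff000011ffULL;

    /* Mirrors the new cpu_has_vmx_mpx: both controls must be available. */
    bool vmx_mpx = ctl_allowed(exit_caps, VM_EXIT_CLEAR_BNDCFGS) &&
                   ctl_allowed(entry_caps, VM_ENTRY_LOAD_BNDCFGS);

    printf("GUEST_BNDCFGS load/clear supported: %s\n", vmx_mpx ? "yes" : "no");
    return 0;
}

On real hardware the raw values would come from reading
MSR_IA32_VMX_EXIT_CTLS (0x483) and MSR_IA32_VMX_ENTRY_CTLS (0x484), or
their TRUE_* counterparts when IA32_VMX_BASIC bit 55 is set.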

--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -168,8 +168,7 @@ static void __init calculate_hvm_feature
      */
     if ( cpu_has_vmx )
     {
-        if ( !(vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) ||
-             !(vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS) )
+        if ( !cpu_has_vmx_mpx )
             __clear_bit(X86_FEATURE_MPX, hvm_featureset);
 
         if ( !cpu_has_vmx_xsaves )
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1261,6 +1261,8 @@ static int construct_vmcs(struct vcpu *v
         __vmwrite(HOST_PAT, host_pat);
         __vmwrite(GUEST_PAT, guest_pat);
     }
+    if ( cpu_has_vmx_mpx )
+        __vmwrite(GUEST_BNDCFGS, 0);
     if ( cpu_has_vmx_xsaves )
         __vmwrite(XSS_EXIT_BITMAP, 0);
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -788,14 +788,15 @@ static int vmx_load_vmcs_ctxt(struct vcp
 
 static unsigned int __init vmx_init_msr(void)
 {
-    return !!cpu_has_mpx + !!cpu_has_xsaves;
+    return (cpu_has_mpx && cpu_has_vmx_mpx) +
+           (cpu_has_xsaves && cpu_has_vmx_xsaves);
 }
 
 static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
     vmx_vmcs_enter(v);
 
-    if ( cpu_has_mpx )
+    if ( cpu_has_mpx && cpu_has_vmx_mpx )
     {
         __vmread(GUEST_BNDCFGS, &ctxt->msr[ctxt->count].val);
         if ( ctxt->msr[ctxt->count].val )
@@ -804,7 +805,7 @@ static void vmx_save_msr(struct vcpu *v,
 
     vmx_vmcs_exit(v);
 
-    if ( cpu_has_xsaves )
+    if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
     {
         ctxt->msr[ctxt->count].val = v->arch.hvm_vcpu.msr_xss;
         if ( ctxt->msr[ctxt->count].val )
@@ -824,13 +825,15 @@ static int vmx_load_msr(struct vcpu *v,
         switch ( ctxt->msr[i].index )
         {
         case MSR_IA32_BNDCFGS:
-            if ( cpu_has_mpx )
+            if ( cpu_has_mpx && cpu_has_vmx_mpx &&
+                 is_canonical_address(ctxt->msr[i].val) &&
+                 !(ctxt->msr[i].val & IA32_BNDCFGS_RESERVED) )
                 __vmwrite(GUEST_BNDCFGS, ctxt->msr[i].val);
             else if ( ctxt->msr[i].val )
                 err = -ENXIO;
             break;
         case MSR_IA32_XSS:
-            if ( cpu_has_xsaves )
+            if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
                 v->arch.hvm_vcpu.msr_xss = ctxt->msr[i].val;
             else
                 err = -ENXIO;
@@ -2643,6 +2646,11 @@ static int vmx_msr_read_intercept(unsign
     case MSR_IA32_DEBUGCTLMSR:
         __vmread(GUEST_IA32_DEBUGCTL, msr_content);
         break;
+    case MSR_IA32_BNDCFGS:
+        if ( !cpu_has_mpx || !cpu_has_vmx_mpx )
+            goto gp_fault;
+        __vmread(GUEST_BNDCFGS, msr_content);
+        break;
     case MSR_IA32_FEATURE_CONTROL:
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_VMFUNC:
         if ( !nvmx_msr_read_intercept(msr, msr_content) )
@@ -2869,6 +2877,13 @@ static int vmx_msr_write_intercept(unsig
 
         break;
     }
+    case MSR_IA32_BNDCFGS:
+        if ( !cpu_has_mpx || !cpu_has_vmx_mpx ||
+             !is_canonical_address(msr_content) ||
+             (msr_content & IA32_BNDCFGS_RESERVED) )
+            goto gp_fault;
+        __vmwrite(GUEST_BNDCFGS, msr_content);
+        break;
     case MSR_IA32_FEATURE_CONTROL:
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_TRUE_ENTRY_CTLS:
         if ( !nvmx_msr_write_intercept(msr, msr_content) )
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -375,6 +375,9 @@ extern u64 vmx_ept_vpid_cap;
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS)
 #define cpu_has_vmx_pml \
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
+#define cpu_has_vmx_mpx \
+    ((vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \
+     (vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS))
 #define cpu_has_vmx_xsaves \
     (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
 #define cpu_has_vmx_tsc_scaling \
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -56,7 +56,10 @@
 #define MSR_IA32_DS_AREA               0x00000600
 #define MSR_IA32_PERF_CAPABILITIES     0x00000345
 
-#define MSR_IA32_BNDCFGS               0x00000D90
+#define MSR_IA32_BNDCFGS               0x00000d90
+#define IA32_BNDCFGS_ENABLE            0x00000001
+#define IA32_BNDCFGS_PRESERVE          0x00000002
+#define IA32_BNDCFGS_RESERVED          0x00000ffc
 
 #define MSR_IA32_XSS                   0x00000da0
 


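[Editorial note, not part of the patch.] Similarly, a self-contained
sketch of the BNDCFGS value checks that the new vmx_msr_write_intercept()
and vmx_load_msr() paths apply: reserved bits 2..11 must be clear and the
value must be a canonical address. The is_canonical48() helper is a
simplified stand-in for Xen's is_canonical_address(), assuming 48
implemented virtual address bits; the sample values are made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IA32_BNDCFGS_ENABLE     0x00000001ULL  /* bit 0: EN */
#define IA32_BNDCFGS_PRESERVE   0x00000002ULL  /* bit 1: BNDPRESERVE */
#define IA32_BNDCFGS_RESERVED   0x00000ffcULL  /* bits 2..11: must be zero */

/* Simplified canonical check for 48 implemented virtual address bits. */
static bool is_canonical48(uint64_t addr)
{
    uint64_t top = addr >> 47;   /* bits 47..63 must be all zeros or all ones */
    return top == 0 || top == 0x1ffff;
}

/* Would this value be accepted for MSR_IA32_BNDCFGS / GUEST_BNDCFGS? */
static bool bndcfgs_valid(uint64_t val)
{
    return is_canonical48(val) && !(val & IA32_BNDCFGS_RESERVED);
}

int main(void)
{
    printf("%d\n", bndcfgs_valid(IA32_BNDCFGS_ENABLE | IA32_BNDCFGS_PRESERVE)); /* 1 */
    printf("%d\n", bndcfgs_valid(0x0000000000000ff4ULL)); /* 0: reserved bits set */
    printf("%d\n", bndcfgs_valid(0x0000800000001001ULL)); /* 0: non-canonical base */
    return 0;
}

In the patch the same two conditions gate both paths: a failing value
takes the gp_fault path on a guest WRMSR and is rejected with -ENXIO in
vmx_load_msr() on restore.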