
[Xen-changelog] [xen stable-4.7] x86/msr: Emulation of MSR_{SPEC_CTRL,PRED_CMD} for guests



commit b2b7fe128f6fbecf54e97cdd2d71923d0a852535
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Wed Feb 14 11:37:28 2018 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Feb 14 11:37:28 2018 +0100

    x86/msr: Emulation of MSR_{SPEC_CTRL,PRED_CMD} for guests
    
    As per the spec currently available here:
    
    
https://software.intel.com/sites/default/files/managed/c5/63/336996-Speculative-Execution-Side-Channel-Mitigations.pdf
    
    MSR_ARCH_CAPABILITIES will only come into existence on new hardware, but is
    implemented as a straight #GP for now to avoid being leaky when new hardware
    arrives.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    master commit: ea58a679a6190e714a592f1369b660769a48a80c
    master date: 2018-01-26 14:10:21 +0000
---
 xen/arch/x86/hvm/hvm.c          | 52 ++++++++++++++++++++++++++++++++++++++++-
 xen/arch/x86/traps.c            | 52 +++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/domain.h    |  2 ++
 xen/include/asm-x86/msr-index.h |  2 ++
 4 files changed, 107 insertions(+), 1 deletion(-)
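
[Editorial note, not part of the patch:] For readers skimming the hunks below, the new WRMSR handling reduces to two checks before a value is accepted: the MSR must be enumerated to the guest via CPUID, and no reserved bits may be set. The following is a minimal, self-contained sketch of that validation logic, outside of Xen, with the hvm_cpuid()/domain_cpuid() plumbing replaced by plain boolean parameters and the SPEC_CTRL_* bit definitions repeated locally (IBRS is bit 0, STIBP bit 1, per the Intel spec cited above).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_IBRS   (1ULL << 0)
#define SPEC_CTRL_STIBP  (1ULL << 1)

/* Accept or reject a guest write to MSR_SPEC_CTRL. */
static bool spec_ctrl_write_ok(bool cpuid_has_ibrsb, uint64_t val)
{
    if ( !cpuid_has_ibrsb )
        return false;                 /* MSR not enumerated => #GP */

    /* STIBP is accepted (and ignored) even when STIBP isn't enumerated. */
    if ( val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
        return false;                 /* reserved bit set => #GP */

    return true;
}

/* Accept or reject a guest write to MSR_PRED_CMD. */
static bool pred_cmd_write_ok(bool cpuid_has_ibrsb, bool cpuid_has_ibpb)
{
    /* Any value is tolerated; only the IBPB bit has a defined effect. */
    return cpuid_has_ibrsb || cpuid_has_ibpb;
}

int main(void)
{
    printf("%d\n", spec_ctrl_write_ok(true, SPEC_CTRL_IBRS));   /* 1 */
    printf("%d\n", spec_ctrl_write_ok(true, 1ULL << 2));        /* 0: reserved bit */
    printf("%d\n", pred_cmd_write_ok(false, true));             /* 1 */
    return 0;
}

Reads of MSR_PRED_CMD and writes of MSR_ARCH_CAPABILITIES simply #GP, as the hunks show.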

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 502f3dd..b6275df 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3791,6 +3791,21 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         *msr_content = var_range_base[index];
         break;
 
+    case MSR_PRED_CMD:
+        /* Write-only */
+        goto gp_fault;
+
+    case MSR_SPEC_CTRL:
+        hvm_cpuid(7, NULL, NULL, NULL, &edx);
+        if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+            goto gp_fault;
+        *msr_content = v->arch.spec_ctrl;
+        break;
+
+    case MSR_ARCH_CAPABILITIES:
+        /* Not implemented yet. */
+        goto gp_fault;
+
     case MSR_K8_ENABLE_C1E:
     case MSR_AMD64_NB_CFG:
          /*
@@ -3828,7 +3843,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
 {
     struct vcpu *v = current;
     bool_t mtrr;
-    unsigned int edx, index;
+    unsigned int edx, ebx, index;
     int ret = X86EMUL_OKAY;
     struct arch_domain *currad = &current->domain->arch;
 
@@ -3943,6 +3958,41 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
             goto gp_fault;
         break;
 
+    case MSR_SPEC_CTRL:
+        hvm_cpuid(7, NULL, NULL, NULL, &edx);
+        if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+            goto gp_fault; /* MSR available? */
+
+        /*
+         * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e. ignored)
+         * when STIBP isn't enumerated in hardware.
+         */
+
+        if ( msr_content & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
+            goto gp_fault; /* Rsvd bit set? */
+
+        v->arch.spec_ctrl = msr_content;
+        break;
+
+    case MSR_PRED_CMD:
+        hvm_cpuid(7, NULL, NULL, NULL, &edx);
+        hvm_cpuid(0x80000008, NULL, &ebx, NULL, NULL);
+        if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) &&
+             !(ebx & cpufeat_mask(X86_FEATURE_IBPB)) )
+            goto gp_fault; /* MSR available? */
+
+        /*
+         * The only defined behaviour is when writing PRED_CMD_IBPB.  In
+         * practice, real hardware accepts any value without faulting.
+         */
+        if ( msr_content & PRED_CMD_IBPB )
+            wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB);
+        break;
+
+    case MSR_ARCH_CAPABILITIES:
+        /* Read-only */
+        goto gp_fault;
+
     case MSR_AMD64_NB_CFG:
         /* ignore the write */
         break;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 6d030e5..c15e026 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2742,6 +2742,8 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         vpmu_msr = 0;
         switch ( regs->_ecx )
         {
+            uint32_t ebx, dummy;
+
         case MSR_FS_BASE:
             if ( is_pv_32bit_domain(currd) ||
                  !is_canonical_address(msr_content) )
@@ -2881,9 +2883,41 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             break;
 
         case MSR_INTEL_PLATFORM_INFO:
+        case MSR_ARCH_CAPABILITIES:
             /* The MSR is read-only. */
             goto fail;
 
+        case MSR_SPEC_CTRL:
+            domain_cpuid(currd, 7, 0, &dummy, &dummy, &dummy, &edx);
+            if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+                goto fail; /* MSR available? */
+
+            /*
+             * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e. ignored)
+             * when STIBP isn't enumerated in hardware.
+             */
+
+            if ( eax & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
+                goto fail; /* Rsvd bit set? */
+
+            v->arch.spec_ctrl = eax;
+            break;
+
+        case MSR_PRED_CMD:
+            domain_cpuid(currd, 7, 0, &dummy, &dummy, &dummy, &edx);
+            domain_cpuid(currd, 0x80000008, 0, &dummy, &ebx, &dummy, &dummy);
+            if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) &&
+                 !(ebx & cpufeat_mask(X86_FEATURE_IBPB)) )
+                goto fail; /* MSR available? */
+
+            /*
+             * The only defined behaviour is when writing PRED_CMD_IBPB.  In
+             * practice, real hardware accepts any value without faulting.
+             */
+            if ( eax & PRED_CMD_IBPB )
+                wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB);
+            break;
+
         case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
         case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
         case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
@@ -2942,6 +2976,8 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         vpmu_msr = 0;
         switch ( regs->_ecx )
         {
+            uint32_t edx, dummy;
+
         case MSR_FS_BASE:
             if ( is_pv_32bit_domain(currd) )
                 goto fail;
@@ -3012,12 +3048,28 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             regs->eax = regs->edx = 0;
             break;
 
+        case MSR_PRED_CMD:
+            /* Write-only */
+            goto fail;
+
+        case MSR_SPEC_CTRL:
+            domain_cpuid(currd, 7, 0, &dummy, &dummy, &dummy, &edx);
+            if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+                goto fail;
+            regs->eax = v->arch.spec_ctrl;
+            regs->edx = 0;
+            break;
+
         case MSR_INTEL_PLATFORM_INFO:
             if ( !boot_cpu_has(X86_FEATURE_MSR_PLATFORM_INFO) )
                 goto fail;
             regs->eax = regs->edx = 0;
             break;
 
+        case MSR_ARCH_CAPABILITIES:
+            /* Not implemented yet. */
+            goto fail;
+
         case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
         case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
         case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 643693e..af80e7d 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -571,6 +571,8 @@ struct arch_vcpu
 
     struct paging_vcpu paging;
 
+    uint32_t spec_ctrl;
+
     uint32_t gdbsx_vcpu_event;
 
     /* A secondary copy of the vcpu time info. */
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 96d3eb0..0fd34ed 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -39,6 +39,8 @@
 #define MSR_PRED_CMD                   0x00000049
 #define PRED_CMD_IBPB                  (_AC(1, ULL) << 0)
 
+#define MSR_ARCH_CAPABILITIES          0x0000010a
+
 /* Intel MSRs. Some also available on other CPUs */
 #define MSR_IA32_PERFCTR0              0x000000c1
 #define MSR_IA32_A_PERFCTR0            0x000004c1
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.7
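
[Editorial note, not part of the patch:] For context, once IBRSB/IBPB are visible in guest CPUID, a guest kernel can program the newly emulated MSRs directly. The sketch below is a rough, hypothetical guest-kernel fragment: the rdmsr64()/wrmsr64() wrappers are generic illustrations rather than any Xen or Linux interface, and the MSR indices match msr-index.h above (MSR_SPEC_CTRL is 0x48, MSR_PRED_CMD 0x49). WRMSR/RDMSR are privileged, so this only runs at guest kernel level.

#include <stdint.h>

#define MSR_SPEC_CTRL   0x00000048
#define MSR_PRED_CMD    0x00000049
#define SPEC_CTRL_IBRS  (1ULL << 0)
#define PRED_CMD_IBPB   (1ULL << 0)

static inline uint64_t rdmsr64(uint32_t msr)
{
    uint32_t lo, hi;

    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
    return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
    asm volatile ( "wrmsr" :: "c" (msr), "a" ((uint32_t)val),
                   "d" ((uint32_t)(val >> 32)) : "memory" );
}

/* E.g. on a switch to a less trusted context. */
static void apply_branch_predictor_mitigations(void)
{
    /* Restrict indirect branch speculation ... */
    wrmsr64(MSR_SPEC_CTRL, rdmsr64(MSR_SPEC_CTRL) | SPEC_CTRL_IBRS);

    /* ... and flush prior indirect branch prediction state. */
    wrmsr64(MSR_PRED_CMD, PRED_CMD_IBPB);
}

A write with a reserved SPEC_CTRL bit set, or to an MSR not enumerated to the guest, takes the #GP paths added above.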

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

