
[Xen-changelog] [xen staging] x86/svm: Fixes to OS Visible Workaround handling



commit c416666181684a28d9f11989b8b7ef4dfe2ab78b
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Feb 27 17:22:40 2018 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Aug 28 11:37:11 2018 +0100

    x86/svm: Fixes to OS Visible Workaround handling
    
    OSVW data is technically per-cpu, but it is the firmware's responsibility
    to make it equivalent on each cpu.  A guest's OSVW data is sourced from
    global data in Xen, clearly making it per-domain data rather than per-vcpu
    data.
    
    Move the data from struct arch_svm_struct to struct svm_domain, and call
    svm_guest_osvw_init() from svm_domain_initialise() instead of
    svm_vcpu_initialise().
    
    In svm_guest_osvw_init(), reading osvw_length and osvw_status must be done
    under the osvw_lock to avoid observing mismatched values.  The guest's view
    of osvw_length also needs clipping at 64 as we only offer one status
    register (to date, 5 is the maximum index defined AFAICT).  Avoid
    open-coding max().
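
    A minimal standalone sketch, in plain C outside Xen, of the locked
    read-and-clamp just described; the mutex and the two globals are only
    stand-ins for Xen's osvw_lock, osvw_length and osvw_status, and MIN/MAX
    stand in for Xen's min()/max() helpers:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    static pthread_mutex_t osvw_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t osvw_length = 5;    /* host OSVW_ID_LENGTH value */
    static uint64_t osvw_status = 0x31; /* host OSVW status bits */

    struct osvw { uint64_t length, status; };

    static void guest_osvw_init(struct osvw *o)
    {
        /* Take the lock so length and status are read consistently. */
        pthread_mutex_lock(&osvw_lock);

        /* At least 3 so errata 400/415 are visible, at most the 64 bits
         * representable in the single status register offered. */
        o->length = MIN(MAX(3, osvw_length), 64);

        /* Clear bits 1 and 2: report errata 400 and 415 as fixed. */
        o->status = osvw_status & ~6ULL;

        pthread_mutex_unlock(&osvw_lock);
    }

    int main(void)
    {
        struct osvw o;

        guest_osvw_init(&o);
        printf("length=%llu status=%#llx\n",
               (unsigned long long)o.length,
               (unsigned long long)o.status);

        return 0;
    }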
    
    Drop svm_handle_osvw() as it is shorter and simpler to implement the
    functionality inline in svm_msr_{read,write}_intercept().  As the OSVW MSRs
    are a contiguous block, we can access them as an array for simplicity.
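
    A minimal standalone sketch, again plain C rather than Xen code, of the
    array-style access to the two contiguous OSVW MSRs; the MSR constants use
    the AMD-documented numbering, and the union mirrors the one added to
    struct svm_domain below:

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_AMD_OSVW_ID_LENGTH 0xc0010140
    #define MSR_AMD_OSVW_STATUS    0xc0010141

    static union {
        uint64_t raw[2];
        struct { uint64_t length, status; };
    } osvw = { .raw = { 3, 1 } };

    /* Either OSVW MSR is reached by indexing off the first in the block. */
    static uint64_t osvw_read(uint32_t msr)
    {
        return osvw.raw[msr - MSR_AMD_OSVW_ID_LENGTH];
    }

    int main(void)
    {
        printf("length=%llu status=%llu\n",
               (unsigned long long)osvw_read(MSR_AMD_OSVW_ID_LENGTH),
               (unsigned long long)osvw_read(MSR_AMD_OSVW_STATUS));
        return 0;
    }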
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c         | 47 +++++++++++++++-----------------------
 xen/include/asm-x86/hvm/svm/vmcb.h | 14 +++++++-----
 2 files changed, 26 insertions(+), 35 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 37f782bc9b..a16f372a07 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1195,17 +1195,18 @@ void svm_vmenter_helper(const struct cpu_user_regs *regs)
     vmcb->rflags = regs->rflags | X86_EFLAGS_MBS;
 }
 
-static void svm_guest_osvw_init(struct vcpu *vcpu)
+static void svm_guest_osvw_init(struct domain *d)
 {
-    if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
-        return;
+    struct svm_domain *svm = &d->arch.hvm_domain.svm;
+
+    spin_lock(&osvw_lock);
 
     /*
      * Guests should see errata 400 and 415 as fixed (assuming that
      * HLT and IO instructions are intercepted).
      */
-    vcpu->arch.hvm_svm.osvw.length = (osvw_length >= 3) ? osvw_length : 3;
-    vcpu->arch.hvm_svm.osvw.status = osvw_status & ~(6ULL);
+    svm->osvw.length = min(max(3ul, osvw_length), 64ul);
+    svm->osvw.status = osvw_status & ~6;
 
     /*
      * By increasing VCPU's osvw.length to 3 we are telling the guest that
@@ -1216,7 +1217,9 @@ static void svm_guest_osvw_init(struct vcpu *vcpu)
      * is present (because we really don't know).
      */
     if ( osvw_length == 0 && boot_cpu_data.x86 == 0x10 )
-        vcpu->arch.hvm_svm.osvw.status |= 1;
+        svm->osvw.status |= 1;
+
+    spin_unlock(&osvw_lock);
 }
 
 void svm_host_osvw_reset()
@@ -1268,6 +1271,8 @@ static int svm_domain_initialise(struct domain *d)
 
     d->arch.ctxt_switch = &csw;
 
+    svm_guest_osvw_init(d);
+
     return 0;
 }
 
@@ -1289,8 +1294,6 @@ static int svm_vcpu_initialise(struct vcpu *v)
         return rc;
     }
 
-    svm_guest_osvw_init(v);
-
     return 0;
 }
 
@@ -1627,23 +1630,6 @@ static void svm_init_erratum_383(const struct cpuinfo_x86 *c)
     }
 }
 
-static int svm_handle_osvw(struct vcpu *v, uint32_t msr, uint64_t *val, bool_t read)
-{
-    if ( !v->domain->arch.cpuid->extd.osvw )
-        return -1;
-
-    if ( read )
-    {
-        if (msr == MSR_AMD_OSVW_ID_LENGTH)
-            *val = v->arch.hvm_svm.osvw.length;
-        else
-            *val = v->arch.hvm_svm.osvw.status;
-    }
-    /* Writes are ignored */
-
-    return 0;
-}
-
 static int _svm_cpu_up(bool bsp)
 {
     uint64_t msr_content;
@@ -1875,6 +1861,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 {
     int ret;
     struct vcpu *v = current;
+    const struct domain *d = v->domain;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     switch ( msr )
@@ -2017,9 +2004,10 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 
     case MSR_AMD_OSVW_ID_LENGTH:
     case MSR_AMD_OSVW_STATUS:
-        ret = svm_handle_osvw(v, msr, msr_content, 1);
-        if ( ret < 0 )
+        if ( !d->arch.cpuid->extd.osvw )
             goto gpf;
+        *msr_content =
+            d->arch.hvm_domain.svm.osvw.raw[msr - MSR_AMD_OSVW_ID_LENGTH];
         break;
 
     default:
@@ -2063,6 +2051,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 {
     int ret, result = X86EMUL_OKAY;
     struct vcpu *v = current;
+    struct domain *d = v->domain;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     switch ( msr )
@@ -2218,9 +2207,9 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 
     case MSR_AMD_OSVW_ID_LENGTH:
     case MSR_AMD_OSVW_STATUS:
-        ret = svm_handle_osvw(v, msr, &msr_content, 0);
-        if ( ret < 0 )
+        if ( !d->arch.cpuid->extd.osvw )
             goto gpf;
+        /* Write-discard */
         break;
 
     default:
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index 6add818e5c..f7974dab09 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -493,6 +493,14 @@ struct vmcb_struct {
 };
 
 struct svm_domain {
+    /* OSVW MSRs */
+    union {
+        uint64_t raw[2];
+        struct {
+            uint64_t length;
+            uint64_t status;
+        };
+    } osvw;
 };
 
 /*
@@ -536,12 +544,6 @@ struct arch_svm_struct {
 
     /* data breakpoint extension MSRs */
     uint32_t dr_mask[4];
-
-    /* OSVW MSRs */
-    struct {
-        u64 length;
-        u64 status;
-    } osvw;
 };
 
 struct vmcb_struct *alloc_vmcb(void);
--
generated by git-patchbot for /home/xen/git/xen.git#staging
