[Xen-devel] [PATCH] x86/svm: Fixes to nested-svm MSR handling



The intention of this patch was to remove the calls to nsvm_{rd,wr}msr() from
the default cases of svm_msr_{read,write}_intercept(), but it has turned into
rather more of a bugfix than simple code motion.

First, collect the VM_CR bit definitions next to the main define, and simplify
the naming.  The SVM MSRs should be entirely unavailable when SVM isn't
enumerated in CPUID.

When SVM is available, Xen only supports the "Enabled" mode as described in
the BKDG/PPRs, which means VM_CR.LOCK should be set.  This in turn means that
MSR_SVM_LOCK_KEY should be implemented.  It is read-as-0/write-discard, as Xen
doesn't implement the "disabled with user supplied key" mode.
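
As an illustration (not part of the patch): a sketch of the guest-visible
behaviour these rules produce, for a guest with SVM enumerated in CPUID.
The rdmsr()/wrmsr() helpers are assumed guest-side wrappers around the raw
instructions, included only to keep the snippet self-contained; without SVM
enumerated, all four MSRs simply raise #GP.

    #include <stdint.h>

    static inline uint64_t rdmsr(uint32_t msr)
    {
        uint32_t lo, hi;

        asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );

        return ((uint64_t)hi << 32) | lo;
    }

    static inline void wrmsr(uint32_t msr, uint64_t val)
    {
        asm volatile ( "wrmsr" :: "c" (msr),
                       "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
    }

    #define MSR_VM_CR        0xc0010114
    #define MSR_SVM_LOCK_KEY 0xc0010118
    #define VM_CR_LOCK       (1ULL << 3)

    /* Ring 0 only - MSR accesses fault outside the kernel. */
    void probe_svm_msrs(void)
    {
        /* VM_CR reads with just the Lock bit set ("Enabled" mode)... */
        uint64_t vm_cr = rdmsr(MSR_VM_CR);       /* == VM_CR_LOCK */

        /* ... and all of its bits are read-only; writes raise #GP. */

        /* SVM_LOCK_KEY is read-as-0/write-discard. */
        wrmsr(MSR_SVM_LOCK_KEY, 0xdead);
        uint64_t key = rdmsr(MSR_SVM_LOCK_KEY);  /* still 0 */

        (void)vm_cr; (void)key;
    }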

The correct reset value for the HSAVE address is 0, not ~0.  Xen doesn't use
the guest frame for anything, and doesn't implement TSEG/ASEG/HyperTransport
in the guest physical address space.  Drop the arbitrary upper bounds check,
which appears to be an incorrect attempt to exclude the HyperTransport range
below the 1TB boundary, and accept any frame within MAXPHYSADDR.  When we get
a usable physmap layout in Xen, this should be restricted to RAM pages.
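
As a worked example, reusing the wrmsr() helper and hypothetical values from
the sketch above, and assuming a guest MAXPHYSADDR of 40 bits:

    #define MSR_VM_HSAVE_ADDR 0xc0010117

    wrmsr(MSR_VM_HSAVE_ADDR, 0xfe00000000ULL); /* Accepted: 4k-aligned and
                                                * below 2^40.  The old bound
                                                * rejected everything above
                                                * 0xfd00000000. */
    wrmsr(MSR_VM_HSAVE_ADDR, 0x123456);        /* #GP: not 4k-aligned. */
    wrmsr(MSR_VM_HSAVE_ADDR, 1ULL << 40);      /* #GP: beyond MAXPHYSADDR. */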

Remove the now-unused ret variables from the intercept functions, as well as
the unnecessary result variable in svm_msr_write_intercept().

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
CC: Brian Woods <brian.woods@xxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/nestedsvm.c        | 79 +------------------------------
 xen/arch/x86/hvm/svm/svm.c              | 82 +++++++++++++++++++++++++--------
 xen/include/asm-x86/hvm/svm/nestedsvm.h |  4 --
 xen/include/asm-x86/msr-index.h         | 13 +++---
 4 files changed, 72 insertions(+), 106 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 9660202..6357bea 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -47,22 +47,6 @@ nestedsvm_vcpu_stgi(struct vcpu *v)
     local_event_delivery_enable(); /* unmask events for PV drivers */
 }
 
-static int
-nestedsvm_vmcb_isvalid(struct vcpu *v, uint64_t vmcxaddr)
-{
-    /* Address must be 4k aligned */
-    if ( (vmcxaddr & ~PAGE_MASK) != 0 )
-        return 0;
-
-    /* Maximum valid physical address.
-     * See AMD BKDG for HSAVE_PA MSR.
-     */
-    if ( vmcxaddr > 0xfd00000000ULL )
-        return 0;
-
-    return 1;
-}
-
 int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr)
 {
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
@@ -164,7 +148,7 @@ int nsvm_vcpu_reset(struct vcpu *v)
 {
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
 
-    svm->ns_msr_hsavepa = INVALID_PADDR;
+    svm->ns_msr_hsavepa = 0;
     svm->ns_ovvmcb_pa = INVALID_PADDR;
 
     svm->ns_tscratio = DEFAULT_TSC_RATIO;
@@ -1267,67 +1251,6 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
     return hvm_intblk_none;
 }
 
-/* MSR handling */
-int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
-{
-    struct nestedsvm *svm = &vcpu_nestedsvm(v);
-    int ret = 1;
-
-    *msr_content = 0;
-
-    switch (msr) {
-    case MSR_K8_VM_CR:
-        break;
-    case MSR_K8_VM_HSAVE_PA:
-        *msr_content = svm->ns_msr_hsavepa;
-        break;
-    case MSR_AMD64_TSC_RATIO:
-        *msr_content = svm->ns_tscratio;
-        break;
-    default:
-        ret = 0;
-        break;
-    }
-
-    return ret;
-}
-
-int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content)
-{
-    int ret = 1;
-    struct nestedsvm *svm = &vcpu_nestedsvm(v);
-
-    switch (msr) {
-    case MSR_K8_VM_CR:
-        /* ignore write. handle all bits as read-only. */
-        break;
-    case MSR_K8_VM_HSAVE_PA:
-        if (!nestedsvm_vmcb_isvalid(v, msr_content)) {
-            gdprintk(XENLOG_ERR,
-                "MSR_K8_VM_HSAVE_PA value invalid %#"PRIx64"\n", msr_content);
-            ret = -1; /* inject #GP */
-            break;
-        }
-        svm->ns_msr_hsavepa = msr_content;
-        break;
-    case MSR_AMD64_TSC_RATIO:
-        if ((msr_content & ~TSC_RATIO_RSVD_BITS) != msr_content) {
-            gdprintk(XENLOG_ERR,
-                "reserved bits set in MSR_AMD64_TSC_RATIO %#"PRIx64"\n",
-                msr_content);
-            ret = -1; /* inject #GP */
-            break;
-        }
-        svm->ns_tscratio = msr_content;
-        break;
-    default:
-        ret = 0;
-        break;
-    }
-
-    return ret;
-}
-
 /* VMEXIT emulation */
 void
 nestedsvm_vmexit_defer(struct vcpu *v,
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b9a8900..fcedf3d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1675,8 +1675,8 @@ static int _svm_cpu_up(bool bsp)
     const struct cpuinfo_x86 *c = &cpu_data[cpu];
  
     /* Check whether SVM feature is disabled in BIOS */
-    rdmsrl(MSR_K8_VM_CR, msr_content);
-    if ( msr_content & K8_VMCR_SVME_DISABLE )
+    rdmsrl(MSR_VM_CR, msr_content);
+    if ( msr_content & VM_CR_SVME_DISABLE )
     {
         printk("CPU%d: AMD SVM Extension is disabled in BIOS.\n", cpu);
         return -EINVAL;
@@ -1688,7 +1688,7 @@ static int _svm_cpu_up(bool bsp)
     write_efer(read_efer() | EFER_SVME);
 
     /* Initialize the HSA for this core. */
-    wrmsrl(MSR_K8_VM_HSAVE_PA, per_cpu(hsa, cpu));
+    wrmsrl(MSR_VM_HSAVE_ADDR, per_cpu(hsa, cpu));
 
     /* check for erratum 383 */
     svm_init_erratum_383(c);
@@ -1898,10 +1898,11 @@ static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
 
 static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 {
-    int ret;
     struct vcpu *v = current;
     const struct domain *d = v->domain;
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+    const struct nestedsvm *nsvm = &vcpu_nestedsvm(v);
+    const struct cpuid_policy *cp = d->arch.cpuid;
 
     switch ( msr )
     {
@@ -2028,6 +2029,34 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
             goto gpf;
         break;
 
+    case MSR_VM_CR:
+        if ( !cp->extd.svm )
+            goto gpf;
+
+        *msr_content = VM_CR_LOCK;
+        break;
+
+    case MSR_VM_HSAVE_ADDR:
+        if ( !cp->extd.svm )
+            goto gpf;
+
+        *msr_content = nsvm->ns_msr_hsavepa;
+        break;
+
+    case MSR_SVM_LOCK_KEY:
+        if ( !cp->extd.svm )
+            goto gpf;
+
+        *msr_content = 0;
+        break;
+
+    case MSR_AMD64_TSC_RATIO:
+        if ( !cp->extd.svm )
+            goto gpf;
+
+        *msr_content = nsvm->ns_tscratio;
+        break;
+
     case MSR_AMD_OSVW_ID_LENGTH:
     case MSR_AMD_OSVW_STATUS:
         if ( !d->arch.cpuid->extd.osvw )
@@ -2036,12 +2065,6 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         break;
 
     default:
-        ret = nsvm_rdmsr(v, msr, msr_content);
-        if ( ret < 0 )
-            goto gpf;
-        else if ( ret )
-            break;
-
         if ( rdmsr_safe(msr, *msr_content) == 0 )
             break;
 
@@ -2070,10 +2093,11 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 
 static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 {
-    int ret, result = X86EMUL_OKAY;
     struct vcpu *v = current;
     struct domain *d = v->domain;
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+    struct nestedsvm *nsvm = &vcpu_nestedsvm(v);
+    const struct cpuid_policy *cp = d->arch.cpuid;
 
     switch ( msr )
     {
@@ -2204,6 +2228,34 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
             goto gpf;
         break;
 
+    case MSR_VM_CR:
+        /* Xen always sets the Lock bit, making this MSR read-only. */
+        goto gpf;
+
+    case MSR_VM_HSAVE_ADDR:
+        if ( !cp->extd.svm || (msr_content & ~PAGE_MASK) ||
+             !gfn_valid(d, gaddr_to_gfn(msr_content)) )
+            goto gpf;
+        /*
+         * Xen doesn't actually use the guest-provided HSAVE area for
+         * anything.  Just remember what the guest wrote.
+         */
+        nsvm->ns_msr_hsavepa = msr_content;
+        break;
+
+    case MSR_SVM_LOCK_KEY:
+        if ( !cp->extd.svm )
+            goto gpf;
+        /* Write discard. SVM-Lock not implemented. */
+        break;
+
+    case MSR_AMD64_TSC_RATIO:
+        if ( !cp->extd.svm || (msr_content & TSC_RATIO_RSVD_BITS) )
+            goto gpf;
+
+        nsvm->ns_tscratio = msr_content;
+        break;
+
     case MSR_IA32_MCx_MISC(4): /* Threshold register */
     case MSR_F10_MC4_MISC1 ... MSR_F10_MC4_MISC3:
         /*
@@ -2221,12 +2273,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         break;
 
     default:
-        ret = nsvm_wrmsr(v, msr, msr_content);
-        if ( ret < 0 )
-            goto gpf;
-        else if ( ret )
-            break;
-
         /* Match up with the RDMSR side; ultimately this should go away. */
         if ( rdmsr_safe(msr, msr_content) == 0 )
             break;
@@ -2234,7 +2280,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         goto gpf;
     }
 
-    return result;
+    return X86EMUL_OKAY;
 
  gpf:
     return X86EMUL_EXCEPTION;
diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h
index 31fb4bf..0873698 100644
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
@@ -118,10 +118,6 @@ bool_t nsvm_vmcb_guest_intercepts_event(
 bool_t nsvm_vmcb_hap_enabled(struct vcpu *v);
 enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
 
-/* MSRs */
-int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
-int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content);
-
 /* Interrupts, vGIF */
 void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
 void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 24d783a..4f43a16 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -199,8 +199,13 @@
 #define MSR_K8_PSTATE6                 0xc001006A
 #define MSR_K8_PSTATE7                 0xc001006B
 #define MSR_K8_ENABLE_C1E              0xc0010055
-#define MSR_K8_VM_CR                   0xc0010114
-#define MSR_K8_VM_HSAVE_PA             0xc0010117
+
+#define MSR_VM_CR                       0xc0010114
+#define  VM_CR_LOCK                     (_AC(1, ULL) << 3)
+#define  VM_CR_SVME_DISABLE             (_AC(1, ULL) << 4)
+
+#define MSR_VM_HSAVE_ADDR               0xc0010117
+#define MSR_SVM_LOCK_KEY                0xc0010118
 
 #define MSR_AMD_FAM15H_EVNTSEL0                0xc0010200
 #define MSR_AMD_FAM15H_PERFCTR0                0xc0010201
@@ -220,10 +225,6 @@
 #define MSR_K8_FEATURE_MASK            0xc0011004
 #define MSR_K8_EXT_FEATURE_MASK                0xc0011005
 
-/* MSR_K8_VM_CR bits: */
-#define _K8_VMCR_SVME_DISABLE          4
-#define K8_VMCR_SVME_DISABLE           (1 << _K8_VMCR_SVME_DISABLE)
-
 /* AMD64 MSRs */
 #define MSR_AMD64_NB_CFG               0xc001001f
 #define AMD64_NB_CFG_CF8_EXT_ENABLE_BIT        46
-- 
2.1.4

