
[Xen-changelog] [xen staging] x86/svm: Drop the suggestion of Long Mode Segment Limit support



commit 8d13ba787c2963f9e3f100d791c5915389c957ec
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Jul 20 15:43:49 2018 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Jul 24 11:25:53 2018 +0100

    x86/svm: Drop the suggestion of Long Mode Segment Limit support
    
    Because of a bug introduced in 2010, LMSL support isn't available to guests.
    
    c/s f2c608444 noticed but avoided fixing the issue for migration reasons.
    In addition to migration problems, changes to the segmentation logic for
    emulation would be needed before the feature could be enabled.
    
    This feature is entirely unused by operating systems (probably owing to its
    semantics, which cover only half of the segment registers), and no one has
    commented on its absence from Xen.  As supporting it would involve a large
    amount of effort, it seems better to remove the code entirely.
    
    If someone finds a valid usecase, we can resurrect the code and
    implement the remaining parts, but I doubt anyone will.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c          |  3 ---
 xen/arch/x86/hvm/svm/svm.c      | 23 -----------------------
 xen/arch/x86/pv/emul-priv-op.c  |  2 +-
 xen/include/asm-x86/hvm/hvm.h   |  1 -
 xen/include/asm-x86/msr-index.h |  4 +---
 5 files changed, 2 insertions(+), 31 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c099c617e8..67b99af334 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -925,9 +925,6 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
     if ( (value & EFER_SVME) && (!p->extd.svm || !nestedhvm_enabled(d)) )
         return "SVME without nested virt";
 
-    if ( (value & EFER_LMSLE) && !cpu_has_lmsl )
-        return "LMSLE without support";
-
     if ( (value & EFER_FFXSE) && !p->extd.ffxsr )
         return "FFXSE without feature";
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 8acd0d0963..37f782bc9b 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -67,9 +67,6 @@ void svm_asm_do_resume(void);
 
 u32 svm_feature_flags;
 
-/* Indicates whether guests may use EFER.LMSLE. */
-bool_t cpu_has_lmsl;
-
 static void svm_update_guest_efer(struct vcpu *);
 
 static struct hvm_function_table svm_function_table;
@@ -1676,26 +1673,6 @@ static int _svm_cpu_up(bool bsp)
     /* Initialize core's ASID handling. */
     svm_asid_init(c);
 
-    /*
-     * Check whether EFER.LMSLE can be written.
-     * Unfortunately there's no feature bit defined for this.
-     */
-    msr_content = read_efer();
-    if ( wrmsr_safe(MSR_EFER, msr_content | EFER_LMSLE) == 0 )
-        rdmsrl(MSR_EFER, msr_content);
-    if ( msr_content & EFER_LMSLE )
-    {
-        if ( 0 && /* FIXME: Migration! */ bsp )
-            cpu_has_lmsl = 1;
-        wrmsrl(MSR_EFER, msr_content ^ EFER_LMSLE);
-    }
-    else
-    {
-        if ( cpu_has_lmsl )
-            printk(XENLOG_WARNING "Inconsistent LMSLE support across CPUs!\n");
-        cpu_has_lmsl = 0;
-    }
-
     /* Initialize OSVW bits to be used by guests */
     svm_host_osvw_init();
 
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index ce2ec76cde..84f22ae988 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -874,7 +874,7 @@ static int read_msr(unsigned int reg, uint64_t *val,
          * vendor-dependent behaviour.
          */
         if ( is_pv_32bit_domain(currd) )
-            *val &= ~(EFER_LME | EFER_LMA | EFER_LMSLE |
+            *val &= ~(EFER_LME | EFER_LMA |
                       (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
                        ? EFER_SCE : 0));
         return X86EMUL_OKAY;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 1ee273b075..4f720ade4b 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -236,7 +236,6 @@ struct hvm_function_table {
 
 extern struct hvm_function_table hvm_funcs;
 extern bool_t hvm_enabled;
-extern bool_t cpu_has_lmsl;
 extern s8 hvm_port80_allowed;
 
 extern const struct hvm_function_table *start_svm(void);
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 94bccf73a1..85efaab442 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -20,7 +20,6 @@
 #define _EFER_LMA              10 /* Long mode active (read-only) */
 #define _EFER_NX               11 /* No execute enable */
 #define _EFER_SVME             12 /* AMD: SVM enable */
-#define _EFER_LMSLE            13 /* AMD: Long-mode segment limit enable */
 #define _EFER_FFXSE            14 /* AMD: Fast FXSAVE/FXRSTOR enable */
 
 #define EFER_SCE               (1<<_EFER_SCE)
@@ -28,11 +27,10 @@
 #define EFER_LMA               (1<<_EFER_LMA)
 #define EFER_NX                        (1<<_EFER_NX)
 #define EFER_SVME              (1<<_EFER_SVME)
-#define EFER_LMSLE             (1<<_EFER_LMSLE)
 #define EFER_FFXSE             (1<<_EFER_FFXSE)
 
 #define EFER_KNOWN_MASK                (EFER_SCE | EFER_LME | EFER_LMA | EFER_NX | \
-                                EFER_SVME | EFER_LMSLE | EFER_FFXSE)
+                                EFER_SVME | EFER_FFXSE)
 
 /* Speculation Controls. */
 #define MSR_SPEC_CTRL                  0x00000048
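
The detection logic removed from svm.c above had to write the bit and read
it back, because AMD never defined a CPUID feature bit for LMSLE.  A minimal
standalone sketch of that probe technique follows; the rdmsr()/wrmsr()
wrappers are illustrative ring-0 helpers assumed for this example, not Xen's
read_efer()/wrmsr_safe().

/*
 * Standalone sketch of the removed probe: there is no CPUID bit for
 * LMSLE, so the only way to detect it is to try setting EFER.LMSLE and
 * see whether the bit sticks.  Assumes ring 0 on an AMD CPU.  On
 * hardware without LMSLE the WRMSR faults with #GP, which is why the
 * real code used wrmsr_safe(); this sketch omits that fault recovery.
 */
#include <stdbool.h>
#include <stdint.h>

#define MSR_EFER    0xc0000080u
#define EFER_LMSLE  (1ULL << 13)  /* AMD: Long-mode segment limit enable */

static inline uint64_t rdmsr(uint32_t msr)
{
    uint32_t lo, hi;

    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );

    return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr(uint32_t msr, uint64_t val)
{
    asm volatile ( "wrmsr" :: "c" (msr), "a" ((uint32_t)val),
                   "d" ((uint32_t)(val >> 32)) );
}

static bool probe_lmsle(void)
{
    uint64_t efer = rdmsr(MSR_EFER);
    bool supported;

    wrmsr(MSR_EFER, efer | EFER_LMSLE);       /* Attempt to set the bit. */
    supported = rdmsr(MSR_EFER) & EFER_LMSLE; /* Did it stick? */

    if ( supported )
        wrmsr(MSR_EFER, efer);                /* Toggle it back off. */

    return supported;
}

Because the original probe ran on every CPU as it was brought up, the real
code also compared results against the BSP and printed a warning on
inconsistent LMSLE support across CPUs.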
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

