[Xen-changelog] [xen-unstable] svm: support EFER.LMSLE for guests
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1273142634 -3600
# Node ID 69c85f5b0a07e7a95945d117ea478a80d21c6b9e
# Parent 26da9bb87405c64c02def8d5f11c66f15847bd02
svm: support EFER.LMSLE for guests

Now that the feature is officially documented (see
http://support.amd.com/us/Processor_TechDocs/24593.pdf), I think it
makes sense to also allow HVM guests to make use of it.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c        |    7 +++++--
 xen/arch/x86/hvm/svm/svm.c    |   26 ++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h |    1 +
 3 files changed, 32 insertions(+), 2 deletions(-)

diff -r 26da9bb87405 -r 69c85f5b0a07 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Thu May 06 11:39:09 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Thu May 06 11:43:54 2010 +0100
@@ -590,6 +590,7 @@ static bool_t hvm_efer_valid(uint64_t va
             ((sizeof(long) != 8) && (value & EFER_LME)) ||
             (!cpu_has_nx && (value & EFER_NX)) ||
             (!cpu_has_syscall && (value & EFER_SCE)) ||
+            (!cpu_has_lmsl && (value & EFER_LMSLE)) ||
             (!cpu_has_ffxsr && (value & EFER_FFXSE)) ||
             ((value & (EFER_LME|EFER_LMA)) == EFER_LMA));
 }
@@ -641,7 +642,8 @@ static int hvm_load_cpu_ctxt(struct doma
     }
 
     if ( !hvm_efer_valid(
-        ctxt.msr_efer, EFER_FFXSE | EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
+        ctxt.msr_efer,
+        EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
     {
         gdprintk(XENLOG_ERR, "HVM restore: bad EFER 0x%"PRIx64"\n",
                  ctxt.msr_efer);
@@ -995,7 +997,8 @@ int hvm_set_efer(uint64_t value)
 
     value &= ~EFER_LMA;
 
-    if ( !hvm_efer_valid(value, EFER_FFXSE | EFER_LME | EFER_NX | EFER_SCE) )
+    if ( !hvm_efer_valid(value,
+                         EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_NX | EFER_SCE) )
     {
         gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                  "EFER: %"PRIx64"\n", value);
diff -r 26da9bb87405 -r 69c85f5b0a07 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Thu May 06 11:39:09 2010 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Thu May 06 11:43:54 2010 +0100
@@ -57,6 +57,9 @@
 
 u32 svm_feature_flags;
 
+/* Indicates whether guests may use EFER.LMSLE. */
+bool_t cpu_has_lmsl;
+
 #define set_segment_register(name, value)  \
     asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
 
@@ -847,6 +850,29 @@ static int svm_cpu_up(struct cpuinfo_x86
 
     /* Initialize core's ASID handling. */
     svm_asid_init(c);
+
+#ifdef __x86_64__
+    /*
+     * Check whether EFER.LMSLE can be written.
+     * Unfortunately there's no feature bit defined for this.
+     */
+    eax = read_efer();
+    edx = read_efer() >> 32;
+    if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
+        rdmsr(MSR_EFER, eax, edx);
+    if ( eax & EFER_LMSLE )
+    {
+        if ( c == &boot_cpu_data )
+            cpu_has_lmsl = 1;
+        wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);
+    }
+    else
+    {
+        if ( cpu_has_lmsl )
+            printk(XENLOG_WARNING "Inconsistent LMLSE support across CPUs!\n");
+        cpu_has_lmsl = 0;
+    }
+#endif
 
     return 1;
 }
diff -r 26da9bb87405 -r 69c85f5b0a07 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h	Thu May 06 11:39:09 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h	Thu May 06 11:43:54 2010 +0100
@@ -135,6 +135,7 @@ struct hvm_function_table {
 
 extern struct hvm_function_table hvm_funcs;
 extern int hvm_enabled;
+extern bool_t cpu_has_lmsl;
 
 int hvm_domain_initialise(struct domain *d);
 void hvm_domain_relinquish_resources(struct domain *d);
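
For context, a minimal, hypothetical sketch of what a 64-bit HVM guest kernel could do at ring 0 to turn the feature on once it runs on a host where the probe above has set cpu_has_lmsl. The MSR number and bit position are taken from the AMD manual linked in the description; the helper names (rdmsr_efer, wrmsr_efer, guest_enable_lmsle) are illustrative only and not part of this changeset.

/*
 * Hypothetical guest-side example, not part of this patch: enable
 * long-mode segment-limit checking from ring 0 inside an HVM guest.
 */
#include <stdint.h>

#define MSR_EFER    0xc0000080u      /* extended feature enable register */
#define EFER_LMSLE  (1ULL << 13)     /* Long Mode Segment Limit Enable   */

static inline uint64_t rdmsr_efer(void)
{
    uint32_t lo, hi;
    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (MSR_EFER) );
    return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr_efer(uint64_t val)
{
    asm volatile ( "wrmsr" : : "c" (MSR_EFER),
                   "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
}

static void guest_enable_lmsle(void)
{
    /* The WRMSR is intercepted by Xen; with this patch hvm_set_efer()
     * accepts the bit when the underlying CPU supports it. */
    wrmsr_efer(rdmsr_efer() | EFER_LMSLE);
}

On a host without LMSLE support the same write is still rejected by hvm_efer_valid(), so the guest sees the usual failure for setting a reserved EFER bit.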