[Xen-devel] [V8 3/4] x86/xsaves: enable xsaves/xrstors for hvm guest
This patch enables xsaves/xrstors for HVM guests. It:
1. handles the XSAVES VMCS init and vmexit.
2. adds logic to read/write the XSS MSR.
(A short sketch of the CPUID sub-leaf 1 size computation follows the diff stat below.)
Signed-off-by: Shuai Ruan <shuai.ruan@xxxxxxxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 27 +++++++++++++++++++++++++++
xen/arch/x86/hvm/vmx/vmcs.c | 5 ++++-
xen/arch/x86/hvm/vmx/vmx.c | 20 ++++++++++++++++++++
xen/arch/x86/xstate.c | 4 ++--
xen/include/asm-x86/hvm/vmx/vmcs.h | 4 ++++
xen/include/asm-x86/hvm/vmx/vmx.h | 2 ++
xen/include/asm-x86/xstate.h | 1 +
7 files changed, 60 insertions(+), 3 deletions(-)
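
Note (not part of the patch, added for the reader): a minimal sketch of how the
CPUID.(EAX=0xD,ECX=1).EBX value in the hvm_cpuid() hunk is derived. The helper
name compacted_xsave_size() and the component_sizes[] parameter are illustrative
only, not Xen symbols; in the patch the per-component sizes come from
xstate_sizes[], which this change exports. The save area starts at the 576-byte
minimum (512-byte legacy region plus 64-byte XSAVE header) and grows by the size
of every state component enabled in either XCR0 or IA32_XSS:

    #include <stdint.h>

    /* 512-byte legacy FXSAVE region + 64-byte XSAVE header. */
    #define XSAVE_AREA_MIN_SIZE (512 + 64)

    static uint32_t compacted_xsave_size(uint64_t xcr0, uint64_t xss,
                                         const uint32_t component_sizes[63])
    {
        uint32_t size = XSAVE_AREA_MIN_SIZE;
        uint64_t enabled = xcr0 | xss;
        unsigned int i;

        /* Components 0 and 1 (x87/SSE) already live in the legacy region. */
        for ( i = 2; i < 63; i++ )
            if ( enabled & (1ULL << i) )
                size += component_sizes[i];

        return size;
    }

Like the hunk below, this only mirrors the accumulation the patch performs and
ignores any 64-byte alignment individual components may request in the
compacted format.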
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0140d34..5f1d993 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4561,6 +4561,20 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
*ebx = _eax + _ebx;
}
}
+ if ( count == 1 )
+ {
+ if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
+ {
+ *ebx = XSTATE_AREA_MIN_SIZE;
+ if ( v->arch.xcr0 | v->arch.hvm_vcpu.msr_xss )
+ for ( sub_leaf = 2; sub_leaf < 63; sub_leaf++ )
+ if ( (v->arch.xcr0 | v->arch.hvm_vcpu.msr_xss)
+ & (1ULL << sub_leaf) )
+ *ebx += xstate_sizes[sub_leaf];
+ }
+ else
+ *ebx = *ecx = *edx = 0;
+ }
break;
case 0x80000001:
@@ -4660,6 +4674,12 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
*msr_content = v->arch.hvm_vcpu.guest_efer;
break;
+ case MSR_IA32_XSS:
+ if ( !cpu_has_xsaves )
+ goto gp_fault;
+ *msr_content = v->arch.hvm_vcpu.msr_xss;
+ break;
+
case MSR_IA32_TSC:
*msr_content = _hvm_rdtsc_intercept();
break;
@@ -4792,6 +4812,13 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
return X86EMUL_EXCEPTION;
break;
+ case MSR_IA32_XSS:
+ /* No XSS features currently supported for guests. */
+ if ( !cpu_has_xsaves || msr_content != 0 )
+ goto gp_fault;
+ v->arch.hvm_vcpu.msr_xss = msr_content;
+ break;
+
case MSR_IA32_TSC:
hvm_set_guest_tsc(v, msr_content);
break;
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 3592a88..7185d55 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -240,7 +240,8 @@ static int vmx_init_vmcs_config(void)
SECONDARY_EXEC_PAUSE_LOOP_EXITING |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_ENABLE_VM_FUNCTIONS |
- SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS);
+ SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS |
+ SECONDARY_EXEC_XSAVES);
rdmsrl(MSR_IA32_VMX_MISC, _vmx_misc_cap);
if ( _vmx_misc_cap & VMX_MISC_VMWRITE_ALL )
opt |= SECONDARY_EXEC_ENABLE_VMCS_SHADOWING;
@@ -1249,6 +1250,8 @@ static int construct_vmcs(struct vcpu *v)
__vmwrite(HOST_PAT, host_pat);
__vmwrite(GUEST_PAT, guest_pat);
}
+ if ( cpu_has_vmx_xsaves )
+ __vmwrite(XSS_EXIT_BITMAP, 0);
vmx_vmcs_exit(v);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bbec0e8..5d723e8 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2852,6 +2852,18 @@ static void vmx_idtv_reinject(unsigned long idtv_info)
}
}
+static void vmx_handle_xsaves(void)
+{
+ gdprintk(XENLOG_ERR, "xsaves should not cause vmexit\n");
+ domain_crash(current->domain);
+}
+
+static void vmx_handle_xrstors(void)
+{
+ gdprintk(XENLOG_ERR, "xrstors should not cause vmexit\n");
+ domain_crash(current->domain);
+}
+
static int vmx_handle_apic_write(void)
{
unsigned long exit_qualification;
@@ -3423,6 +3435,14 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
vmx_vcpu_flush_pml_buffer(v);
break;
+ case EXIT_REASON_XSAVES:
+ vmx_handle_xsaves();
+ break;
+
+ case EXIT_REASON_XRSTORS:
+ vmx_handle_xrstors();
+ break;
+
case EXIT_REASON_ACCESS_GDTR_OR_IDTR:
case EXIT_REASON_ACCESS_LDTR_OR_TR:
case EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED:
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index ce65c11..d2d2993 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -23,8 +23,8 @@ static u32 __read_mostly xsave_cntxt_size;
/* A 64-bit bitmask of the XSAVE/XRSTOR features supported by processor. */
u64 __read_mostly xfeature_mask;
-static unsigned int * __read_mostly xstate_offsets;
-static unsigned int * __read_mostly xstate_sizes;
+unsigned int * __read_mostly xstate_offsets;
+unsigned int * __read_mostly xstate_sizes;
static unsigned int __read_mostly xstate_features;
static unsigned int __read_mostly xstate_comp_offsets[sizeof(xfeature_mask)*8];
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index f1126d4..79c2c58 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -225,6 +225,7 @@ extern u32 vmx_vmentry_control;
#define SECONDARY_EXEC_ENABLE_VMCS_SHADOWING 0x00004000
#define SECONDARY_EXEC_ENABLE_PML 0x00020000
#define SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS 0x00040000
+#define SECONDARY_EXEC_XSAVES 0x00100000
extern u32 vmx_secondary_exec_control;
#define VMX_EPT_EXEC_ONLY_SUPPORTED 0x00000001
@@ -291,6 +292,8 @@ extern u32 vmx_secondary_exec_control;
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS)
#define cpu_has_vmx_pml \
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
+#define cpu_has_vmx_xsaves \
+ (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
#define VMCS_RID_TYPE_MASK 0x80000000
@@ -365,6 +368,7 @@ enum vmcs_field {
VMREAD_BITMAP = 0x00002026,
VMWRITE_BITMAP = 0x00002028,
VIRT_EXCEPTION_INFO = 0x0000202a,
+ XSS_EXIT_BITMAP = 0x0000202c,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
VMCS_LINK_POINTER = 0x00002800,
GUEST_IA32_DEBUGCTL = 0x00002802,
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 2ed62f9..cb66925 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -188,6 +188,8 @@ static inline unsigned long pi_get_pir(struct pi_desc *pi_desc, int group)
#define EXIT_REASON_INVPCID 58
#define EXIT_REASON_VMFUNC 59
#define EXIT_REASON_PML_FULL 62
+#define EXIT_REASON_XSAVES 63
+#define EXIT_REASON_XRSTORS 64
/*
* Interruption-information format
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 414cc99..3de88bd 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -45,6 +45,7 @@
#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
extern u64 xfeature_mask;
+extern unsigned int *xstate_offsets, *xstate_sizes;
/* extended state save area */
struct __packed __attribute__((aligned (64))) xsave_struct
--
1.9.1