[Xen-devel] [V6 1/4] x86/xsaves: add basic definitions/helpers to support xsaves
This patch adds basic definitions/helpers which will be used in
later patches.
Signed-off-by: Shuai Ruan <shuai.ruan@xxxxxxxxxxxxxxx>
---
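Note for reviewers (kept below the '---' so it stays out of the commit):
MSR_IA32_XSS is the supervisor-state counterpart of XCR0; XSAVES/XRSTORS
act on the union of the features enabled in XCR0 and in IA32_XSS. The
per-CPU cache added below lets hot paths read the current XSS value
without an rdmsr. A minimal sketch of how a later patch might use the
helpers on vcpu switch-in (the caller itself is illustrative; only
msr_xss and the two accessors come from this patch):

    /* Illustrative caller, not introduced by this patch. */
    static void restore_guest_xss(struct vcpu *v)
    {
        /* get_msr_xss() only reads the per-CPU cache, no rdmsr. */
        if ( get_msr_xss() != v->arch.hvm_vcpu.msr_xss )
            set_msr_xss(v->arch.hvm_vcpu.msr_xss); /* wrmsrl + cache */
    }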
xen/arch/x86/xstate.c | 16 ++++++++++++++++
xen/include/asm-x86/hvm/vcpu.h | 1 +
xen/include/asm-x86/msr-index.h | 2 ++
xen/include/asm-x86/xstate.h | 25 ++++++++++++++++++++++++-
4 files changed, 43 insertions(+), 1 deletion(-)
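The xstate_offsets/xstate_sizes arrays are meant to hold the offset and
size of each XSAVE state component as enumerated by CPUID leaf 0xD
(subleaf i reports the size in eax and the standard-format offset in ebx
for component i >= 2). A sketch of how a later patch in this series
might populate them; the function is illustrative and assumes the arrays
were allocated beforehand, only the arrays themselves come from this
patch:

    /* Illustrative sketch, not part of this patch. */
    static void __init setup_xstate_features_sketch(void)
    {
        unsigned int i, eax, ebx, ecx, edx;

        for ( i = 2; i < 63; i++ )
        {
            if ( !(xfeature_mask & (1ULL << i)) )
                continue;
            cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
            xstate_sizes[i] = eax;
            xstate_offsets[i] = ebx;
        }
    }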
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 9ddff90..730368a 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -23,6 +23,11 @@ static u32 __read_mostly xsave_cntxt_size;
/* A 64-bit bitmask of the XSAVE/XRSTOR features supported by processor. */
u64 __read_mostly xfeature_mask;

+unsigned int * __read_mostly xstate_offsets;
+unsigned int * __read_mostly xstate_sizes;
+
+/* Cached xss for fast read */
+static DEFINE_PER_CPU(uint64_t, xss);

/* Cached xcr0 for fast read */
static DEFINE_PER_CPU(uint64_t, xcr0);
@@ -60,6 +65,17 @@ uint64_t get_xcr0(void)
return this_cpu(xcr0);
}

+void set_msr_xss(u64 xss)
+{
+ wrmsrl(MSR_IA32_XSS, xss);
+ this_cpu(xss) = xss;
+}
+
+uint64_t get_msr_xss(void)
+{
+ return this_cpu(xss);
+}
+
void xsave(struct vcpu *v, uint64_t mask)
{
struct xsave_struct *ptr = v->arch.xsave_area;
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index f553814..de81f8a 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -173,6 +173,7 @@ struct hvm_vcpu {
u32 msr_tsc_aux;
u64 msr_tsc_adjust;
+ u64 msr_xss;

union {
struct arch_vmx_struct vmx;
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index e9c4723..4e5b31f 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -58,6 +58,8 @@
#define MSR_IA32_BNDCFGS 0x00000D90

+#define MSR_IA32_XSS 0x00000da0
+
#define MSR_MTRRfix64K_00000 0x00000250
#define MSR_MTRRfix16K_80000 0x00000258
#define MSR_MTRRfix16K_A0000 0x00000259
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index f0d8f0b..f23435a 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -19,7 +19,11 @@
#define XCR_XFEATURE_ENABLED_MASK 0x00000000 /* index of XCR0 */

+#define XSAVE_HDR_SIZE 64
+#define XSAVE_SSE_OFFSET 160
#define XSTATE_YMM_SIZE 256
+#define FXSAVE_SIZE 512
+#define XSAVE_HDR_OFFSET FXSAVE_SIZE
#define XSTATE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */

#define XSTATE_FP (1ULL << 0)
@@ -38,8 +42,24 @@
#define XSTATE_ALL (~(1ULL << 63))
#define XSTATE_NONLAZY (XSTATE_LWP | XSTATE_BNDREGS | XSTATE_BNDCSR)
#define XSTATE_LAZY (XSTATE_ALL & ~XSTATE_NONLAZY)
+#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
+
+#define XSTATE_FIXUP ".section .fixup,\"ax\"    \n" \
+    "2: mov %5,%%ecx                            \n" \
+    "   xor %1,%1                               \n" \
+    "   rep stosb                               \n" \
+    "   lea %2,%0                               \n" \
+    "   mov %3,%1                               \n" \
+    "   jmp 1b                                  \n" \
+    ".previous                                  \n" \
+    _ASM_EXTABLE(1b, 2b)                            \
+    : "+&D" (ptr), "+&a" (lmask)                    \
+    : "m" (*ptr), "g" (lmask), "d" (hmask),         \
+      "m" (xsave_cntxt_size)                        \
+    : "ecx"

extern u64 xfeature_mask;
+extern unsigned int *xstate_offsets, *xstate_sizes;

/* extended state save area */
struct __packed __attribute__((aligned (64))) xsave_struct
@@ -68,7 +88,8 @@ struct __packed __attribute__((aligned (64))) xsave_struct
struct {
u64 xstate_bv;
- u64 reserved[7];
+ u64 xcomp_bv;
+ u64 reserved[6];
} xsave_hdr; /* The 64-byte header */

struct { char x[XSTATE_YMM_SIZE]; } ymm; /* YMM */
@@ -78,6 +99,8 @@ struct __packed __attribute__((aligned (64))) xsave_struct
/* extended state operations */
bool_t __must_check set_xcr0(u64 xfeatures);
uint64_t get_xcr0(void);
+void set_msr_xss(u64 xss);
+uint64_t get_msr_xss(void);
void xsave(struct vcpu *v, uint64_t mask);
void xrstor(struct vcpu *v, uint64_t mask);
bool_t xsave_enabled(const struct vcpu *v);
--
1.9.1
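Two archive notes on the less obvious hunks:

The xsave_hdr change carves xcomp_bv out of the old reserved[7] field.
With XSAVES the processor writes the header in compacted format:
xcomp_bv has bit 63 (XSTATE_COMPACTION_ENABLED) set and its low bits
hold the feature bitmap that defines the compacted layout. A tiny
standalone illustration (plain userspace C; the component values are
examples, not taken from this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define XSTATE_COMPACTION_ENABLED (1ULL << 63)

    int main(void)
    {
        /* Example: FP | SSE | YMM saved in compacted format. */
        uint64_t xcomp_bv = 0x7ULL | XSTATE_COMPACTION_ENABLED;

        printf("xcomp_bv = %#llx\n", (unsigned long long)xcomp_bv);
        return 0;
    }

XSTATE_FIXUP is the shared exception-fixup tail for the xsave/xrstor
asm blocks: if the instruction at label 1 faults (for instance on a
corrupt xstate_bv in the header), the fixup at label 2 loads
xsave_cntxt_size into ecx, zeroes the whole save area with rep stosb
(eax was just cleared, so stosb stores zero bytes), reloads the area
pointer and the low half of the mask, and jumps back to retry.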