
[Xen-devel] [PATCH XTF] also test AVX exceptions



... as they're different from SSE and FPU ones.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
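
(Commentary, not part of the patch.)  What "different" means here: for
VEX-encoded instructions, #UD is governed by CR4.OSXSAVE and XCR0 rather
than by CR0.EM, and #NM depends solely on CR0.TS.  A minimal sketch of the
model the new avx[] table and run_sequence() overrides below encode
(standalone and illustrative only; names and masks are not XTF symbols):

#include <stdint.h>

enum fault { FAULT_NONE, FAULT_UD, FAULT_NM };

/* Expected fault for an AVX instruction under the given control state. */
static enum fault expected_avx_fault(uint64_t cr0, uint64_t cr4, uint64_t xcr0)
{
    /* #UD unless XSAVE is enabled and both SSE and YMM state are enabled. */
    if ( !(cr4 & (1ull << 18)) ||             /* CR4.OSXSAVE */
         (xcr0 & 0x6) != 0x6 )                /* XCR0.SSE | XCR0.YMM */
        return FAULT_UD;

    /* Unlike SSE, CR0.EM and CR0.MP are not consulted; only CR0.TS is. */
    if ( cr0 & (1ull << 3) )                  /* CR0.TS */
        return FAULT_NM;

    return FAULT_NONE;
}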

--- a/include/arch/x86/cpuid.h
+++ b/include/arch/x86/cpuid.h
@@ -75,6 +75,7 @@ static inline bool cpu_has(unsigned int
 #define cpu_has_smx             cpu_has(X86_FEATURE_SMX)
 #define cpu_has_pcid            cpu_has(X86_FEATURE_PCID)
 #define cpu_has_xsave           cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_avx             cpu_has(X86_FEATURE_AVX)
 
 #define cpu_has_syscall         cpu_has(X86_FEATURE_SYSCALL)
 #define cpu_has_nx              cpu_has(X86_FEATURE_NX)
--- a/include/arch/x86/lib.h
+++ b/include/arch/x86/lib.h
@@ -396,6 +396,33 @@ static inline unsigned int str(void)
     return sel;
 }
 
+static inline uint64_t xgetbv(uint32_t index)
+{
+    uint32_t feat_lo;
+    uint64_t feat_hi;
+
+    asm volatile ("xgetbv" : "=a" (feat_lo), "=d" (feat_hi)
+                           :  "c" (index) );
+
+    return feat_lo | (feat_hi << 32);
+}
+
+static inline void xsetbv(uint32_t index, uint64_t value)
+{
+    asm volatile ("xsetbv" :: "a" ((uint32_t)value), "d" (value >> 32),
+                              "c" (index) );
+}
+
+static inline uint64_t read_xcr0(void)
+{
+    return xgetbv(0);
+}
+
+static inline void write_xcr0(uint64_t xcr0)
+{
+    xsetbv(0, xcr0);
+}
+
 #endif /* XTF_X86_LIB_H */
 
 /*
--- a/include/arch/x86/processor.h
+++ b/include/arch/x86/processor.h
@@ -72,6 +72,30 @@
 #define X86_DR6_BT              (1u << 15)  /* Task switch             */
 
 /*
+ * CPU features in XCR0.
+ */
+#define _XSTATE_FP                0
+#define XSTATE_FP                 (1ULL << _XSTATE_FP)
+#define _XSTATE_SSE               1
+#define XSTATE_SSE                (1ULL << _XSTATE_SSE)
+#define _XSTATE_YMM               2
+#define XSTATE_YMM                (1ULL << _XSTATE_YMM)
+#define _XSTATE_BNDREGS           3
+#define XSTATE_BNDREGS            (1ULL << _XSTATE_BNDREGS)
+#define _XSTATE_BNDCSR            4
+#define XSTATE_BNDCSR             (1ULL << _XSTATE_BNDCSR)
+#define _XSTATE_OPMASK            5
+#define XSTATE_OPMASK             (1ULL << _XSTATE_OPMASK)
+#define _XSTATE_ZMM               6
+#define XSTATE_ZMM                (1ULL << _XSTATE_ZMM)
+#define _XSTATE_HI_ZMM            7
+#define XSTATE_HI_ZMM             (1ULL << _XSTATE_HI_ZMM)
+#define _XSTATE_PKRU              9
+#define XSTATE_PKRU               (1ULL << _XSTATE_PKRU)
+#define _XSTATE_LWP               62
+#define XSTATE_LWP                (1ULL << _XSTATE_LWP)
+
+/*
  * Exception mnemonics.
  */
 #define X86_EXC_DE             0 /* Divide Error. */
--- a/tests/fpu-exception-emulation/main.c
+++ b/tests/fpu-exception-emulation/main.c
@@ -163,6 +163,37 @@ exinfo_t probe_sse(bool force)
     return fault;
 }
 
+/**
+ * AVX instructions.  CR0.EM is meaningless for AVX; @#NM should be
+ * raised whenever CR0.TS is set.
+ */
+static const struct test_cfg avx[] =
+{
+    { CR0_SYM(          ), 0 },
+    { CR0_SYM(        TS), EXINFO_SYM(NM, 0) },
+    { CR0_SYM(    MP    ), 0 },
+    { CR0_SYM(    MP, TS), EXINFO_SYM(NM, 0) },
+    { CR0_SYM(EM        ), 0 },
+    { CR0_SYM(EM,     TS), EXINFO_SYM(NM, 0) },
+    { CR0_SYM(EM, MP    ), 0 },
+    { CR0_SYM(EM, MP, TS), EXINFO_SYM(NM, 0) },
+};
+
+static exinfo_t probe_avx(bool force)
+{
+    exinfo_t fault = 0;
+
+    asm volatile ("test %[fep], %[fep];"
+                  "jz 1f;"
+                  _ASM_XEN_FEP
+                  "1: vmovups %%xmm0, %%xmm0; 2:"
+                  _ASM_EXTABLE_HANDLER(1b, 2b, ex_record_fault_eax)
+                  : "+a" (fault)
+                  : [fep] "q" (force));
+
+    return fault;
+}
+
 void run_sequence(const struct test_cfg *seq, unsigned int nr,
                   unsigned int (*fn)(bool), bool force, exinfo_t override)
 {
@@ -226,6 +257,31 @@ void run_tests(bool force)
 
         write_cr4(cr4);
     }
+
+    if ( cpu_has_avx )
+    {
+        unsigned long cr4 = read_cr4();
+        unsigned long xcr0;
+
+        printk("Testing%s AVX\n", force ? " emulated" : "");
+        write_cr4(cr4 & ~X86_CR4_OSXSAVE);
+        run_sequence(avx, ARRAY_SIZE(avx), probe_avx, force,
+                     EXINFO_SYM(UD, 0));
+
+        printk("Testing%s AVX (CR4.OSXSAVE)\n", force ? " emulated" : "");
+        write_cr4(cr4 | X86_CR4_OSXSAVE);
+        xcr0 = read_xcr0();
+        write_xcr0(xcr0 & ~XSTATE_YMM);
+        run_sequence(avx, ARRAY_SIZE(avx), probe_avx, force,
+                     EXINFO_SYM(UD, 0));
+
+        printk("Testing%s AVX (CR4.OSXSAVE+XCR0.YMM)\n", force ? " emulated" : 
"");
+        write_xcr0(xcr0 | XSTATE_SSE | XSTATE_YMM);
+        run_sequence(avx, ARRAY_SIZE(avx), probe_avx, force, 0);
+
+        write_xcr0(xcr0);
+        write_cr4(cr4);
+    }
 }
 
 void test_main(void)

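(Again commentary, not part of the patch.)  The CR4.OSXSAVE / XCR0.YMM
toggling above mirrors how an OS advertises AVX to applications, which is
why those bits gate #UD.  A rough, illustrative sketch of the usual
userspace detection sequence (hosted-C example, not XTF code):

#include <cpuid.h>     /* GCC/Clang __get_cpuid() helper */
#include <stdbool.h>
#include <stdint.h>

static bool os_supports_avx(void)
{
    unsigned int eax, ebx, ecx, edx;

    if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
        return false;

    /* CPUID.1:ECX - OSXSAVE (bit 27) and AVX (bit 28) must both be set. */
    if ( !(ecx & (1u << 27)) || !(ecx & (1u << 28)) )
        return false;

    /* XGETBV(0): the OS must have enabled SSE (bit 1) and YMM (bit 2). */
    uint32_t lo, hi;
    asm volatile ( "xgetbv" : "=a" (lo), "=d" (hi) : "c" (0) );

    return (lo & 0x6) == 0x6;
}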
