[xen staging] x86emul/test: rename "cp"



commit 3c93a0da6bc201ebeb8a08d6bd339c9982985fcc
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Sep 4 16:06:21 2024 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Sep 4 16:06:21 2024 +0200

    x86emul/test: rename "cp"
    
    In preparation for introducing a const struct cpu_policy * local in
    x86_emulate(), rename that global variable to something more suitable:
    "cp" is our commonly used name for function parameters or local
    variables of type struct cpu_policy *, so the global's present name
    was already liable to clash with such uses.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 tools/fuzz/x86_instruction_emulator/fuzz-emul.c |   2 +-
 tools/tests/x86_emulator/test_x86_emulator.c    |  19 ++--
 tools/tests/x86_emulator/x86-emulate.c          |  16 +--
 tools/tests/x86_emulator/x86-emulate.h          | 135 +++++++++++++-----------
 4 files changed, 95 insertions(+), 77 deletions(-)
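
A minimal, self-contained C sketch of the clash the commit message describes:
once x86_emulate() gains a const struct cpu_policy * local named "cp", a
test-harness global of the same name is shadowed inside that function. The
types, field, and main() below are hypothetical stand-ins for illustration,
not the real Xen headers or harness code.

    #include <stdbool.h>

    /* Hypothetical stand-in for the real struct cpu_policy. */
    struct cpu_policy {
        bool movbe;
    };

    /* Hypothetical stand-in for the real emulator context. */
    struct x86_emulate_ctxt {
        const struct cpu_policy *cpu_policy;
    };

    /* Old harness global name; after this patch it becomes "cpu_policy". */
    static struct cpu_policy cp = { .movbe = true };

    static bool x86_emulate(const struct x86_emulate_ctxt *ctxt)
    {
        /* Planned local, per the commit message ... */
        const struct cpu_policy *cp = ctxt->cpu_policy;

        /* ... which shadows the file-scope "cp" for the rest of the body. */
        return cp->movbe;
    }

    int main(void)
    {
        struct x86_emulate_ctxt ctxt = { .cpu_policy = &cp };

        return x86_emulate(&ctxt) ? 0 : 1;
    }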

diff --git a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
index 2ba9ca9e0b..28c4fb13c5 100644
--- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
+++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
@@ -899,7 +899,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data_p, size_t size)
     int rc;
 
     /* Not part of the initializer, for old gcc to cope. */
-    ctxt.cpu_policy = &cp;
+    ctxt.cpu_policy = &cpu_policy;
 
     /* Reset all global state variables */
     memset(&input, 0, sizeof(input));
diff --git a/tools/tests/x86_emulator/test_x86_emulator.c b/tools/tests/x86_emulator/test_x86_emulator.c
index 2d52d21742..94a2bbab93 100644
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -779,7 +779,8 @@ static void zap_fpsel(unsigned int *env, bool is_32bit)
         env[3] &= ~0xffff;
     }
 
-    if ( cp.x86_vendor != X86_VENDOR_AMD && cp.x86_vendor != X86_VENDOR_HYGON )
+    if ( cpu_policy.x86_vendor != X86_VENDOR_AMD &&
+         cpu_policy.x86_vendor != X86_VENDOR_HYGON )
         return;
 
     if ( is_32bit )
@@ -913,7 +914,7 @@ int main(int argc, char **argv)
 
     ctxt.regs = &regs;
     ctxt.force_writeback = 0;
-    ctxt.cpu_policy = &cp;
+    ctxt.cpu_policy = &cpu_policy;
     ctxt.lma       = sizeof(void *) == 8;
     ctxt.addr_size = 8 * sizeof(void *);
     ctxt.sp_size   = 8 * sizeof(void *);
@@ -1487,11 +1488,11 @@ int main(int argc, char **argv)
         goto fail;
     printf("okay\n");
 
-    vendor_native = cp.x86_vendor;
-    for ( cp.x86_vendor = X86_VENDOR_AMD; ; )
+    vendor_native = cpu_policy.x86_vendor;
+    for ( cpu_policy.x86_vendor = X86_VENDOR_AMD; ; )
     {
-        unsigned int v = cp.x86_vendor == X86_VENDOR_INTEL;
-        const char *vendor = cp.x86_vendor == X86_VENDOR_INTEL ? "Intel" : "AMD";
+        unsigned int v = cpu_policy.x86_vendor == X86_VENDOR_INTEL;
+        const char *vendor = cpu_policy.x86_vendor == X86_VENDOR_INTEL ? "Intel" : "AMD";
         uint64_t *stk = (void *)res + MMAP_SZ - 16;
 
         regs.rcx = 2;
@@ -1527,11 +1528,11 @@ int main(int argc, char **argv)
             printf("okay\n");
         }
 
-        if ( cp.x86_vendor == X86_VENDOR_INTEL )
+        if ( cpu_policy.x86_vendor == X86_VENDOR_INTEL )
             break;
-        cp.x86_vendor = X86_VENDOR_INTEL;
+        cpu_policy.x86_vendor = X86_VENDOR_INTEL;
     }
-    cp.x86_vendor = vendor_native;
+    cpu_policy.x86_vendor = vendor_native;
 #endif /* x86-64 */
 
     printf("%-40s", "Testing shld $1,%ecx,(%edx)...");
diff --git a/tools/tests/x86_emulator/x86-emulate.c b/tools/tests/x86_emulator/x86-emulate.c
index 70ce15be50..379e206dd3 100644
--- a/tools/tests/x86_emulator/x86-emulate.c
+++ b/tools/tests/x86_emulator/x86-emulate.c
@@ -25,7 +25,7 @@
 #endif
 
 uint32_t mxcsr_mask = 0x0000ffbf;
-struct cpu_policy cp;
+struct cpu_policy cpu_policy;
 
 static char fpu_save_area[0x4000] __attribute__((__aligned__((64))));
 static bool use_xsave;
@@ -75,18 +75,18 @@ bool emul_test_init(void)
 
     unsigned long sp;
 
-    x86_cpu_policy_fill_native(&cp);
+    x86_cpu_policy_fill_native(&cpu_policy);
 
     /*
      * The emulator doesn't use these instructions, so can always emulate
      * them.
      */
-    cp.basic.movbe = true;
-    cp.feat.invpcid = true;
-    cp.feat.adx = true;
-    cp.feat.rdpid = true;
-    cp.feat.wrmsrns = true;
-    cp.extd.clzero = true;
+    cpu_policy.basic.movbe = true;
+    cpu_policy.feat.invpcid = true;
+    cpu_policy.feat.adx = true;
+    cpu_policy.feat.rdpid = true;
+    cpu_policy.feat.wrmsrns = true;
+    cpu_policy.extd.clzero = true;
 
     if ( cpu_has_xsave )
     {
diff --git a/tools/tests/x86_emulator/x86-emulate.h b/tools/tests/x86_emulator/x86-emulate.h
index 4fca051152..112ec53d99 100644
--- a/tools/tests/x86_emulator/x86-emulate.h
+++ b/tools/tests/x86_emulator/x86-emulate.h
@@ -68,7 +68,7 @@
 #define is_canonical_address(x) (((int64_t)(x) >> 47) == ((int64_t)(x) >> 63))
 
 extern uint32_t mxcsr_mask;
-extern struct cpu_policy cp;
+extern struct cpu_policy cpu_policy;
 
 #define MMAP_SZ 16384
 bool emul_test_init(void);
@@ -122,7 +122,7 @@ static inline uint64_t xgetbv(uint32_t xcr)
 }
 
 /* Intentionally checking OSXSAVE here. */
-#define cpu_has_xsave     (cp.basic.raw[1].c & (1u << 27))
+#define cpu_has_xsave     (cpu_policy.basic.raw[1].c & (1u << 27))
 
 static inline bool xcr0_mask(uint64_t mask)
 {
@@ -132,63 +132,80 @@ static inline bool xcr0_mask(uint64_t mask)
 unsigned int rdpkru(void);
 void wrpkru(unsigned int val);
 
-#define cache_line_size() (cp.basic.clflush_size * 8)
-#define cpu_has_fpu        cp.basic.fpu
-#define cpu_has_mmx        cp.basic.mmx
-#define cpu_has_fxsr       cp.basic.fxsr
-#define cpu_has_sse        cp.basic.sse
-#define cpu_has_sse2       cp.basic.sse2
-#define cpu_has_sse3       cp.basic.sse3
-#define cpu_has_pclmulqdq  cp.basic.pclmulqdq
-#define cpu_has_ssse3      cp.basic.ssse3
-#define cpu_has_fma       (cp.basic.fma && xcr0_mask(6))
-#define cpu_has_sse4_1     cp.basic.sse4_1
-#define cpu_has_sse4_2     cp.basic.sse4_2
-#define cpu_has_popcnt     cp.basic.popcnt
-#define cpu_has_aesni      cp.basic.aesni
-#define cpu_has_avx       (cp.basic.avx  && xcr0_mask(6))
-#define cpu_has_f16c      (cp.basic.f16c && xcr0_mask(6))
-
-#define cpu_has_avx2      (cp.feat.avx2 && xcr0_mask(6))
-#define cpu_has_bmi1       cp.feat.bmi1
-#define cpu_has_bmi2       cp.feat.bmi2
-#define cpu_has_avx512f   (cp.feat.avx512f  && xcr0_mask(0xe6))
-#define cpu_has_avx512dq  (cp.feat.avx512dq && xcr0_mask(0xe6))
-#define cpu_has_avx512_ifma (cp.feat.avx512_ifma && xcr0_mask(0xe6))
-#define cpu_has_avx512cd  (cp.feat.avx512cd && xcr0_mask(0xe6))
-#define cpu_has_sha        cp.feat.sha
-#define cpu_has_avx512bw  (cp.feat.avx512bw && xcr0_mask(0xe6))
-#define cpu_has_avx512vl  (cp.feat.avx512vl && xcr0_mask(0xe6))
-#define cpu_has_avx512_vbmi (cp.feat.avx512_vbmi && xcr0_mask(0xe6))
-#define cpu_has_avx512_vbmi2 (cp.feat.avx512_vbmi2 && xcr0_mask(0xe6))
-#define cpu_has_gfni       cp.feat.gfni
-#define cpu_has_vaes      (cp.feat.vaes && xcr0_mask(6))
-#define cpu_has_vpclmulqdq (cp.feat.vpclmulqdq && xcr0_mask(6))
-#define cpu_has_avx512_vnni (cp.feat.avx512_vnni && xcr0_mask(0xe6))
-#define cpu_has_avx512_bitalg (cp.feat.avx512_bitalg && xcr0_mask(0xe6))
-#define cpu_has_avx512_vpopcntdq (cp.feat.avx512_vpopcntdq && xcr0_mask(0xe6))
-#define cpu_has_movdiri    cp.feat.movdiri
-#define cpu_has_movdir64b  cp.feat.movdir64b
-#define cpu_has_avx512_vp2intersect (cp.feat.avx512_vp2intersect && xcr0_mask(0xe6))
-#define cpu_has_serialize  cp.feat.serialize
-#define cpu_has_avx512_fp16 (cp.feat.avx512_fp16 && xcr0_mask(0xe6))
-#define cpu_has_sha512     (cp.feat.sha512 && xcr0_mask(6))
-#define cpu_has_sm3        (cp.feat.sm3 && xcr0_mask(6))
-#define cpu_has_sm4        (cp.feat.sm4 && xcr0_mask(6))
-#define cpu_has_avx_vnni   (cp.feat.avx_vnni && xcr0_mask(6))
-#define cpu_has_avx512_bf16 (cp.feat.avx512_bf16 && xcr0_mask(0xe6))
-#define cpu_has_avx_ifma   (cp.feat.avx_ifma && xcr0_mask(6))
-#define cpu_has_avx_vnni_int8 (cp.feat.avx_vnni_int8 && xcr0_mask(6))
-#define cpu_has_avx_ne_convert (cp.feat.avx_ne_convert && xcr0_mask(6))
-#define cpu_has_avx_vnni_int16 (cp.feat.avx_vnni_int16 && xcr0_mask(6))
-
-#define cpu_has_xgetbv1   (cpu_has_xsave && cp.xstate.xgetbv1)
-
-#define cpu_has_3dnow_ext  cp.extd._3dnowext
-#define cpu_has_sse4a      cp.extd.sse4a
-#define cpu_has_xop       (cp.extd.xop  && xcr0_mask(6))
-#define cpu_has_fma4      (cp.extd.fma4 && xcr0_mask(6))
-#define cpu_has_tbm        cp.extd.tbm
+#define cache_line_size()           (cpu_policy.basic.clflush_size * 8)
+#define cpu_has_fpu                  cpu_policy.basic.fpu
+#define cpu_has_mmx                  cpu_policy.basic.mmx
+#define cpu_has_fxsr                 cpu_policy.basic.fxsr
+#define cpu_has_sse                  cpu_policy.basic.sse
+#define cpu_has_sse2                 cpu_policy.basic.sse2
+#define cpu_has_sse3                 cpu_policy.basic.sse3
+#define cpu_has_pclmulqdq            cpu_policy.basic.pclmulqdq
+#define cpu_has_ssse3                cpu_policy.basic.ssse3
+#define cpu_has_fma                 (cpu_policy.basic.fma && xcr0_mask(6))
+#define cpu_has_sse4_1               cpu_policy.basic.sse4_1
+#define cpu_has_sse4_2               cpu_policy.basic.sse4_2
+#define cpu_has_popcnt               cpu_policy.basic.popcnt
+#define cpu_has_aesni                cpu_policy.basic.aesni
+#define cpu_has_avx                 (cpu_policy.basic.avx  && xcr0_mask(6))
+#define cpu_has_f16c                (cpu_policy.basic.f16c && xcr0_mask(6))
+
+#define cpu_has_avx2                (cpu_policy.feat.avx2 && xcr0_mask(6))
+#define cpu_has_bmi1                 cpu_policy.feat.bmi1
+#define cpu_has_bmi2                 cpu_policy.feat.bmi2
+#define cpu_has_avx512f             (cpu_policy.feat.avx512f && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_avx512dq            (cpu_policy.feat.avx512dq && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_avx512_ifma         (cpu_policy.feat.avx512_ifma && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_avx512cd            (cpu_policy.feat.avx512cd && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_sha                  cpu_policy.feat.sha
+#define cpu_has_avx512bw            (cpu_policy.feat.avx512bw && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_avx512vl            (cpu_policy.feat.avx512vl && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_avx512_vbmi         (cpu_policy.feat.avx512_vbmi && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_avx512_vbmi2        (cpu_policy.feat.avx512_vbmi2 && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_gfni                 cpu_policy.feat.gfni
+#define cpu_has_vaes                (cpu_policy.feat.vaes && xcr0_mask(6))
+#define cpu_has_vpclmulqdq          (cpu_policy.feat.vpclmulqdq && xcr0_mask(6))
+#define cpu_has_avx512_vnni         (cpu_policy.feat.avx512_vnni && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_avx512_bitalg       (cpu_policy.feat.avx512_bitalg && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_avx512_vpopcntdq    (cpu_policy.feat.avx512_vpopcntdq && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_movdiri              cpu_policy.feat.movdiri
+#define cpu_has_movdir64b            cpu_policy.feat.movdir64b
+#define cpu_has_avx512_vp2intersect (cpu_policy.feat.avx512_vp2intersect && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_serialize            cpu_policy.feat.serialize
+#define cpu_has_avx512_fp16         (cpu_policy.feat.avx512_fp16 && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_sha512              (cpu_policy.feat.sha512 && xcr0_mask(6))
+#define cpu_has_sm3                 (cpu_policy.feat.sm3 && xcr0_mask(6))
+#define cpu_has_sm4                 (cpu_policy.feat.sm4 && xcr0_mask(6))
+#define cpu_has_avx_vnni            (cpu_policy.feat.avx_vnni && xcr0_mask(6))
+#define cpu_has_avx512_bf16         (cpu_policy.feat.avx512_bf16 && \
+                                     xcr0_mask(0xe6))
+#define cpu_has_avx_ifma            (cpu_policy.feat.avx_ifma && xcr0_mask(6))
+#define cpu_has_avx_vnni_int8       (cpu_policy.feat.avx_vnni_int8 && \
+                                     xcr0_mask(6))
+#define cpu_has_avx_ne_convert      (cpu_policy.feat.avx_ne_convert && \
+                                     xcr0_mask(6))
+#define cpu_has_avx_vnni_int16      (cpu_policy.feat.avx_vnni_int16 && \
+                                     xcr0_mask(6))
+
+#define cpu_has_xgetbv1             (cpu_has_xsave && cpu_policy.xstate.xgetbv1)
+
+#define cpu_has_3dnow_ext            cpu_policy.extd._3dnowext
+#define cpu_has_sse4a                cpu_policy.extd.sse4a
+#define cpu_has_xop                 (cpu_policy.extd.xop  && xcr0_mask(6))
+#define cpu_has_fma4                (cpu_policy.extd.fma4 && xcr0_mask(6))
+#define cpu_has_tbm                  cpu_policy.extd.tbm
 
 int emul_test_cpuid(
     uint32_t leaf,
--
generated by git-patchbot for /home/xen/git/xen.git#staging