
Re: [PATCH] x86emul: recognize CLDEMOTE



On 25/01/2022 15:08, Jan Beulich wrote:
> On 25.01.2022 15:22, Jan Beulich wrote:
>> We claim to support the insn, but so far the emulator has been handling
>> it as a NOP.
>>
>> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
>
> I'm sorry, I should have Cc-ed Paul here as well.


Acked-by: Paul Durrant <paul@xxxxxxx>

> Jan
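
For context on "handling it as a NOP": CLDEMOTE was deliberately given an
encoding, 0f 1c /0, from the 0f 18-0f 1f reserved-NOP space, so hardware
without the feature executes it as a plain NOP - which is why the
emulator's old behaviour, though not what we advertise, was tolerable. A
minimal standalone illustration (file and function names invented; the
inline asm mirrors the HAVE_AS_CLDEMOTE fallback in the patch below):

/* cldemote-demo.c: hand-assemble CLDEMOTE the way the patch's fallback
 * does.  Build on x86-64 with e.g. "cc -O2 cldemote-demo.c". */
#include <stdio.h>

static inline void cldemote_raw(const void *p)
{
    /* 0f 1c is the opcode; modrm byte 0x02 selects /0 with the address
     * taken from %rdx, hence the "d" constraint pinning p there. */
    asm volatile ( ".byte 0x0f, 0x1c, 0x02"
                   :: "d" (p), "m" (*(const char *)p) );
}

int main(void)
{
    static char line[64];

    line[0] = 1;        /* touch (and dirty) the cache line */
    cldemote_raw(line); /* hint: demote the line to a more distant level */
    puts("executed cldemote (a NOP on CPUs without the feature)");
    return 0;
}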

---
While handling x86emul_cldemote separately in hvmemul_cache_op() means
carrying some redundant code, folding it with CLFLUSH{,OPT} / CLWB
didn't seem very attractive either.
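
Judging by the new arm below, the two paths differ at least in the access
type used for the linear-address check (hvm_access_none here) and in a
translation fault being swallowed via x86_emul_reset_event() rather than
propagated - consistent with CLDEMOTE being a pure hint, whereas CLFLUSH
and friends can fault architecturally. A toy model of the trade-off (all
names invented; this is not Xen's API):

/* toy-cache-op.c: in a folded case block, every behavioural divergence
 * between the ops turns into an op-conditional like the one below. */
#include <stdio.h>

enum cop { COP_CLDEMOTE, COP_CLFLUSH, COP_CLFLUSHOPT, COP_CLWB };

/* Stand-in mapper: address 0 plays the inaccessible-address role. */
static void *toy_map(unsigned long addr) { return (void *)addr; }

static int toy_cache_op(enum cop op, unsigned long addr)
{
    void *mapping = toy_map(addr);

    if ( !mapping )
        return op == COP_CLDEMOTE ? 0  /* swallow the fault */
                                  : -1 /* let it reach the guest */;

    printf("op %d on %p\n", op, mapping);
    return 0;
}

int main(void)
{
    char buf[1];

    toy_cache_op(COP_CLFLUSH, (unsigned long)buf);
    printf("cldemote @ bad addr -> %d (no fault)\n",
           toy_cache_op(COP_CLDEMOTE, 0));
    printf("clflush  @ bad addr -> %d (fault)\n",
           toy_cache_op(COP_CLFLUSH, 0));
    return 0;
}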

--- a/xen/arch/x86/arch.mk
+++ b/xen/arch/x86/arch.mk
@@ -23,6 +23,7 @@ $(call as-option-add,CFLAGS,CC,"xsaveopt
  $(call as-option-add,CFLAGS,CC,"rdseed %eax",-DHAVE_AS_RDSEED)
  $(call as-option-add,CFLAGS,CC,"clac",-DHAVE_AS_CLAC_STAC)
  $(call as-option-add,CFLAGS,CC,"clwb (%rax)",-DHAVE_AS_CLWB)
+$(call as-option-add,CFLAGS,CC,"cldemote (%rax)",-DHAVE_AS_CLDEMOTE)
  $(call as-option-add,CFLAGS,CC,".equ \"x\"$$(comma)1",-DHAVE_AS_QUOTED_SYM)
  $(call as-option-add,CFLAGS,CC,"invpcid (%rax)$$(comma)%rax",-DHAVE_AS_INVPCID)
  $(call as-option-add,CFLAGS,CC,"movdiri %rax$$(comma)(%rax)",-DHAVE_AS_MOVDIR)
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2351,6 +2351,28 @@ static int hvmemul_cache_op(
           * to be sensibly used is in (virtualization unaware) firmware.
           */
          break;
+
+    case x86emul_cldemote:
+        ASSERT(!is_x86_system_segment(seg));
+
+        if ( !boot_cpu_has(X86_FEATURE_CLDEMOTE) ||
+             hvmemul_virtual_to_linear(seg, offset, 0, NULL, hvm_access_none,
+                                       hvmemul_ctxt, &addr) != X86EMUL_OKAY )
+            break;
+
+        if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
+            pfec |= PFEC_user_mode;
+
+        mapping = hvmemul_map_linear_addr(addr, 0, pfec, hvmemul_ctxt);
+        if ( mapping == ERR_PTR(~X86EMUL_EXCEPTION) )
+            x86_emul_reset_event(&hvmemul_ctxt->ctxt);
+        if ( IS_ERR_OR_NULL(mapping) )
+            break;
+
+        cldemote(mapping);
+
+        hvmemul_unmap_linear_addr(mapping, addr, 0, hvmemul_ctxt);
+        break;
      }

      return X86EMUL_OKAY;
--- a/xen/arch/x86/include/asm/system.h
+++ b/xen/arch/x86/include/asm/system.h
@@ -37,6 +37,16 @@ static inline void clwb(const void *p)
  #endif
  }

+static inline void cldemote(const void *p)
+{
+#if defined(HAVE_AS_CLDEMOTE)
+    asm volatile ( "cldemote %0" :: "m" (*(const char *)p) );
+#else
+    asm volatile ( ".byte 0x0f, 0x1c, 0x02"
+                   :: "d" (p), "m" (*(const char *)p) );
+#endif
+}
+
  #define xchg(ptr,v) \
      ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -6290,7 +6290,8 @@ x86_emulate(
      case X86EMUL_OPC(0x0f, 0x0d): /* GrpP (prefetch) */
      case X86EMUL_OPC(0x0f, 0x18): /* Grp16 (prefetch/nop) */
-    case X86EMUL_OPC(0x0f, 0x19) ... X86EMUL_OPC(0x0f, 0x1f): /* nop */
+    case X86EMUL_OPC(0x0f, 0x19) ... X86EMUL_OPC(0x0f, 0x1b): /* nop */
+    case X86EMUL_OPC(0x0f, 0x1d) ... X86EMUL_OPC(0x0f, 0x1f): /* nop */
          break;

  #ifndef X86EMUL_NO_MMX
@@ -6627,6 +6628,12 @@ x86_emulate(
  #endif /* !X86EMUL_NO_SIMD */

+    case X86EMUL_OPC(0x0f, 0x1c): /* cldemote / nop */
+        if ( ctxt->cpuid->feat.cldemote && !vex.pfx && !modrm_reg &&
+             ops->cache_op )
+            ops->cache_op(x86emul_cldemote, ea.mem.seg, ea.mem.off, ctxt);
+        break;
+
      case X86EMUL_OPC(0x0f, 0x20): /* mov cr,reg */
      case X86EMUL_OPC(0x0f, 0x21): /* mov dr,reg */
      case X86EMUL_OPC(0x0f, 0x22): /* mov reg,cr */
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -177,6 +177,7 @@ enum x86_emulate_fpu_type {
  };

  enum x86emul_cache_op {
+    x86emul_cldemote,
      x86emul_clflush,
      x86emul_clflushopt,
      x86emul_clwb,
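
One note for other consumers of x86_emulate(): with the new enumerator a
cache_op hook now also receives x86emul_cldemote. A minimal sketch of a
hook that simply drops the hint; the signature is inferred from the
ops->cache_op() call site above, and my_cache_op is an invented name:

/* Assumes the emulator's public header for the types used below. */
#include "x86_emulate.h"

static int my_cache_op(enum x86emul_cache_op op, enum x86_segment seg,
                       unsigned long offset, struct x86_emulate_ctxt *ctxt)
{
    if ( op == x86emul_cldemote )
        return X86EMUL_OKAY; /* a pure hint can always be ignored */

    /* ... handle x86emul_clflush / clflushopt / clwb here ... */
    return X86EMUL_OKAY;
}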