Re: [PATCH v2 1/6] x86/vmx: remove HAVE_AS_{EPT,VMX}, GAS_VMX_OP() and *_OPCODE
On 03/04/2025 7:23 pm, dmkhn@xxxxxxxxx wrote:
> From: Denis Mukhin <dmukhin@xxxxxxxx>
>
> The new toolchain baseline knows the VMX instructions,
> no need to carry the workaround in the code.
>
> Move asm for vmxoff directly on the only callsite in vmcs.c

Ideally VMXOFF in capitals, as it's an instruction name.  But this type
of thing is more commonly phrased as "Inline __vmxoff() into its single
caller", or so.

>
> Updated formatting for all __xxx() calls to be consistent.

I'd suggest "for the other wrappers to be".

>
> Resolves: https://gitlab.com/xen-project/xen/-/work_items/202
> Signed-off-by: Denis Mukhin <dmukhin@xxxxxxxx>
> ---
>  xen/arch/x86/arch.mk                   |   4 +-
>  xen/arch/x86/hvm/vmx/vmcs.c            |   2 +-
>  xen/arch/x86/include/asm/hvm/vmx/vmx.h | 119 ++++---------------------

Just as a note: you're CC-ing The Rest, but this is an x86-only change,
so it should really only CC myself, Jan and Roger.

> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index 1d427100ce..aef746a293 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -811,7 +811,7 @@ void cf_check vmx_cpu_down(void)
>
>      BUG_ON(!(read_cr4() & X86_CR4_VMXE));
>      this_cpu(vmxon) = 0;
> -    __vmxoff();
> +    asm volatile ("vmxoff" : : : "memory");

Style nit - this wants to be:

    asm volatile ( "vmxoff" ::: "memory" );

>
>      local_irq_restore(flags);
>  }
> diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
> index 7c6ba73407..ed6a6986b9 100644
> --- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
> +++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
> @@ -310,97 +292,54 @@ extern uint8_t posted_intr_vector;
>  #define INVVPID_ALL_CONTEXT                     2
>  #define INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 3
>
> -#ifdef HAVE_AS_VMX
> -# define GAS_VMX_OP(yes, no) yes
> -#else
> -# define GAS_VMX_OP(yes, no) no
> -#endif
> -
>  static always_inline void __vmptrld(u64 addr)
>  {
> -    asm volatile (
> -#ifdef HAVE_AS_VMX
> -                   "vmptrld %0\n"
> -#else
> -                   VMPTRLD_OPCODE MODRM_EAX_06
> -#endif
> +    asm volatile ( "vmptrld %0\n"

As you're changing the line anyway, this ought to be \n\t.  It's
cosmetic, but comes in handy if you need to read the intermediate
assembly.
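For context, GCC pastes the asm template into the intermediate .s file
verbatim: a bare "\n" leaves whatever follows at column 0, while "\n\t"
keeps it aligned with the compiler's own tab-indented output.  A minimal
standalone sketch, using hypothetical NOPs rather than code from the
patch:

    /* "\n" only: the second statement lands in column 0 of the .s file. */
    asm volatile ( "nop\n"
                   "nop" );

    /* "\n\t": both statements come out tab-indented, matching the
       surrounding compiler-generated assembly. */
    asm volatile ( "nop\n\t"
                   "nop" );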
>                     /* CF==1 or ZF==1 --> BUG() */
>                     UNLIKELY_START(be, vmptrld)
>                     _ASM_BUGFRAME_TEXT(0)
>                     UNLIKELY_END_SECTION
>                     :
> -#ifdef HAVE_AS_VMX
>                     : "m" (addr),
> -#else
> -                   : "a" (&addr),
> -#endif
>                       _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
> -                   : "memory");
> +                   : "memory" );
>  }
>
>  static always_inline void __vmpclear(u64 addr)
>  {
> -    asm volatile (
> -#ifdef HAVE_AS_VMX
> -                   "vmclear %0\n"
> -#else
> -                   VMCLEAR_OPCODE MODRM_EAX_06
> -#endif
> +    asm volatile ( "vmclear %0\n"
>                     /* CF==1 or ZF==1 --> BUG() */
>                     UNLIKELY_START(be, vmclear)
>                     _ASM_BUGFRAME_TEXT(0)
>                     UNLIKELY_END_SECTION
>                     :
> -#ifdef HAVE_AS_VMX
>                     : "m" (addr),
> -#else
> -                   : "a" (&addr),
> -#endif
>                       _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
> -                   : "memory");
> +                   : "memory" );
>  }
>
>  static always_inline void __vmread(unsigned long field, unsigned long *value)
>  {
> -    asm volatile (
> -#ifdef HAVE_AS_VMX
> -                   "vmread %1, %0\n\t"
> -#else
> -                   VMREAD_OPCODE MODRM_EAX_ECX
> -#endif
> +    asm volatile ( "vmread %1, %0\n\t"
>                     /* CF==1 or ZF==1 --> BUG() */
>                     UNLIKELY_START(be, vmread)
>                     _ASM_BUGFRAME_TEXT(0)
>                     UNLIKELY_END_SECTION
> -#ifdef HAVE_AS_VMX
>                     : "=rm" (*value)
>                     : "r" (field),
> -#else
> -                   : "=c" (*value)
> -                   : "a" (field),
> -#endif
>                       _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
>          );

Fold this onto the previous line, as you're fixing up all the other
closing brackets.  (See the sketch after this message.)

> @@ -494,24 +422,14 @@ static always_inline void __invvpid(unsigned long type, u16 vpid, u64 gva)
>      } operand = {vpid, 0, gva};
>
>      /* Fix up #UD exceptions which occur when TLBs are flushed before VMXON. */

I find this comment deeply troubling, but let's not go changing that
right now.

I'm happy to fix this all on commit.

~Andrew
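Sketch referenced above - what __vmread() might look like with the
closing bracket folded up, based purely on the quoted hunk and assuming
nothing else about the wrapper changes:

    static always_inline void __vmread(unsigned long field, unsigned long *value)
    {
        asm volatile ( "vmread %1, %0\n\t"
                       /* CF==1 or ZF==1 --> BUG() */
                       UNLIKELY_START(be, vmread)
                       _ASM_BUGFRAME_TEXT(0)
                       UNLIKELY_END_SECTION
                       : "=rm" (*value)
                       : "r" (field),
                         _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) );
    }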