VMX: don't crash processing 'd' debug key

There's a window during scheduling where "current" and the active VMCS
may disagree: the former gets set much earlier than the latter. Since
both vmx_vmcs_enter() and vmx_vmcs_exit() immediately return when the
subject vCPU is "current", accessing VMCS fields would, depending on
whether there is any currently active VMCS, either read wrong data or
cause a crash.

Going forward we might want to consider reducing the window during
which vmx_vmcs_enter() might fail (e.g. doing a plain __vmptrld() when
v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) but
arch_vmx->active_cpu == -1), but that would add complexities (acquiring
and - more importantly - properly dropping v->arch.hvm_vmx.vmcs_lock)
that don't look worthwhile right now.

Signed-off-by: Jan Beulich
Reviewed-by: Andrew Cooper
---
v3: Introduce vmx_vmcs_try_enter() (as suggested by Tim).

--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -601,16 +601,16 @@ struct foreign_vmcs {
 };
 static DEFINE_PER_CPU(struct foreign_vmcs, foreign_vmcs);
 
-void vmx_vmcs_enter(struct vcpu *v)
+bool_t vmx_vmcs_try_enter(struct vcpu *v)
 {
     struct foreign_vmcs *fv;
 
     /*
      * NB. We must *always* run an HVM VCPU on its own VMCS, except for
-     * vmx_vmcs_enter/exit critical regions.
+     * vmx_vmcs_enter/exit and scheduling tail critical regions.
      */
     if ( likely(v == current) )
-        return;
+        return v->arch.hvm_vmx.vmcs == this_cpu(current_vmcs);
 
     fv = &this_cpu(foreign_vmcs);
 
@@ -633,6 +633,15 @@ void vmx_vmcs_enter(struct vcpu *v)
     }
 
     fv->count++;
+
+    return 1;
+}
+
+void vmx_vmcs_enter(struct vcpu *v)
+{
+    bool_t okay = vmx_vmcs_try_enter(v);
+
+    ASSERT(okay);
 }
 
 void vmx_vmcs_exit(struct vcpu *v)
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -675,7 +675,27 @@ void vmx_get_segment_register(struct vcp
 {
     unsigned long attr = 0, sel = 0, limit;
 
-    vmx_vmcs_enter(v);
+    /*
+     * We may get here in the context of dump_execstate(), which may have
+     * interrupted context switching between setting "current" and
+     * vmx_do_resume() reaching the end of vmx_load_vmcs(). That would make
+     * all the VMREADs below fail if we don't bail right away.
+     */
+    if ( unlikely(!vmx_vmcs_try_enter(v)) )
+    {
+        static bool_t warned;
+
+        if ( !warned )
+        {
+            warned = 1;
+            printk(XENLOG_WARNING "Segment register inaccessible for d%dv%d\n"
+                   "(If you see this outside of debugging activity,"
+                   " please report to xen-devel@xxxxxxxxxxxxxxxxxxxx)\n",
+                   v->domain->domain_id, v->vcpu_id);
+        }
+        memset(reg, 0, sizeof(*reg));
+        return;
+    }
 
     switch ( seg )
     {
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -144,6 +144,7 @@ struct arch_vmx_struct {
 int vmx_create_vmcs(struct vcpu *v);
 void vmx_destroy_vmcs(struct vcpu *v);
 void vmx_vmcs_enter(struct vcpu *v);
+bool_t __must_check vmx_vmcs_try_enter(struct vcpu *v);
 void vmx_vmcs_exit(struct vcpu *v);
 
 #define CPU_BASED_VIRTUAL_INTR_PENDING        0x00000004
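
For reviewers unfamiliar with the enter/exit pairing, here's a minimal
sketch of how the new try-variant is meant to be used by any caller that
can run in the partially-switched window. vmx_get_segment_register()
above is the real example from this patch; dump_vmx_segments() below is
a hypothetical consumer invented purely for illustration, while
vmx_vmcs_try_enter()/vmx_vmcs_exit(), struct vcpu, and printk() are the
existing Xen interfaces:

    #include <xen/lib.h>            /* printk() */
    #include <xen/sched.h>          /* struct vcpu, current */
    #include <asm/hvm/vmx/vmcs.h>   /* vmx_vmcs_try_enter()/vmx_vmcs_exit() */

    /* Hypothetical consumer, NOT part of this patch. */
    static void dump_vmx_segments(struct vcpu *v)
    {
        /*
         * A debug-key/dump path may interrupt a context switch after
         * "current" was updated but before vmx_do_resume() loaded the
         * vCPU's VMCS. In that window vmx_vmcs_enter() would trip its
         * ASSERT(), so use the try-variant and bail on failure instead
         * of issuing VMREADs against the wrong (or no) VMCS.
         */
        if ( !vmx_vmcs_try_enter(v) )
        {
            printk("d%dv%d: VMCS currently inaccessible\n",
                   v->domain->domain_id, v->vcpu_id);
            return;
        }

        /* ... VMREADs of segment state would go here ... */

        vmx_vmcs_exit(v);
    }

Note the pairing: a successful vmx_vmcs_try_enter() must still be matched
by vmx_vmcs_exit(), exactly as with plain vmx_vmcs_enter(); only the
failure path skips the exit.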