[Xen-changelog] [xen-unstable] hvm vmx: Various cleanups and clarifications around event injection.
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1181769868 -3600
# Node ID 4d838167960694f1b9fcaec54590aef0e1f0a7ee
# Parent  b643179d7452a91cd874ee713c78bf30f8df3d2d
hvm vmx: Various cleanups and clarifications around event injection.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/intr.c        |   73 +++++++++++++++++++++----------------
 xen/arch/x86/hvm/vmx/vmcs.c        |    5 +-
 xen/arch/x86/hvm/vmx/vmx.c         |   18 +++++++--
 xen/include/asm-x86/hvm/vmx/vmcs.h |   10 ++++-
 xen/include/asm-x86/hvm/vmx/vmx.h  |   14 +++++--
 5 files changed, 80 insertions(+), 40 deletions(-)

diff -r b643179d7452 -r 4d8381679606 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c	Wed Jun 13 11:28:13 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c	Wed Jun 13 22:24:28 2007 +0100
@@ -1,6 +1,7 @@
 /*
- * io.c: handling I/O, interrupts related VMX entry/exit
+ * intr.c: handling I/O, interrupts related VMX entry/exit
  * Copyright (c) 2004, Intel Corporation.
+ * Copyright (c) 2004-2007, XenSource Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -14,7 +15,6 @@
  * You should have received a copy of the GNU General Public License along with
  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
  */

 #include <xen/config.h>
@@ -24,7 +24,6 @@
 #include <xen/errno.h>
 #include <xen/trace.h>
 #include <xen/event.h>
-
 #include <asm/current.h>
 #include <asm/cpufeature.h>
 #include <asm/processor.h>
@@ -39,32 +38,48 @@
 #include <public/hvm/ioreq.h>
 #include <asm/hvm/trace.h>

+/*
+ * A few notes on virtual NMI and INTR delivery, and interactions with
+ * interruptibility states:
+ *
+ * We can only inject an ExtInt if EFLAGS.IF = 1 and no blocking by
+ * STI nor MOV SS. Otherwise the VM entry fails. The 'virtual interrupt
+ * pending' control causes a VM exit when all these checks succeed. It will
+ * exit immediately after VM entry if the checks succeed at that point.
+ *
+ * We can only inject an NMI if no blocking by MOV SS (also, depending on
+ * implementation, if no blocking by STI). If pin-based 'virtual NMIs'
+ * control is specified then the NMI-blocking interruptibility flag is
+ * also checked. The 'virtual NMI pending' control (available only in
+ * conjunction with 'virtual NMIs') causes a VM exit when all these checks
+ * succeed. It will exit immediately after VM entry if the checks succeed
+ * at that point.
+ *
+ * Because a processor may or may not check blocking-by-STI when injecting
+ * a virtual NMI, it will be necessary to convert that to block-by-MOV-SS
+ * before specifying the 'virtual NMI pending' control. Otherwise we could
+ * enter an infinite loop where we check blocking-by-STI in software and
+ * thus delay delivery of a virtual NMI, but the processor causes immediate
+ * VM exit because it does not check blocking-by-STI.
+ *
+ * Injecting a virtual NMI sets the NMI-blocking interruptibility flag only
+ * if the 'virtual NMIs' control is set. Injecting *any* kind of event clears
+ * the STI- and MOV-SS-blocking interruptibility-state flags.
+ *
+ * If MOV/POP SS is executed while MOV-SS-blocking is in effect, the effect
+ * is cleared. If STI is executed while MOV-SS- or STI-blocking is in effect,
+ * the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking).
+ */

-static inline void
-enable_irq_window(struct vcpu *v)
+static void enable_irq_window(struct vcpu *v)
 {
     u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;

-    if (!(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING)) {
+    if ( !(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) )
+    {
         *cpu_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
         __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
     }
-}
-
-static inline void
-disable_irq_window(struct vcpu *v)
-{
-    u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
-
-    if ( *cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING ) {
-        *cpu_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
-    }
-}
-
-static inline int is_interruptibility_state(void)
-{
-    return __vmread(GUEST_INTERRUPTIBILITY_INFO);
 }

 static void update_tpr_threshold(struct vlapic *vlapic)
@@ -87,13 +102,11 @@ static void update_tpr_threshold(struct

 asmlinkage void vmx_intr_assist(void)
 {
-    int intr_type = 0;
-    int intr_vector;
-    unsigned long eflags;
+    int has_ext_irq, intr_vector, intr_type = 0;
+    unsigned long eflags, intr_shadow;
     struct vcpu *v = current;
     unsigned int idtv_info_field;
     unsigned long inst_len;
-    int has_ext_irq;

     pt_update_irq(v);

@@ -125,10 +138,10 @@ asmlinkage void vmx_intr_assist(void)
         inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
         __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);

-        if (unlikely(idtv_info_field & 0x800)) /* valid error code */
+        if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
             __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
                       __vmread(IDT_VECTORING_ERROR_CODE));
-        if (unlikely(has_ext_irq))
+        if ( unlikely(has_ext_irq) )
             enable_irq_window(v);

         HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
@@ -138,9 +151,9 @@ asmlinkage void vmx_intr_assist(void)
     if ( likely(!has_ext_irq) )
         return;

-    if ( unlikely(is_interruptibility_state()) )
+    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+    if ( unlikely(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)) )
     {
-        /* pre-cleared for emulated instruction */
         enable_irq_window(v);
         HVM_DBG_LOG(DBG_LEVEL_1, "interruptibility");
         return;
diff -r b643179d7452 -r 4d8381679606 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Jun 13 11:28:13 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Jun 13 22:24:28 2007 +0100
@@ -70,8 +70,9 @@ void vmx_init_vmcs_config(void)
     u32 _vmx_vmexit_control;
     u32 _vmx_vmentry_control;

-    min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
-    opt = 0;
+    min = (PIN_BASED_EXT_INTR_MASK |
+           PIN_BASED_NMI_EXITING);
+    opt = 0; /*PIN_BASED_VIRTUAL_NMIS*/
     _vmx_pin_based_exec_control = adjust_vmx_controls(
         min, opt, MSR_IA32_VMX_PINBASED_CTLS);

diff -r b643179d7452 -r 4d8381679606 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Jun 13 11:28:13 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Jun 13 22:24:28 2007 +0100
@@ -1300,11 +1300,17 @@ static int __get_instruction_length(void

 static void inline __update_guest_eip(unsigned long inst_len)
 {
-    unsigned long current_eip;
+    unsigned long current_eip, intr_shadow;

     current_eip = __vmread(GUEST_RIP);
     __vmwrite(GUEST_RIP, current_eip + inst_len);
-    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
+
+    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+    if ( intr_shadow & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
+    {
+        intr_shadow &= ~(VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS);
+        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
+    }
 }

 static void vmx_do_no_device_fault(void)
@@ -2902,9 +2908,15 @@ asmlinkage void vmx_vmexit_handler(struc
     case EXIT_REASON_TRIPLE_FAULT:
         hvm_triple_fault();
         break;
-    case EXIT_REASON_PENDING_INTERRUPT:
+    case EXIT_REASON_PENDING_VIRT_INTR:
         /* Disable the interrupt window. */
         v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
+                  v->arch.hvm_vcpu.u.vmx.exec_control);
+        break;
+    case EXIT_REASON_PENDING_VIRT_NMI:
+        /* Disable the NMI window. */
+        v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
         __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                   v->arch.hvm_vcpu.u.vmx.exec_control);
         break;
diff -r b643179d7452 -r 4d8381679606 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jun 13 11:28:13 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jun 13 22:24:28 2007 +0100
@@ -104,6 +104,7 @@ void vmx_vmcs_exit(struct vcpu *v);
 #define CPU_BASED_CR8_LOAD_EXITING            0x00080000
 #define CPU_BASED_CR8_STORE_EXITING           0x00100000
 #define CPU_BASED_TPR_SHADOW                  0x00200000
+#define CPU_BASED_VIRTUAL_NMI_PENDING         0x00400000
 #define CPU_BASED_MOV_DR_EXITING              0x00800000
 #define CPU_BASED_UNCOND_IO_EXITING           0x01000000
 #define CPU_BASED_ACTIVATE_IO_BITMAP          0x02000000
@@ -115,6 +116,7 @@ extern u32 vmx_cpu_based_exec_control;

 #define PIN_BASED_EXT_INTR_MASK         0x00000001
 #define PIN_BASED_NMI_EXITING           0x00000008
+#define PIN_BASED_VIRTUAL_NMIS          0x00000020
 extern u32 vmx_pin_based_exec_control;

 #define VM_EXIT_IA32E_MODE              0x00000200
@@ -137,7 +139,13 @@ extern u32 vmx_secondary_exec_control;
     (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
 extern char *vmx_msr_bitmap;

-/* VMCS Encordings */
+/* GUEST_INTERRUPTIBILITY_INFO flags. */
+#define VMX_INTR_SHADOW_STI             0x00000001
+#define VMX_INTR_SHADOW_MOV_SS          0x00000002
+#define VMX_INTR_SHADOW_SMI             0x00000004
+#define VMX_INTR_SHADOW_NMI             0x00000008
+
+/* VMCS field encodings. */
 enum vmcs_field {
     GUEST_ES_SELECTOR               = 0x00000800,
     GUEST_CS_SELECTOR               = 0x00000802,
diff -r b643179d7452 -r 4d8381679606 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h	Wed Jun 13 11:28:13 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Wed Jun 13 22:24:28 2007 +0100
@@ -46,8 +46,8 @@ void vmx_vlapic_msr_changed(struct vcpu
 #define EXIT_REASON_SIPI                4
 #define EXIT_REASON_IO_SMI              5
 #define EXIT_REASON_OTHER_SMI           6
-#define EXIT_REASON_PENDING_INTERRUPT   7
-
+#define EXIT_REASON_PENDING_VIRT_INTR   7
+#define EXIT_REASON_PENDING_VIRT_NMI    8
 #define EXIT_REASON_TASK_SWITCH         9
 #define EXIT_REASON_CPUID               10
 #define EXIT_REASON_HLT                 12
@@ -295,7 +295,14 @@ static inline void __vmx_inject_exceptio
 {
     unsigned long intr_fields;

-    /* Reflect it back into the guest */
+    /*
+     * NB. Callers do not need to worry about clearing STI/MOV-SS blocking:
+     *  "If the VM entry is injecting, there is no blocking by STI or by
+     *  MOV SS following the VM entry, regardless of the contents of the
+     *  interruptibility-state field [in the guest-state area before the
+     *  VM entry]", PRM Vol. 3, 22.6.1 (Interruptibility State).
+     */
+
     intr_fields = (INTR_INFO_VALID_MASK | type | trap);
     if ( error_code != VMX_DELIVER_NO_ERROR_CODE ) {
         __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
@@ -332,7 +339,6 @@ static inline void vmx_inject_extint(str
 static inline void vmx_inject_extint(struct vcpu *v, int trap, int error_code)
 {
     __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code, 0);
-    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
 }

 #endif /* __ASM_X86_HVM_VMX_VMX_H__ */
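Editor's note on the interruptibility comment added to intr.c: this changeset only lays the groundwork for virtual NMI support (PIN_BASED_VIRTUAL_NMIS is still commented out in vmcs.c), so it contains no NMI-window helper. As a rough, illustrative sketch of what the comment describes, and not part of this patch, a hypothetical enable_nmi_window() modelled on enable_irq_window() could convert STI blocking to MOV-SS blocking before arming the 'virtual NMI pending' control:

/*
 * Hypothetical helper, not part of this changeset: open an "NMI window"
 * using the CPU_BASED_VIRTUAL_NMI_PENDING control introduced above.
 * The name and placement are assumptions; only the definitions it uses
 * (__vmread/__vmwrite, VMX_INTR_SHADOW_*, exec_control) come from this tree.
 */
static void enable_nmi_window(struct vcpu *v)
{
    u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
    unsigned long intr_shadow;

    /*
     * The processor may or may not honour blocking-by-STI when deciding
     * whether to raise a 'virtual NMI pending' exit, but it does honour
     * blocking-by-MOV-SS.  Converting STI blocking into MOV-SS blocking
     * keeps the hardware check in agreement with the software one.
     */
    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
    if ( intr_shadow & VMX_INTR_SHADOW_STI )
    {
        intr_shadow &= ~VMX_INTR_SHADOW_STI;
        intr_shadow |= VMX_INTR_SHADOW_MOV_SS;
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
    }

    /* Arm the NMI-window exit if it is not already pending. */
    if ( !(*cpu_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) )
    {
        *cpu_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
    }
}

Converting the shadow first ensures that, on processors which ignore blocking-by-STI for NMI injection, the 'virtual NMI pending' exit cannot fire while the software-side check would still decline to inject, which is the immediate-exit loop the comment block warns about.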