Index: 2007-11-13/xen/arch/x86/hvm/io.c
===================================================================
--- 2007-11-13.orig/xen/arch/x86/hvm/io.c	2007-11-21 08:47:45.000000000 +0100
+++ 2007-11-13/xen/arch/x86/hvm/io.c	2007-11-20 16:27:57.000000000 +0100
@@ -863,6 +863,8 @@ void hvm_io_assist(void)
     /* Copy register changes back into current guest state. */
     regs->eflags &= ~X86_EFLAGS_RF;
     memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
+    if ( regs->eflags & X86_EFLAGS_TF )
+        hvm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);

  out:
     vcpu_end_shutdown_deferral(v);
Index: 2007-11-13/xen/arch/x86/hvm/platform.c
===================================================================
--- 2007-11-13.orig/xen/arch/x86/hvm/platform.c	2007-11-21 08:47:45.000000000 +0100
+++ 2007-11-13/xen/arch/x86/hvm/platform.c	2007-11-20 16:37:27.000000000 +0100
@@ -1061,7 +1061,6 @@ void handle_mmio(unsigned long gpa)
     }

     regs->eip += inst_len; /* advance %eip */
-    regs->eflags &= ~X86_EFLAGS_RF;

     switch ( mmio_op->instr ) {
     case INSTR_MOV:
@@ -1121,7 +1120,6 @@ void handle_mmio(unsigned long gpa)
                 /* The guest does not have the non-mmio address mapped.
                  * Need to send in a page fault */
                 regs->eip -= inst_len; /* do not advance %eip */
-                regs->eflags |= X86_EFLAGS_RF; /* RF was set by original #PF */
                 hvm_inject_exception(TRAP_page_fault, pfec, addr);
                 return;
             }
@@ -1150,7 +1148,6 @@ void handle_mmio(unsigned long gpa)
             /* Failed on the page-spanning copy.  Inject PF into
              * the guest for the address where we failed */
             regs->eip -= inst_len; /* do not advance %eip */
-            regs->eflags |= X86_EFLAGS_RF; /* RF was set by #PF */
             /* Must set CR2 at the failing address */
             addr += size - rv;
             gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
Index: 2007-11-13/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2007-11-13.orig/xen/arch/x86/hvm/svm/svm.c	2007-11-21 08:47:45.000000000 +0100
+++ 2007-11-13/xen/arch/x86/hvm/svm/svm.c	2007-11-20 16:46:55.000000000 +0100
@@ -78,14 +78,6 @@ static void *root_vmcb[NR_CPUS] __read_m

 static void svm_update_guest_efer(struct vcpu *v);

-static void inline __update_guest_eip(
-    struct cpu_user_regs *regs, int inst_len)
-{
-    ASSERT(inst_len > 0);
-    regs->eip += inst_len;
-    regs->eflags &= ~X86_EFLAGS_RF;
-}
-
 static void svm_inject_exception(
     struct vcpu *v, int trap, int ev, int error_code)
 {
@@ -107,6 +99,20 @@ static void svm_inject_exception(
     vmcb->eventinj = event;
 }

+static void inline __update_guest_eip(struct vcpu *v,
+    struct cpu_user_regs *regs, int inst_len)
+{
+    ASSERT(inst_len > 0);
+    regs->eip += inst_len;
+    regs->eflags &= ~X86_EFLAGS_RF;
+    if ( v && (regs->eflags & X86_EFLAGS_TF) )
+    {
+        v->arch.guest_context.debugreg[6] =
+            v->arch.hvm_svm.vmcb->dr6 |= 0x4000;
+        svm_inject_exception(v, TRAP_debug, 0, 0);
+    }
+}
+
 static void svm_cpu_down(void)
 {
     write_efer(read_efer() & ~EFER_SVME);
@@ -868,7 +874,12 @@ static void svm_hvm_inject_exception(
     struct vcpu *v = current;
     if ( trapnr == TRAP_page_fault )
         v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2] = cr2;
-    svm_inject_exception(v, trapnr, (errcode != -1), errcode);
+    if ( trapnr == TRAP_debug &&
+         (guest_cpu_user_regs()->eflags & X86_EFLAGS_TF) )
+        v->arch.guest_context.debugreg[6] =
+            v->arch.hvm_svm.vmcb->dr6 |= 0x4000;
+    svm_inject_exception(v, trapnr,
+                         (errcode != HVM_DELIVER_NO_ERROR_CODE), errcode);
 }

 static int svm_event_pending(struct vcpu *v)
@@ -1083,7 +1094,7 @@ static void svm_vmexit_do_cpuid(struct v

     inst_len = __get_instruction_length(v, INSTR_CPUID, NULL);
     ASSERT(inst_len > 0);
-    __update_guest_eip(regs, inst_len);
+    __update_guest_eip(v, regs, inst_len);
 }

 static unsigned long *get_reg_p(
@@ -1769,7 +1780,7 @@ static void svm_cr_access(

     ASSERT(inst_len);
     if ( result )
-        __update_guest_eip(regs, inst_len);
+        __update_guest_eip(v, regs, inst_len);
 }

 static void svm_do_msr_access(
@@ -1940,7 +1951,7 @@ static void svm_do_msr_access(
         inst_len = __get_instruction_length(v, INSTR_WRMSR, NULL);
     }

-    __update_guest_eip(regs, inst_len);
+    __update_guest_eip(v, regs, inst_len);
 }

 static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
@@ -1948,7 +1959,7 @@ static void svm_vmexit_do_hlt(struct vmc
 {
     struct hvm_intack intack = hvm_vcpu_has_pending_irq(current);

-    __update_guest_eip(regs, 1);
+    __update_guest_eip(NULL, regs, 1);

     /* Check for interrupt not handled or new interrupt. */
     if ( vmcb->eventinj.fields.v ||
@@ -1978,7 +1989,7 @@ static void svm_vmexit_do_invalidate_cac

     inst_len = __get_instruction_length_from_list(
         curr, list, ARRAY_SIZE(list), NULL, NULL);
-    __update_guest_eip(regs, inst_len);
+    __update_guest_eip(curr, regs, inst_len);
 }

 void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
@@ -2003,7 +2014,7 @@ void svm_handle_invlpg(const short invlp
     {
         inst_len = __get_instruction_length(v, INSTR_INVLPGA, opcode);
         ASSERT(inst_len > 0);
-        __update_guest_eip(regs, inst_len);
+        __update_guest_eip(v, regs, inst_len);

         /*
          * The address is implicit on this instruction. At the moment, we don't
@@ -2030,7 +2041,7 @@ void svm_handle_invlpg(const short invlp
                                              &opcode[inst_len], &length);

         inst_len += length;
-        __update_guest_eip(regs, inst_len);
+        __update_guest_eip(v, regs, inst_len);
     }

     HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
@@ -2186,7 +2197,7 @@ asmlinkage void svm_vmexit_handler(struc
             goto exit_and_crash;
         /* AMD Vol2, 15.11: INT3, INTO, BOUND intercepts do not update RIP. */
         inst_len = __get_instruction_length(v, INSTR_INT3, NULL);
-        __update_guest_eip(regs, inst_len);
+        __update_guest_eip(NULL, regs, inst_len);
         domain_pause_for_debugger();
         break;

@@ -2268,7 +2279,7 @@ asmlinkage void svm_vmexit_handler(struc
         rc = hvm_do_hypercall(regs);
         if ( rc != HVM_HCALL_preempted )
         {
-            __update_guest_eip(regs, inst_len);
+            __update_guest_eip(v, regs, inst_len);
             if ( rc == HVM_HCALL_invalidate )
                 send_invalidate_req();
         }
Index: 2007-11-13/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- 2007-11-13.orig/xen/arch/x86/hvm/vmx/vmx.c	2007-11-21 08:47:45.000000000 +0100
+++ 2007-11-13/xen/arch/x86/hvm/vmx/vmx.c	2007-11-20 16:32:57.000000000 +0100
@@ -1093,6 +1093,14 @@ static void vmx_inject_exception(
     vmx_inject_hw_exception(v, trapnr, errcode);
     if ( trapnr == TRAP_page_fault )
         v->arch.hvm_vcpu.guest_cr[2] = cr2;
+    if ( trapnr == TRAP_debug &&
+         (guest_cpu_user_regs()->eflags & X86_EFLAGS_TF) )
+    {
+        unsigned long dr6 = read_debugreg(6);
+
+        v->arch.guest_context.debugreg[6] = dr6 |= 0x4000;
+        write_debugreg(6, dr6);
+    }
 }

 static void vmx_update_vtpr(struct vcpu *v, unsigned long value)
@@ -1184,13 +1192,20 @@ static int __get_instruction_length(void
     return len;
 }

-static void __update_guest_eip(unsigned long inst_len)
+static void __update_guest_eip(struct vcpu *v, unsigned long inst_len)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     unsigned long x;

     regs->eip += inst_len;
     regs->eflags &= ~X86_EFLAGS_RF;
+    if ( v && (regs->eflags & X86_EFLAGS_TF) )
+    {
+        x = read_debugreg(6);
+        v->arch.guest_context.debugreg[6] = x |= 0x4000;
+        write_debugreg(6, x);
+        vmx_inject_hw_exception(v, TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
+    }

     x = __vmread(GUEST_INTERRUPTIBILITY_INFO);
     if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
@@ -2824,18 +2839,18 @@ asmlinkage void vmx_vmexit_handler(struc
     }
     case EXIT_REASON_CPUID:
         inst_len = __get_instruction_length(); /* Safe: CPUID */
-        __update_guest_eip(inst_len);
+        __update_guest_eip(v, inst_len);
         vmx_do_cpuid(regs);
         break;
     case EXIT_REASON_HLT:
         inst_len = __get_instruction_length(); /* Safe: HLT */
-        __update_guest_eip(inst_len);
+        __update_guest_eip(NULL, inst_len);
         vmx_do_hlt(regs);
         break;
     case EXIT_REASON_INVLPG:
     {
         inst_len = __get_instruction_length(); /* Safe: INVLPG */
-        __update_guest_eip(inst_len);
+        __update_guest_eip(v, inst_len);
         exit_qualification = __vmread(EXIT_QUALIFICATION);
         vmx_do_invlpg(exit_qualification);
         break;
@@ -2848,7 +2863,7 @@ asmlinkage void vmx_vmexit_handler(struc
         rc = hvm_do_hypercall(regs);
         if ( rc != HVM_HCALL_preempted )
         {
-            __update_guest_eip(inst_len);
+            __update_guest_eip(v, inst_len);
             if ( rc == HVM_HCALL_invalidate )
                 send_invalidate_req();
         }
@@ -2859,7 +2874,7 @@ asmlinkage void vmx_vmexit_handler(struc
         exit_qualification = __vmread(EXIT_QUALIFICATION);
         inst_len = __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
         if ( vmx_cr_access(exit_qualification, regs) )
-            __update_guest_eip(inst_len);
+            __update_guest_eip(v, inst_len);
         break;
     }
     case EXIT_REASON_DR_ACCESS:
@@ -2874,12 +2889,12 @@ asmlinkage void vmx_vmexit_handler(struc
     case EXIT_REASON_MSR_READ:
         inst_len = __get_instruction_length(); /* Safe: RDMSR */
         if ( vmx_do_msr_read(regs) )
-            __update_guest_eip(inst_len);
+            __update_guest_eip(v, inst_len);
         break;
     case EXIT_REASON_MSR_WRITE:
         inst_len = __get_instruction_length(); /* Safe: WRMSR */
         if ( vmx_do_msr_write(regs) )
-            __update_guest_eip(inst_len);
+            __update_guest_eip(v, inst_len);
         break;

     case EXIT_REASON_MWAIT_INSTRUCTION:
@@ -2893,7 +2908,7 @@ asmlinkage void vmx_vmexit_handler(struc
     case EXIT_REASON_VMWRITE:
     case EXIT_REASON_VMXOFF:
     case EXIT_REASON_VMXON:
-        vmx_inject_hw_exception(v, TRAP_invalid_op, VMX_DELIVER_NO_ERROR_CODE);
+        vmx_inject_hw_exception(v, TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;

     case EXIT_REASON_TPR_BELOW_THRESHOLD:
@@ -2911,7 +2926,7 @@ asmlinkage void vmx_vmexit_handler(struc
     case EXIT_REASON_INVD:
     {
         inst_len = __get_instruction_length(); /* Safe: INVD */
-        __update_guest_eip(inst_len);
+        __update_guest_eip(v, inst_len);
         if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) )
             wbinvd();
         break;
Index: 2007-11-13/xen/arch/x86/traps.c
===================================================================
--- 2007-11-13.orig/xen/arch/x86/traps.c	2007-11-21 08:47:45.000000000 +0100
+++ 2007-11-13/xen/arch/x86/traps.c	2007-11-20 15:46:19.000000000 +0100
@@ -412,6 +412,17 @@ static void do_guest_trap(
                    regs->error_code);
 }

+static void instruction_done(struct cpu_user_regs *regs, unsigned long eip)
+{
+    regs->eip = eip;
+    regs->eflags &= ~X86_EFLAGS_RF;
+    if ( regs->eflags & X86_EFLAGS_TF )
+    {
+        current->arch.guest_context.debugreg[6] |= 0xffff4ff0;
+        do_guest_trap(TRAP_debug, regs, 0);
+    }
+}
+
 /*
  * Called from asm to set up the NMI trapbounce info.
  * Returns 0 if no callback is set up, else 1.
@@ -657,8 +668,7 @@ static int emulate_forced_invalid_op(str
     regs->ebx = b;
     regs->ecx = c;
     regs->edx = d;
-    regs->eip = eip;
-    regs->eflags &= ~X86_EFLAGS_RF;
+    instruction_done(regs, eip);

     trace_trap_one_addr(TRC_PV_FORCED_INVALID_OP, regs->eip);

@@ -1950,8 +1960,7 @@ static int emulate_privileged_op(struct
 #undef rd_ad

  done:
-    regs->eip = eip;
-    regs->eflags &= ~X86_EFLAGS_RF;
+    instruction_done(regs, eip);
  skip:
     return EXCRET_fault_fixed;

@@ -2320,8 +2329,8 @@ static void emulate_gate_op(struct cpu_u
     else
         sel |= (regs->cs & 3);

-    regs->eip = off;
     regs->cs = sel;
+    instruction_done(regs, off);
 #endif
 }

Index: 2007-11-13/xen/arch/x86/x86_emulate.c
===================================================================
--- 2007-11-13.orig/xen/arch/x86/x86_emulate.c	2007-11-21 08:47:45.000000000 +0100
+++ 2007-11-13/xen/arch/x86/x86_emulate.c	2007-11-20 15:38:30.000000000 +0100
@@ -1625,6 +1625,7 @@ x86_emulate(
     /* Commit shadow register state. */
     _regs.eflags &= ~EF_RF;
     *ctxt->regs = _regs;
+    /* FIXME generate_exception_if(_regs.eflags & EF_TF, EXC_DB); */

 done:
     return rc;
Index: 2007-11-13/xen/include/asm-x86/hvm/support.h
===================================================================
--- 2007-11-13.orig/xen/include/asm-x86/hvm/support.h	2007-11-21 08:47:45.000000000 +0100
+++ 2007-11-13/xen/include/asm-x86/hvm/support.h	2007-11-20 15:58:09.000000000 +0100
@@ -50,7 +50,7 @@ static inline vcpu_iodata_t *get_ioreq(s
 #define TYPE_CLTS                   (2 << 4)
 #define TYPE_LMSW                   (3 << 4)

-#define VMX_DELIVER_NO_ERROR_CODE  -1
+#define HVM_DELIVER_NO_ERROR_CODE  -1

 #if HVM_DEBUG
 #define DBG_LEVEL_0                 (1 << 0)
Index: 2007-11-13/xen/include/asm-x86/hvm/vmx/vmx.h
===================================================================
--- 2007-11-13.orig/xen/include/asm-x86/hvm/vmx/vmx.h	2007-11-21 08:47:45.000000000 +0100
+++ 2007-11-13/xen/include/asm-x86/hvm/vmx/vmx.h	2007-11-20 15:58:49.000000000 +0100
@@ -272,7 +272,7 @@ static inline void __vmx_inject_exceptio
      */
     intr_fields = (INTR_INFO_VALID_MASK | (type<<8) | trap);

-    if ( error_code != VMX_DELIVER_NO_ERROR_CODE ) {
+    if ( error_code != HVM_DELIVER_NO_ERROR_CODE ) {
         __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
         intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
     }
@@ -294,13 +294,13 @@ static inline void vmx_inject_hw_excepti
 static inline void vmx_inject_extint(struct vcpu *v, int trap)
 {
     __vmx_inject_exception(v, trap, X86_EVENTTYPE_EXT_INTR,
-                           VMX_DELIVER_NO_ERROR_CODE);
+                           HVM_DELIVER_NO_ERROR_CODE);
 }

 static inline void vmx_inject_nmi(struct vcpu *v)
 {
     __vmx_inject_exception(v, 2, X86_EVENTTYPE_NMI,
-                           VMX_DELIVER_NO_ERROR_CODE);
+                           HVM_DELIVER_NO_ERROR_CODE);
 }

 #endif /* __ASM_X86_HVM_VMX_VMX_H__ */
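
For reference, the pattern the patch applies at every emulation exit point can be
summarised in a small standalone C sketch. This is not part of the patch and does
not use the real Xen structures: the fake_regs/fake_vcpu types and the
deliver_debug_trap() stub are invented for illustration only. It merely mirrors
the logic added to __update_guest_eip(), instruction_done() and hvm_io_assist()
above: clear RF as the hardware would, and if the guest was single-stepping
(EFLAGS.TF set), record the single-step condition in DR6 (bit 14, "BS") and
deliver a #DB trap for the completed instruction.

/* Illustrative sketch only: simplified stand-ins, not the Xen types. */
#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_TF 0x00000100u   /* trap flag (single step)        */
#define X86_EFLAGS_RF 0x00010000u   /* resume flag                    */
#define DR6_BS        0x00004000u   /* single-step condition in DR6   */

struct fake_regs { uint64_t eip; uint32_t eflags; };
struct fake_vcpu { struct fake_regs regs; uint64_t dr6; };

/* Hypothetical stand-in for injecting TRAP_debug into the guest. */
static void deliver_debug_trap(struct fake_vcpu *v)
{
    printf("#DB injected at eip=%#llx, dr6=%#llx\n",
           (unsigned long long)v->regs.eip, (unsigned long long)v->dr6);
}

/* Mirror of the post-emulation logic the patch introduces. */
static void emulation_done(struct fake_vcpu *v, unsigned int inst_len)
{
    v->regs.eip += inst_len;          /* advance past the emulated insn  */
    v->regs.eflags &= ~X86_EFLAGS_RF; /* RF is cleared after any insn    */
    if ( v->regs.eflags & X86_EFLAGS_TF )
    {
        v->dr6 |= DR6_BS;             /* report "single step" in DR6     */
        deliver_debug_trap(v);        /* guest debugger sees one #DB     */
    }
}

int main(void)
{
    struct fake_vcpu v = { .regs = { .eip = 0x1000, .eflags = X86_EFLAGS_TF } };
    emulation_done(&v, 2);            /* e.g. a 2-byte CPUID just emulated */
    return 0;
}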