x86/traps: honor EXT bit in error codes The specification does not explicitly limit the use of this bit to exceptions that can have selector style error codes, so to be on the safe side we should deal with it being set even on error codes formally documented to be always zero (if they're indeed always zero, the change is simply dead code in those cases). Introduce and use (where suitable) X86_XEC_* constants to make the code easier to read. To match the placement of the "hardware_trap" label, the "hardware_gp" one gets moved slightly too. Signed-off-by: Jan Beulich --- a/xen/arch/x86/cpu/mcheck/mce.c +++ b/xen/arch/x86/cpu/mcheck/mce.c @@ -431,16 +431,16 @@ static enum mce_result mce_action(const /* * Return: - * -1: if system can't be recovered + * 1: if system can't be recovered * 0: Continue to next step */ -static int mce_urgent_action(const struct cpu_user_regs *regs, - mctelem_cookie_t mctc) +static bool_t mce_urgent_action(const struct cpu_user_regs *regs, + mctelem_cookie_t mctc) { uint64_t gstatus; - if ( mctc == NULL) - return 0; + if ( regs->error_code & X86_XEC_EXT ) + return 1; gstatus = mca_rdmsr(MSR_IA32_MCG_STATUS); @@ -455,9 +455,9 @@ static int mce_urgent_action(const struc */ if ( !(gstatus & MCG_STATUS_RIPV) && (!(gstatus & MCG_STATUS_EIPV) || !guest_mode(regs)) ) - return -1; + return 1; - return mce_action(regs, mctc) == MCER_RESET ? -1 : 0; + return mctc && mce_action(regs, mctc) == MCER_RESET; } /* Shared #MC handler. 
*/ --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -618,6 +618,9 @@ static void do_trap(struct cpu_user_regs unsigned int trapnr = regs->entry_vector; unsigned long fixup; + if ( use_error_code && (regs->error_code & X86_XEC_EXT) ) + goto hardware_trap; + DEBUGGER_trap_entry(trapnr, regs); if ( guest_mode(regs) ) @@ -644,6 +647,7 @@ static void do_trap(struct cpu_user_regs return; } + hardware_trap: DEBUGGER_trap_fatal(trapnr, regs); show_execution_state(regs); @@ -1265,13 +1269,14 @@ static int handle_gdt_ldt_mapping_fault( tb = propagate_page_fault(curr->arch.pv_vcpu.ldt_base + offset, regs->error_code); if ( tb ) - tb->error_code = ((u16)offset & ~3) | 4; + tb->error_code = (offset & ~(X86_XEC_EXT | X86_XEC_IDT)) | + X86_XEC_TI; } } else { /* GDT fault: handle the fault as #GP(selector). */ - regs->error_code = (u16)offset & ~7; + regs->error_code = offset & ~(X86_XEC_EXT | X86_XEC_IDT | X86_XEC_TI); (void)do_general_protection(regs); } @@ -3231,7 +3236,7 @@ void do_general_protection(struct cpu_us DEBUGGER_trap_entry(TRAP_gp_fault, regs); - if ( regs->error_code & 1 ) + if ( regs->error_code & X86_XEC_EXT ) goto hardware_gp; if ( !guest_mode(regs) ) @@ -3257,7 +3262,7 @@ void do_general_protection(struct cpu_us * instruction. The DPL specified by the guest OS for these vectors is NOT * CHECKED!! */ - if ( (regs->error_code & 3) == 2 ) + if ( regs->error_code & X86_XEC_IDT ) { /* This fault must be due to instruction. */ const struct trap_info *ti; @@ -3299,9 +3304,9 @@ void do_general_protection(struct cpu_us return; } + hardware_gp: DEBUGGER_trap_fatal(TRAP_gp_fault, regs); - hardware_gp: show_execution_state(regs); panic("GENERAL PROTECTION FAULT\n[error_code=%04x]", regs->error_code); } --- a/xen/arch/x86/x86_64/entry.S +++ b/xen/arch/x86/x86_64/entry.S @@ -338,7 +338,7 @@ int80_slow_path: * Setup entry vector and error code as if this was a GPF caused by an * IDT entry with DPL==0. 
*/ - movl $((0x80 << 3) | 0x2),UREGS_error_code(%rsp) + movl $((0x80 << 3) | X86_XEC_IDT),UREGS_error_code(%rsp) SAVE_PRESERVED movl $TRAP_gp_fault,UREGS_entry_vector(%rsp) /* A GPF wouldn't have incremented the instruction pointer. */ --- a/xen/include/asm-x86/processor.h +++ b/xen/include/asm-x86/processor.h @@ -143,6 +143,11 @@ #define PFEC_page_paged (1U<<5) #define PFEC_page_shared (1U<<6) +/* Other exception error code values. */ +#define X86_XEC_EXT (_AC(1,U) << 0) +#define X86_XEC_IDT (_AC(1,U) << 1) +#define X86_XEC_TI (_AC(1,U) << 2) + #define XEN_MINIMAL_CR4 (X86_CR4_PGE | X86_CR4_PAE) #define XEN_SYSCALL_MASK (X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF| \