[Xen-changelog] [xen stable-4.2] x86emul: enforce privilege level restrictions when loading CS

commit 301bd3e8c53f2478da537a223386e153d046a063
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Nov 18 14:33:55 2014 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Nov 18 14:33:55 2014 +0100

    x86emul: enforce privilege level restrictions when loading CS

    Privilege level checks were basically missing for the CS case, the
    only check that was done (RPL == DPL for nonconforming segments)
    was solely covering a single special case (return to non-conforming
    segment).

    Additionally in long mode the L bit set requires the D bit to be
    clear, as was recently pointed out for KVM by Nadav Amit
    <namit@xxxxxxxxxxxxxxxxx>.

    Finally we also need to force the loaded selector's RPL to CPL (at
    least as long as lret/retf emulation doesn't support privilege level
    changes).

    This is CVE-2014-8595 / XSA-110.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/x86_emulate/x86_emulate.c |   42 +++++++++++++++++++++----------
 1 files changed, 28 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 45a39b6..6480a27 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1107,7 +1107,7 @@ realmode_load_seg(
 static int
 protmode_load_seg(
     enum x86_segment seg,
-    uint16_t sel,
+    uint16_t sel, bool_t is_ret,
     struct x86_emulate_ctxt *ctxt,
     const struct x86_emulate_ops *ops)
 {
@@ -1179,9 +1179,23 @@ protmode_load_seg(
         /* Code segment? */
         if ( !(desc.b & (1u<<11)) )
             goto raise_exn;
-        /* Non-conforming segment: check DPL against RPL. */
-        if ( ((desc.b & (6u<<9)) != (6u<<9)) && (dpl != rpl) )
+        if ( is_ret
+             ? /*
+                * Really rpl < cpl, but our sole caller doesn't handle
+                * privilege level changes.
+                */
+               rpl != cpl || (desc.b & (1 << 10) ? dpl > rpl : dpl != rpl)
+             : desc.b & (1 << 10)
+               /* Conforming segment: check DPL against CPL. */
+               ? dpl > cpl
+               /* Non-conforming segment: check RPL and DPL against CPL. */
+               : rpl > cpl || dpl != cpl )
+            goto raise_exn;
+        /* 64-bit code segments (L bit set) must have D bit clear. */
+        if ( in_longmode(ctxt, ops) &&
+             (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
             goto raise_exn;
+        sel = (sel ^ rpl) | cpl;
         break;
     case x86_seg_ss:
         /* Writable data segment? */
@@ -1246,7 +1260,7 @@ protmode_load_seg(
 static int
 load_seg(
     enum x86_segment seg,
-    uint16_t sel,
+    uint16_t sel, bool_t is_ret,
     struct x86_emulate_ctxt *ctxt,
     const struct x86_emulate_ops *ops)
 {
@@ -1255,7 +1269,7 @@ load_seg(
         return X86EMUL_UNHANDLEABLE;
 
     if ( in_protmode(ctxt, ops) )
-        return protmode_load_seg(seg, sel, ctxt, ops);
+        return protmode_load_seg(seg, sel, is_ret, ctxt, ops);
 
     return realmode_load_seg(seg, sel, ctxt, ops);
 }
@@ -1852,7 +1866,7 @@ x86_emulate(
         if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
                               &dst.val, op_bytes, ctxt, ops)) != 0 )
             goto done;
-        if ( (rc = load_seg(src.val, (uint16_t)dst.val, ctxt, ops)) != 0 )
+        if ( (rc = load_seg(src.val, dst.val, 0, ctxt, ops)) != 0 )
             return rc;
         break;
 
@@ -2225,7 +2239,7 @@ x86_emulate(
         enum x86_segment seg = decode_segment(modrm_reg);
         generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
         generate_exception_if(seg == x86_seg_cs, EXC_UD, -1);
-        if ( (rc = load_seg(seg, (uint16_t)src.val, ctxt, ops)) != 0 )
+        if ( (rc = load_seg(seg, src.val, 0, ctxt, ops)) != 0 )
             goto done;
         if ( seg == x86_seg_ss )
             ctxt->retire.flags.mov_ss = 1;
@@ -2306,7 +2320,7 @@ x86_emulate(
                                   &_regs.eip, op_bytes, ctxt)) )
             goto done;
 
-        if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
+        if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
             goto done;
         _regs.eip = eip;
         break;
@@ -2529,7 +2543,7 @@ x86_emulate(
         if ( (rc = read_ulong(src.mem.seg, src.mem.off + src.bytes,
                               &sel, 2, ctxt, ops)) != 0 )
             goto done;
-        if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 )
+        if ( (rc = load_seg(dst.val, sel, 0, ctxt, ops)) != 0 )
             goto done;
         dst.val = src.val;
         break;
@@ -2603,7 +2617,7 @@ x86_emulate(
                               &dst.val, op_bytes, ctxt, ops)) ||
              (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + offset),
                               &src.val, op_bytes, ctxt, ops)) ||
-             (rc = load_seg(x86_seg_cs, (uint16_t)src.val, ctxt, ops)) )
+             (rc = load_seg(x86_seg_cs, src.val, 1, ctxt, ops)) )
             goto done;
         _regs.eip = dst.val;
         break;
@@ -2650,7 +2664,7 @@ x86_emulate(
         _regs.eflags &= mask;
         _regs.eflags |= (uint32_t)(eflags & ~mask) | 0x02;
         _regs.eip = eip;
-        if ( (rc = load_seg(x86_seg_cs, (uint16_t)cs, ctxt, ops)) != 0 )
+        if ( (rc = load_seg(x86_seg_cs, cs, 1, ctxt, ops)) != 0 )
             goto done;
         break;
     }
@@ -3280,7 +3294,7 @@ x86_emulate(
         generate_exception_if(mode_64bit(), EXC_UD, -1);
         eip = insn_fetch_bytes(op_bytes);
         sel = insn_fetch_type(uint16_t);
-        if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
+        if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
             goto done;
         _regs.eip = eip;
         break;
@@ -3593,7 +3607,7 @@ x86_emulate(
             goto done;
         }
 
-        if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
+        if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
             goto done;
         _regs.eip = dst.val;
 
@@ -3674,7 +3688,7 @@ x86_emulate(
         generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1);
         generate_exception_if(!mode_ring0(), EXC_GP, 0);
         if ( (rc = load_seg((modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr,
-                            src.val, ctxt, ops)) != 0 )
+                            src.val, 0, ctxt, ops)) != 0 )
             goto done;
         break;
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.2
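
For readers following the logic rather than the diff, the CS checks the patch
introduces in protmode_load_seg() can be modelled in a few lines of standalone
C. This is only an illustrative sketch, not Xen code: the function name
cs_load_faults, its parameters (long_mode stands in for the emulator's
in_longmode() check), and the descriptor constant used in main() are made up
for the example, and the patch's RPL-forcing step (sel = (sel ^ rpl) | cpl) is
not modelled. Descriptor bit positions follow the Intel SDM: in the high
descriptor word, bit 10 is the conforming bit, bits 13-14 hold the DPL, bit 21
is L, and bit 22 is D/B.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Return true if loading CS with this selector/descriptor must raise #GP. */
    static bool cs_load_faults(uint8_t cpl, uint16_t sel, uint32_t desc_hi,
                               bool is_ret, bool long_mode)
    {
        uint8_t rpl = sel & 3;                     /* requested privilege level */
        uint8_t dpl = (desc_hi >> 13) & 3;         /* descriptor privilege level */
        bool conforming = desc_hi & (1u << 10);

        if ( is_ret )
        {
            /* lret/retf: no privilege change supported, so RPL must equal CPL. */
            if ( rpl != cpl || (conforming ? dpl > rpl : dpl != rpl) )
                return true;
        }
        else if ( conforming ? dpl > cpl : (rpl > cpl || dpl != cpl) )
            return true;                           /* direct far branch rules */

        /* In long mode a 64-bit code segment (L=1) must have D=0. */
        if ( long_mode && (desc_hi & (1u << 21)) && (desc_hi & (1u << 22)) )
            return true;

        return false;
    }

    int main(void)
    {
        /* CPL 3 guest attempts a far return into a non-conforming ring 0
         * 64-bit code segment (hypothetical descriptor high word). */
        uint32_t ring0_code = 0x00209a00;
        printf("faults: %d\n",
               cs_load_faults(3, 0x0008, ring0_code, true, true));
        return 0;
    }

Built with any C99 compiler, the example prints "faults: 1": a far return from
CPL 3 into a ring 0 segment is now rejected, whereas the pre-patch check (only
dpl != rpl for non-conforming segments) would have let it through, which is the
class of in-guest privilege escalation XSA-110 addresses.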