[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2] x86emul/fuzz: add rudimentary limit checking
fuzz_insn_fetch() is the only data access helper where it is possible to see offsets larger than 4Gb in 16- or 32-bit modes, as we leave the incoming rIP untouched in the emulator itself. The check is needed here as otherwise, after successfully fetching insn bytes, we may end up zero-extending EIP soon after complete_insn, which collides with the X86EMUL_EXCEPTION-conditional respective ASSERT() in x86_emulate_wrapper(). (NB: put_rep_prefix() is what allows complete_insn to be reached with rc set to other than X86EMUL_OKAY or X86EMUL_DONE. See also commit 53f87c03b4 ["x86emul: generalize exception handling for rep_* hooks"].) Add assert()-s for all other (data) access routines, as effective address generation in the emulator ought to guarantee in-range values. For them to not trigger, an adjustment to the emulator's DstBitBase handling is needed. While adding a truncate_ea() invocation there, also convert open coded instances of it. Reported-by: George Dunlap <george.dunlap@xxxxxxxxxx> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> --- v2: Correct system segment related assert()-s. --- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c +++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c @@ -139,7 +139,17 @@ static int fuzz_read( struct x86_emulate_ctxt *ctxt) { /* Reads expected for all user and system segments. */ - assert(is_x86_user_segment(seg) || is_x86_system_segment(seg)); + if ( is_x86_user_segment(seg) ) + assert(ctxt->addr_size == 64 || !(offset >> 32)); + else if ( seg == x86_seg_tr ) + /* + * The TSS is special in that accesses below the segment base are + * possible, as the Interrupt Redirection Bitmap starts 32 bytes + * ahead of the I/O Bitmap, regardless of the value of the latter. + */ + assert((long)offset < 0 ? 
(long)offset > -32 : !(offset >> 17)); + else + assert(is_x86_system_segment(seg) && !(offset >> 16)); return data_read(ctxt, seg, "read", p_data, bytes); } @@ -162,6 +172,13 @@ static int fuzz_insn_fetch( { assert(seg == x86_seg_cs); + /* Minimal segment limit checking, until full one is being put in place. */ + if ( ctxt->addr_size < 64 && (offset >> 32) ) + { + x86_emul_hw_exception(13, 0, ctxt); + return X86EMUL_EXCEPTION; + } + /* * Zero-length instruction fetches are made at the destination of jumps, * to perform segmentation checks. No data needs returning. @@ -232,6 +249,7 @@ static int fuzz_rep_ins( struct x86_emulate_ctxt *ctxt) { assert(dst_seg == x86_seg_es); + assert(ctxt->addr_size == 64 || !(dst_offset >> 32)); return _fuzz_rep_read(ctxt, "rep_ins", reps); } @@ -247,6 +265,7 @@ static int fuzz_rep_movs( { assert(is_x86_user_segment(src_seg)); assert(dst_seg == x86_seg_es); + assert(ctxt->addr_size == 64 || !((src_offset | dst_offset) >> 32)); return _fuzz_rep_read(ctxt, "rep_movs", reps); } @@ -260,6 +279,7 @@ static int fuzz_rep_outs( struct x86_emulate_ctxt *ctxt) { assert(is_x86_user_segment(src_seg)); + assert(ctxt->addr_size == 64 || !(src_offset >> 32)); return _fuzz_rep_write(ctxt, "rep_outs", reps); } @@ -277,6 +297,7 @@ static int fuzz_rep_stos( * for CLZERO. */ assert(is_x86_user_segment(seg)); + assert(ctxt->addr_size == 64 || !(offset >> 32)); return _fuzz_rep_write(ctxt, "rep_stos", reps); } @@ -290,6 +311,7 @@ static int fuzz_write( { /* Writes not expected for any system segments. */ assert(is_x86_user_segment(seg)); + assert(ctxt->addr_size == 64 || !(offset >> 32)); return maybe_fail(ctxt, "write", true); } @@ -306,8 +328,10 @@ static int fuzz_cmpxchg( * Cmpxchg expected for user segments, and setting accessed/busy bits in * GDT/LDT enties, but not expected for any IDT or TR accesses. 
*/ - assert(is_x86_user_segment(seg) || - seg == x86_seg_gdtr || seg == x86_seg_ldtr); + if ( is_x86_user_segment(seg) ) + assert(ctxt->addr_size == 64 || !(offset >> 32)); + else + assert((seg == x86_seg_gdtr || seg == x86_seg_ldtr) && !(offset >> 16)); return maybe_fail(ctxt, "cmpxchg", true); } @@ -319,6 +343,7 @@ static int fuzz_invlpg( { /* invlpg(), unlike all other hooks, may be called with x86_seg_none. */ assert(is_x86_user_segment(seg) || seg == x86_seg_none); + assert(ctxt->addr_size == 64 || !(offset >> 32)); return maybe_fail(ctxt, "invlpg", false); } --- a/xen/arch/x86/x86_emulate/x86_emulate.c +++ b/xen/arch/x86/x86_emulate/x86_emulate.c @@ -1249,10 +1249,10 @@ static void __put_rep_prefix( /* Clip maximum repetitions so that the index register at most just wraps. */ #define truncate_ea_and_reps(ea, reps, bytes_per_rep) ({ \ - unsigned long todo__, ea__ = truncate_word(ea, ad_bytes); \ + unsigned long todo__, ea__ = truncate_ea(ea); \ if ( !(_regs.eflags & X86_EFLAGS_DF) ) \ - todo__ = truncate_word(-(ea), ad_bytes) / (bytes_per_rep); \ - else if ( truncate_word((ea) + (bytes_per_rep) - 1, ad_bytes) < ea__ )\ + todo__ = truncate_ea(-ea__) / (bytes_per_rep); \ + else if ( truncate_ea(ea__ + (bytes_per_rep) - 1) < ea__ ) \ todo__ = 1; \ else \ todo__ = ea__ / (bytes_per_rep) + 1; \ @@ -3128,6 +3128,7 @@ x86_emulate( op_bytes + (((-src.val - 1) >> 3) & ~(op_bytes - 1L)); else ea.mem.off += (src.val >> 3) & ~(op_bytes - 1L); + ea.mem.off = truncate_ea(ea.mem.off); } /* Bit index always truncated to within range. */ _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®. |