[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen stable-4.6] x86/segment: Bounds check accesses to emulation ctxt->seg_reg[]
commit 6b5bb502a93bcb7617ea1f3f5a8712f2b9f33d90 Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> AuthorDate: Mon Sep 12 16:00:56 2016 +0200 Commit: Jan Beulich <jbeulich@xxxxxxxx> CommitDate: Mon Sep 12 16:00:56 2016 +0200 x86/segment: Bounds check accesses to emulation ctxt->seg_reg[] HVM HAP codepaths have space for all segment registers in the seg_reg[] cache (with x86_seg_none still risking an array overrun), while the shadow codepaths only have space for the user segments. Range check the input segment of *_get_seg_reg() against the size of the array used to cache the results, to avoid overruns in the case that the callers don't filter their input suitably. Subsume the is_x86_user_segment(seg) checks from the shadow code, which were an incomplete attempt at range checking, and are now superseded. Make hvm_get_seg_reg() static, as it is not used outside of shadow/common.c No functional change, but far easier to reason that no overflow is possible. Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Acked-by: Tim Deegan <tim@xxxxxxx> Acked-by: Jan Beulich <jbeulich@xxxxxxxx> xen/x86: Fix build with clang following c/s 4fa0105 https://travis-ci.org/xen-project/xen/jobs/158494027#L2344 Clang complains: emulate.c:2016:14: error: comparison of unsigned enum expression < 0 is always false [-Werror,-Wtautological-compare] if ( seg < 0 || seg >= ARRAY_SIZE(hvmemul_ctxt->seg_reg) ) ~~~ ^ ~ Clang is wrong to raise a warning like this. The signed-ness of an enum is implementation defined in C, and robust code must not assume the choices made by the compiler. In this case, dropping the < 0 check creates a latent bug which would result in an array underflow when compiled with a compiler which chooses a signed enum. Work around the bug by explicitly pulling seg into an unsigned integer, and only perform the upper bounds check. No functional change. 
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx> Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx> master commit: 4fa0105d95be6e7145a1f6fd1036ccd43976228c master date: 2016-09-08 16:39:46 +0100 master commit: 4c47c47938ea24c73d9459f9f0b6923513772b5d master date: 2016-09-09 15:31:01 +0100 --- xen/arch/x86/hvm/emulate.c | 24 +++++++++++++++++++++--- xen/arch/x86/mm/shadow/common.c | 31 +++++++++++++++---------------- xen/include/asm-x86/hvm/emulate.h | 1 + 3 files changed, 37 insertions(+), 19 deletions(-) diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index 848f46e..771cb45 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -526,6 +526,8 @@ static int hvmemul_virtual_to_linear( ? 1 : 4096); reg = hvmemul_get_seg_reg(seg, hvmemul_ctxt); + if ( IS_ERR(reg) ) + return -PTR_ERR(reg); if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) ) { @@ -1360,6 +1362,10 @@ static int hvmemul_read_segment( struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt); + + if ( IS_ERR(sreg) ) + return -PTR_ERR(sreg); + memcpy(reg, sreg, sizeof(struct segment_register)); return X86EMUL_OKAY; } @@ -1373,6 +1379,9 @@ static int hvmemul_write_segment( container_of(ctxt, struct hvm_emulate_ctxt, ctxt); struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt); + if ( IS_ERR(sreg) ) + return -PTR_ERR(sreg); + memcpy(sreg, reg, sizeof(struct segment_register)); __set_bit(seg, &hvmemul_ctxt->seg_reg_dirty); @@ -1911,13 +1920,22 @@ void hvm_emulate_writeback( } } +/* + * Callers which pass a known in-range x86_segment can rely on the return + * pointer being valid. Other callers must explicitly check for errors. 
+ */ struct segment_register *hvmemul_get_seg_reg( enum x86_segment seg, struct hvm_emulate_ctxt *hvmemul_ctxt) { - if ( !__test_and_set_bit(seg, &hvmemul_ctxt->seg_reg_accessed) ) - hvm_get_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]); - return &hvmemul_ctxt->seg_reg[seg]; + unsigned int idx = seg; + + if ( idx >= ARRAY_SIZE(hvmemul_ctxt->seg_reg) ) + return ERR_PTR(-X86EMUL_UNHANDLEABLE); + + if ( !__test_and_set_bit(idx, &hvmemul_ctxt->seg_reg_accessed) ) + hvm_get_segment_register(current, idx, &hvmemul_ctxt->seg_reg[idx]); + return &hvmemul_ctxt->seg_reg[idx]; } static const char *guest_x86_mode_to_str(int mode) diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c index 83f6938..74684cb 100644 --- a/xen/arch/x86/mm/shadow/common.c +++ b/xen/arch/x86/mm/shadow/common.c @@ -125,12 +125,22 @@ __initcall(shadow_audit_key_init); /* x86 emulator support for the shadow code */ +/* + * Callers which pass a known in-range x86_segment can rely on the return + * pointer being valid. Other callers must explicitly check for errors. + */ struct segment_register *hvm_get_seg_reg( enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt) { - struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg]; - if ( !__test_and_set_bit(seg, &sh_ctxt->valid_seg_regs) ) - hvm_get_segment_register(current, seg, seg_reg); + unsigned int idx = seg; + struct segment_register *seg_reg; + + if ( idx >= ARRAY_SIZE(sh_ctxt->seg_reg) ) + return ERR_PTR(-X86EMUL_UNHANDLEABLE); + + seg_reg = &sh_ctxt->seg_reg[idx]; + if ( !__test_and_set_bit(idx, &sh_ctxt->valid_seg_regs) ) + hvm_get_segment_register(current, idx, seg_reg); return seg_reg; } @@ -145,14 +155,9 @@ static int hvm_translate_linear_addr( struct segment_register *reg; int okay; - /* - * Can arrive here with non-user segments. However, no such cirucmstance - * is part of a legitimate pagetable update, so fail the emulation. 
- */ - if ( !is_x86_user_segment(seg) ) - return X86EMUL_UNHANDLEABLE; - reg = hvm_get_seg_reg(seg, sh_ctxt); + if ( IS_ERR(reg) ) + return -PTR_ERR(reg); okay = hvm_virtual_to_linear_addr( seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr); @@ -254,9 +259,6 @@ hvm_emulate_write(enum x86_segment seg, unsigned long addr; int rc; - if ( !is_x86_user_segment(seg) ) - return X86EMUL_UNHANDLEABLE; - /* How many emulations could we save if we unshadowed on stack writes? */ if ( seg == x86_seg_ss ) perfc_incr(shadow_fault_emulate_stack); @@ -284,9 +286,6 @@ hvm_emulate_cmpxchg(enum x86_segment seg, unsigned long addr, old[2], new[2]; int rc; - if ( !is_x86_user_segment(seg) ) - return X86EMUL_UNHANDLEABLE; - rc = hvm_translate_linear_addr( seg, offset, bytes, hvm_access_write, sh_ctxt, &addr); if ( rc ) diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h index 142d1b6..3aabcbe 100644 --- a/xen/include/asm-x86/hvm/emulate.h +++ b/xen/include/asm-x86/hvm/emulate.h @@ -13,6 +13,7 @@ #define __ASM_X86_HVM_EMULATE_H__ #include <xen/config.h> +#include <xen/err.h> #include <asm/hvm/hvm.h> #include <asm/x86_emulate.h> -- generated by git-patchbot for /home/xen/git/xen.git#stable-4.6 _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxx https://lists.xenproject.org/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |