[Xen-changelog] [xen master] x86/hvm: Fix segmentation logic for system segments
commit ce07f6d8cb580a9dbb744f7e92073777b79af22a
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Mar 31 17:03:26 2017 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Apr 6 18:12:59 2017 +0100

    x86/hvm: Fix segmentation logic for system segments

    c/s c785f759718 "x86/emul: Prepare to allow use of system segments for
    memory references" made alterations to hvm_virtual_to_linear_addr() to
    allow for the use of system segments.

    However, the determination of which segmentation mode to use was based
    on the current address size from emulation.  In particular, it is
    wrong for system segment accesses while executing in a compatibility
    mode code segment.

    When long mode is active, all system segments have a 64-bit base, and
    this must not be truncated during the calculation of the linear
    address.  (Note that the presence and limit checks for system segments
    behave the same, and are already uniformly applied in both cases.)

    Replace the existing addr_size parameter with active_cs, which gets
    used in combination with current to work out which segmentation logic
    to use.

    While here, also fix the determination of segmentation to use for
    vm86 mode, which is a protected mode facility but which uses real mode
    segmentation.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c      |  6 ++--
 xen/arch/x86/hvm/hvm.c          | 69 +++++++++++++++++++++--------------------
 xen/arch/x86/mm/shadow/common.c |  3 +-
 xen/include/asm-x86/hvm/hvm.h   |  2 +-
 4 files changed, 41 insertions(+), 39 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 3d084ca..87ca801 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -549,7 +549,7 @@ static int hvmemul_virtual_to_linear(
         okay = hvm_virtual_to_linear_addr(
             seg, reg, offset - (*reps - 1) * bytes_per_rep,
             *reps * bytes_per_rep, access_type,
-            hvmemul_ctxt->ctxt.addr_size, linear);
+            hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt), linear);
         *linear += (*reps - 1) * bytes_per_rep;
         if ( hvmemul_ctxt->ctxt.addr_size != 64 )
             *linear = (uint32_t)*linear;
@@ -558,7 +558,7 @@ static int hvmemul_virtual_to_linear(
     {
         okay = hvm_virtual_to_linear_addr(
             seg, reg, offset, *reps * bytes_per_rep, access_type,
-            hvmemul_ctxt->ctxt.addr_size, linear);
+            hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt), linear);
     }
 
     if ( okay )
@@ -2075,7 +2075,7 @@ void hvm_emulate_init_per_insn(
                                         hvmemul_ctxt->insn_buf_eip,
                                         sizeof(hvmemul_ctxt->insn_buf),
                                         hvm_access_insn_fetch,
-                                        hvmemul_ctxt->ctxt.addr_size,
+                                        &hvmemul_ctxt->seg_reg[x86_seg_cs],
                                         &addr) &&
              hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
                                          sizeof(hvmemul_ctxt->insn_buf),
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index dbc3b8a..fdf13db 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2394,9 +2394,10 @@ bool_t hvm_virtual_to_linear_addr(
     unsigned long offset,
     unsigned int bytes,
     enum hvm_access_type access_type,
-    unsigned int addr_size,
+    const struct segment_register *active_cs,
     unsigned long *linear_addr)
 {
+    const struct vcpu *curr = current;
     unsigned long addr = offset, last_byte;
     bool_t okay = 0;
 
@@ -2408,10 +2409,11 @@ bool_t hvm_virtual_to_linear_addr(
      */
     ASSERT(seg < x86_seg_none);
 
-    if ( !(current->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
+    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
+         (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
     {
         /*
-         * REAL MODE: Don't bother with segment access checks.
+         * REAL/VM86 MODE: Don't bother with segment access checks.
          * Certain of them are not done in native real mode anyway.
          */
         addr = (uint32_t)(addr + reg->base);
@@ -2419,10 +2421,33 @@ bool_t hvm_virtual_to_linear_addr(
         if ( last_byte < addr )
             goto out;
     }
-    else if ( addr_size != 64 )
+    else if ( hvm_long_mode_active(curr) &&
+              (is_x86_system_segment(seg) || active_cs->attr.fields.l) )
+    {
+        /*
+         * User segments are always treated as present.  System segment may
+         * not be, and also incur limit checks.
+         */
+        if ( is_x86_system_segment(seg) &&
+             (!reg->attr.fields.p || (offset + bytes - !!bytes) > reg->limit) )
+            goto out;
+
+        /*
+         * LONG MODE: FS, GS and system segments: add segment base.  All
+         * addresses must be canonical.
+         */
+        if ( seg >= x86_seg_fs )
+            addr += reg->base;
+
+        last_byte = addr + bytes - !!bytes;
+        if ( !is_canonical_address(addr) || last_byte < addr ||
+             !is_canonical_address(last_byte) )
+            goto out;
+    }
+    else
     {
         /*
-         * COMPATIBILITY MODE: Apply segment checks and add base.
+         * PROTECTED/COMPATIBILITY MODE: Apply segment checks and add base.
          */
 
         /*
@@ -2469,28 +2494,6 @@ bool_t hvm_virtual_to_linear_addr(
         else if ( (last_byte > reg->limit) || (last_byte < offset) )
             goto out; /* last byte is beyond limit or wraps 0xFFFFFFFF */
     }
-    else
-    {
-        /*
-         * User segments are always treated as present.  System segment may
-         * not be, and also incur limit checks.
-         */
-        if ( is_x86_system_segment(seg) &&
-             (!reg->attr.fields.p || (offset + bytes - !!bytes) > reg->limit) )
-            goto out;
-
-        /*
-         * LONG MODE: FS, GS and system segments: add segment base.  All
-         * addresses must be canonical.
-         */
-        if ( seg >= x86_seg_fs )
-            addr += reg->base;
-
-        last_byte = addr + bytes - !!bytes;
-        if ( !is_canonical_address(addr) || last_byte < addr ||
-             !is_canonical_address(last_byte) )
-            goto out;
-    }
 
     /* All checks ok. */
     okay = 1;
@@ -3026,11 +3029,12 @@ void hvm_task_switch(
 
     if ( errcode >= 0 )
     {
+        struct segment_register cs;
         unsigned long linear_addr;
         unsigned int opsz, sp;
 
-        hvm_get_segment_register(v, x86_seg_cs, &segr);
-        opsz = segr.attr.fields.db ? 4 : 2;
+        hvm_get_segment_register(v, x86_seg_cs, &cs);
+        opsz = cs.attr.fields.db ? 4 : 2;
         hvm_get_segment_register(v, x86_seg_ss, &segr);
         if ( segr.attr.fields.db )
             sp = regs->esp -= opsz;
@@ -3038,8 +3042,7 @@ void hvm_task_switch(
             sp = regs->sp -= opsz;
         if ( hvm_virtual_to_linear_addr(x86_seg_ss, &segr, sp, opsz,
                                         hvm_access_write,
-                                        16 << segr.attr.fields.db,
-                                        &linear_addr) )
+                                        &cs, &linear_addr) )
         {
             rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
                                           &pfinfo);
@@ -3619,9 +3622,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
 
         if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
                                         sizeof(sig), hvm_access_insn_fetch,
-                                        (hvm_long_mode_active(cur) &&
-                                         cs->attr.fields.l) ? 64 :
-                                        cs->attr.fields.db ? 32 : 16, &addr) &&
+                                        cs, &addr) &&
             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
                                          walk, NULL) == HVMCOPY_okay) &&
             (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 14a07dd..574337c 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -152,7 +152,8 @@ static int hvm_translate_virtual_addr(
         return -PTR_ERR(reg);
 
     okay = hvm_virtual_to_linear_addr(
-        seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, linear);
+        seg, reg, offset, bytes, access_type,
+        hvm_get_seg_reg(x86_seg_cs, sh_ctxt), linear);
 
     if ( !okay )
     {
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 49c8001..97f9771 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -472,7 +472,7 @@ bool_t hvm_virtual_to_linear_addr(
     unsigned long offset,
     unsigned int bytes,
     enum hvm_access_type access_type,
-    unsigned int addr_size,
+    const struct segment_register *active_cs,
     unsigned long *linear_addr);
 
 void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
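
For readers tracing the new control flow in hvm_virtual_to_linear_addr(),
the mode selection can be condensed as below.  This is an illustrative
sketch, not Xen code: the helper name and the flag parameters are
hypothetical stand-ins for CR0.PE, EFLAGS.VM, hvm_long_mode_active(),
is_x86_system_segment() and CS.L.

    #include <stdbool.h>

    enum seg_mode { SEG_REAL, SEG_LONG, SEG_PROT };

    /* Which segmentation rules apply to one memory access? */
    static enum seg_mode classify(bool cr0_pe, bool eflags_vm,
                                  bool long_mode_active,
                                  bool system_segment, bool cs_l)
    {
        /* vm86 is a protected mode facility, but segments real mode style. */
        if ( !cr0_pe || eflags_vm )
            return SEG_REAL;

        /*
         * System segments in long mode always follow the long mode rules,
         * even when the active code segment is a compatibility mode one.
         */
        if ( long_mode_active && (system_segment || cs_l) )
            return SEG_LONG;

        return SEG_PROT; /* Protected/compatibility: limit checks apply. */
    }

The second test is the heart of the fix: the old addr_size-based test sent
system segment accesses made from a compatibility mode code segment down
the 32-bit path, truncating their 64-bit bases.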
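The long mode rules themselves (full 64-bit base, no truncation, canonical
addresses) amount to roughly the following.  A minimal sketch assuming
48-bit virtual addresses and hypothetical names; the hypervisor's real
check is the is_canonical_address()/last_byte logic visible in the hunk
above.

    #include <stdbool.h>
    #include <stdint.h>

    /* Canonical: bits 63:47 all equal, i.e. a sign extension of bit 47. */
    static bool is_canonical(uint64_t addr)
    {
        return ((int64_t)addr >> 47) == ((int64_t)addr >> 63);
    }

    /* Long mode: add the full 64-bit segment base; check both ends. */
    static bool long_mode_linear(uint64_t seg_base, uint64_t offset,
                                 unsigned int bytes, uint64_t *linear)
    {
        uint64_t addr = seg_base + offset;          /* never truncated */
        uint64_t last_byte = addr + bytes - !!bytes;

        if ( !is_canonical(addr) || last_byte < addr ||
             !is_canonical(last_byte) )
            return false;                           /* access would fault */

        *linear = addr;
        return true;
    }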
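By contrast, the real/vm86 branch keeps real mode semantics: the segment
base is selector << 4 and the sum wraps within 32 bits, which is what
addr = (uint32_t)(addr + reg->base) does in the patch.  Again a sketch
with hypothetical names:

    #include <stdint.h>

    /* Real/vm86 mode: paragraph-granular base, 32-bit wrap, no checks. */
    static uint32_t real_mode_linear(uint16_t selector, uint32_t offset)
    {
        uint32_t base = (uint32_t)selector << 4;

        return (uint32_t)(base + offset);
    }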