[Xen-changelog] [xen-3.2-testing] vmx realmode: Emulate protected-mode transition while CS and SS have bad selector values
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1202291961 0
# Node ID 2f870774a5e2b9d31b27f42fc6b40307253b625a
# Parent  b01f5d834755e443aa61add49cbdaa37a883f02f
vmx realmode: Emulate protected-mode transition while CS and SS have
bad selector values (bottom two bits non-zero).

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset: 16979:92734271810aaa32d27fce777684649995fb1665
xen-unstable date:      Tue Feb 05 15:45:10 2008 +0000
---
 xen/arch/x86/hvm/svm/svm.c          |    4
 xen/arch/x86/hvm/vmx/realmode.c     |  140 ++++++++++++++++++++--------
 xen/arch/x86/hvm/vmx/vmx.c          |    4
 xen/arch/x86/hvm/vmx/x86_32/exits.S |    4
 xen/arch/x86/hvm/vmx/x86_64/exits.S |    4
 xen/arch/x86/mm/shadow/common.c     |   22 ++++
 xen/arch/x86/x86_32/asm-offsets.c   |    2
 xen/arch/x86/x86_64/asm-offsets.c   |    2
 xen/arch/x86/x86_emulate.c          |  175 ++++++++++++++++++++++++++++----
 xen/include/asm-x86/hvm/vmx/vmcs.h  |   10 ++
 xen/include/asm-x86/x86_emulate.h   |   12 ++
 11 files changed, 314 insertions(+), 65 deletions(-)

diff -r b01f5d834755 -r 2f870774a5e2 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c	Wed Feb 06 09:59:21 2008 +0000
@@ -547,8 +547,8 @@ static unsigned long svm_get_segment_bas
     case x86_seg_gdtr: return vmcb->gdtr.base;
     case x86_seg_idtr: return vmcb->idtr.base;
     case x86_seg_ldtr: svm_sync_vmcb(v); return vmcb->ldtr.base;
-    }
-    BUG();
+    default: BUG();
+    }
     return 0;
 }
 
diff -r b01f5d834755 -r 2f870774a5e2 xen/arch/x86/hvm/vmx/realmode.c
--- a/xen/arch/x86/hvm/vmx/realmode.c	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/realmode.c	Wed Feb 06 09:59:21 2008 +0000
@@ -118,6 +118,18 @@ static void realmode_deliver_exception(
     }
 }
 
+static uint32_t virtual_to_linear(
+    enum x86_segment seg,
+    uint32_t offset,
+    struct realmode_emulate_ctxt *rm_ctxt)
+{
+    uint32_t addr = offset;
+    if ( seg == x86_seg_none )
+        return addr;
+    ASSERT(is_x86_user_segment(seg));
+    return addr + rm_ctxt->seg_reg[seg].base;
+}
+
 static int
 realmode_read(
     enum x86_segment seg,
@@ -127,13 +139,16 @@ realmode_read(
     enum hvm_access_type access_type,
     struct realmode_emulate_ctxt *rm_ctxt)
 {
-    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
+    uint32_t addr = virtual_to_linear(seg, offset, rm_ctxt);
 
     *val = 0;
 
-    if ( hvm_copy_from_guest_phys(val, addr, bytes) )
+    if ( hvm_copy_from_guest_virt_nofault(val, addr, bytes) )
     {
         struct vcpu *curr = current;
+
+        if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+            return X86EMUL_UNHANDLEABLE;
 
         if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
             return X86EMUL_UNHANDLEABLE;
@@ -202,11 +217,14 @@ realmode_emulate_write(
 {
     struct realmode_emulate_ctxt *rm_ctxt =
         container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
-    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
-
-    if ( hvm_copy_to_guest_phys(addr, &val, bytes) )
+    uint32_t addr = virtual_to_linear(seg, offset, rm_ctxt);
+
+    if ( hvm_copy_to_guest_virt_nofault(addr, &val, bytes) )
     {
         struct vcpu *curr = current;
+
+        if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+            return X86EMUL_UNHANDLEABLE;
 
         if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
             return X86EMUL_UNHANDLEABLE;
@@ -244,7 +262,10 @@ realmode_rep_ins(
     struct realmode_emulate_ctxt *rm_ctxt =
         container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
-    uint32_t paddr = rm_ctxt->seg_reg[dst_seg].base + dst_offset;
+    uint32_t paddr = virtual_to_linear(dst_seg, dst_offset, rm_ctxt);
+
+    if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+        return X86EMUL_UNHANDLEABLE;
 
     if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
         return X86EMUL_UNHANDLEABLE;
@@ -277,7 +298,10 @@ realmode_rep_outs(
     struct realmode_emulate_ctxt *rm_ctxt =
         container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
-    uint32_t paddr = rm_ctxt->seg_reg[src_seg].base + src_offset;
+    uint32_t paddr = virtual_to_linear(src_seg, src_offset, rm_ctxt);
+
+    if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+        return X86EMUL_UNHANDLEABLE;
 
     if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
         return X86EMUL_UNHANDLEABLE;
@@ -310,9 +334,29 @@ realmode_write_segment(
 {
     struct realmode_emulate_ctxt *rm_ctxt =
         container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
+    struct vcpu *curr = current;
+
+    if ( seg == x86_seg_cs )
+    {
+        if ( reg->attr.fields.dpl != 0 )
+            return X86EMUL_UNHANDLEABLE;
+        curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_CS;
+        if ( reg->sel & 3 )
+            curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_CS;
+    }
+
+    if ( seg == x86_seg_ss )
+    {
+        if ( reg->attr.fields.dpl != 0 )
+            return X86EMUL_UNHANDLEABLE;
+        curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_SS;
+        if ( reg->sel & 3 )
+            curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_SS;
+        rm_ctxt->flags.mov_ss = 1;
+    }
+
     memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));
-    if ( seg == x86_seg_ss )
-        rm_ctxt->flags.mov_ss = 1;
+
     return X86EMUL_OKAY;
 }
 
@@ -336,7 +380,7 @@ realmode_read_io(
     if ( !curr->arch.hvm_vmx.real_mode_io_completed )
         return X86EMUL_RETRY;
-    
+
     *val = curr->arch.hvm_vmx.real_mode_io_data;
     curr->arch.hvm_vmx.real_mode_io_completed = 0;
 
@@ -506,10 +550,18 @@ static int realmode_hlt(
 
 static int realmode_inject_hw_exception(
     uint8_t vector,
-    struct x86_emulate_ctxt *ctxt)
-{
-    struct realmode_emulate_ctxt *rm_ctxt =
-        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
+    uint16_t error_code,
+    struct x86_emulate_ctxt *ctxt)
+{
+    struct realmode_emulate_ctxt *rm_ctxt =
+        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
+
+    /* We don't emulate protected-mode exception delivery. */
+    if ( current->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+        return X86EMUL_UNHANDLEABLE;
+
+    if ( error_code != 0 )
+        return X86EMUL_UNHANDLEABLE;
 
     rm_ctxt->exn_vector = vector;
     rm_ctxt->exn_insn_len = 0;
@@ -524,6 +576,10 @@ static int realmode_inject_sw_interrupt(
 {
     struct realmode_emulate_ctxt *rm_ctxt =
         container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
+
+    /* We don't emulate protected-mode exception delivery. */
+    if ( current->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+        return X86EMUL_UNHANDLEABLE;
 
     rm_ctxt->exn_vector = vector;
     rm_ctxt->exn_insn_len = insn_len;
@@ -568,12 +624,22 @@ static void realmode_emulate_one(struct
     struct vcpu *curr = current;
     u32 new_intr_shadow;
     int rc, io_completed;
-
-    rm_ctxt->insn_buf_eip = regs->eip;
-    (void)hvm_copy_from_guest_phys(
-        rm_ctxt->insn_buf,
-        (uint32_t)(rm_ctxt->seg_reg[x86_seg_cs].base + regs->eip),
-        sizeof(rm_ctxt->insn_buf));
+    unsigned long addr;
+
+    rm_ctxt->ctxt.addr_size =
+        rm_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
+    rm_ctxt->ctxt.sp_size =
+        rm_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
+
+    rm_ctxt->insn_buf_eip = (uint32_t)regs->eip;
+    addr = virtual_to_linear(x86_seg_cs, regs->eip, rm_ctxt);
+    if ( hvm_fetch_from_guest_virt_nofault(rm_ctxt->insn_buf, addr,
+                                           sizeof(rm_ctxt->insn_buf))
+         != HVMCOPY_okay )
+    {
+        gdprintk(XENLOG_ERR, "Failed to pre-fetch instruction bytes.\n");
+        goto fail;
+    }
 
     rm_ctxt->flag_word = 0;
 
@@ -670,39 +736,35 @@ void vmx_realmode(struct cpu_user_regs *
     for ( i = 0; i < 10; i++ )
        hvm_get_segment_register(curr, i, &rm_ctxt.seg_reg[i]);
 
-    rm_ctxt.ctxt.addr_size =
-        rm_ctxt.seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
-    rm_ctxt.ctxt.sp_size =
-        rm_ctxt.seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
-
     rm_ctxt.intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
 
     if ( curr->arch.hvm_vmx.real_mode_io_in_progress ||
          curr->arch.hvm_vmx.real_mode_io_completed )
        realmode_emulate_one(&rm_ctxt);
 
-    if ( intr_info & INTR_INFO_VALID_MASK )
+    /* Only deliver interrupts into emulated real mode. */
+    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
+         (intr_info & INTR_INFO_VALID_MASK) )
     {
         realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
         __vmwrite(VM_ENTRY_INTR_INFO, 0);
     }
 
-    while ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
+    while ( curr->arch.hvm_vmx.vmxemul &&
             !softirq_pending(smp_processor_id()) &&
-            !hvm_local_events_need_delivery(curr) &&
-            !curr->arch.hvm_vmx.real_mode_io_in_progress )
+            !curr->arch.hvm_vmx.real_mode_io_in_progress &&
+            /* Check for pending interrupts only in proper real mode. */
+            ((curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
+             !hvm_local_events_need_delivery(curr)) )
         realmode_emulate_one(&rm_ctxt);
 
-    /*
-     * Cannot enter protected mode with bogus selector RPLs and DPLs. Hence we
-     * fix up as best we can, even though this deviates from native execution
-     */
-    if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
-    {
-        /* CS.RPL == SS.RPL == SS.DPL == 0. */
-        rm_ctxt.seg_reg[x86_seg_cs].sel &= ~3;
-        rm_ctxt.seg_reg[x86_seg_ss].sel &= ~3;
-        /* DS,ES,FS,GS: The most uninvasive trick is to set DPL == RPL. */
+    if ( !curr->arch.hvm_vmx.vmxemul )
+    {
+        /*
+         * Cannot enter protected mode with bogus selector RPLs and DPLs.
+         * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
+         * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
+         */
         rm_ctxt.seg_reg[x86_seg_ds].attr.fields.dpl =
             rm_ctxt.seg_reg[x86_seg_ds].sel & 3;
         rm_ctxt.seg_reg[x86_seg_es].attr.fields.dpl =
diff -r b01f5d834755 -r 2f870774a5e2 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 06 09:59:21 2008 +0000
@@ -1039,6 +1039,10 @@ static void vmx_update_guest_cr(struct v
                 __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
         }
 
+        v->arch.hvm_vmx.vmxemul &= ~VMXEMUL_REALMODE;
+        if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
+            v->arch.hvm_vmx.vmxemul |= VMXEMUL_REALMODE;
+
         v->arch.hvm_vcpu.hw_cr[0] =
             v->arch.hvm_vcpu.guest_cr[0] |
             X86_CR0_NE | X86_CR0_PG | X86_CR0_WP | X86_CR0_PE;
diff -r b01f5d834755 -r 2f870774a5e2 xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S	Wed Feb 06 09:59:21 2008 +0000
@@ -116,8 +116,8 @@ ENTRY(vmx_asm_do_vmentry)
         VMWRITE(UREGS_eflags)
 
 #ifndef VMXASSIST
-        testb $X86_CR0_PE,VCPU_hvm_guest_cr0(%ebx)
-        jz vmx_goto_realmode
+        testb $0xff,VCPU_vmx_emul(%ebx)
+        jnz vmx_goto_realmode
 #endif
 
         cmpb $0,VCPU_vmx_launched(%ebx)
diff -r b01f5d834755 -r 2f870774a5e2 xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Wed Feb 06 09:59:21 2008 +0000
@@ -135,8 +135,8 @@ ENTRY(vmx_asm_do_vmentry)
         VMWRITE(UREGS_eflags)
 
 #ifndef VMXASSIST
-        testb $X86_CR0_PE,VCPU_hvm_guest_cr0(%rbx)
-        jz vmx_goto_realmode
+        testb $0xff,VCPU_vmx_emul(%rbx)
+        jnz vmx_goto_realmode
 #endif
 
         cmpb $0,VCPU_vmx_launched(%rbx)
diff -r b01f5d834755 -r 2f870774a5e2 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/arch/x86/mm/shadow/common.c	Wed Feb 06 09:59:21 2008 +0000
@@ -176,6 +176,8 @@ hvm_emulate_read(enum x86_segment seg,
                  unsigned int bytes,
                  struct x86_emulate_ctxt *ctxt)
 {
+    if ( !is_x86_user_segment(seg) )
+        return X86EMUL_UNHANDLEABLE;
     return hvm_read(seg, offset, val, bytes, hvm_access_read,
                     container_of(ctxt, struct sh_emulate_ctxt, ctxt));
 }
@@ -191,6 +193,8 @@ hvm_emulate_insn_fetch(enum x86_segment
         container_of(ctxt, struct sh_emulate_ctxt, ctxt);
     unsigned int insn_off = offset - sh_ctxt->insn_buf_eip;
 
+    ASSERT(seg == x86_seg_cs);
+
     /* Fall back if requested bytes are not in the prefetch cache. */
     if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
         return hvm_read(seg, offset, val, bytes,
@@ -214,6 +218,9 @@ hvm_emulate_write(enum x86_segment seg,
     struct vcpu *v = current;
     unsigned long addr;
     int rc;
+
+    if ( !is_x86_user_segment(seg) )
+        return X86EMUL_UNHANDLEABLE;
 
     /* How many emulations could we save if we unshadowed on stack writes? */
     if ( seg == x86_seg_ss )
@@ -241,6 +248,9 @@ hvm_emulate_cmpxchg(enum x86_segment seg
     struct vcpu *v = current;
     unsigned long addr;
     int rc;
+
+    if ( !is_x86_user_segment(seg) )
+        return X86EMUL_UNHANDLEABLE;
 
     rc = hvm_translate_linear_addr(
         seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
@@ -266,6 +276,9 @@ hvm_emulate_cmpxchg8b(enum x86_segment s
     unsigned long addr;
     int rc;
 
+    if ( !is_x86_user_segment(seg) )
+        return X86EMUL_UNHANDLEABLE;
+
     rc = hvm_translate_linear_addr(
         seg, offset, 8, hvm_access_write, sh_ctxt, &addr);
     if ( rc )
@@ -292,6 +305,9 @@ pv_emulate_read(enum x86_segment seg,
 {
     unsigned int rc;
 
+    if ( !is_x86_user_segment(seg) )
+        return X86EMUL_UNHANDLEABLE;
+
     *val = 0;
     if ( (rc = copy_from_user((void *)val, (void *)offset, bytes)) != 0 )
     {
@@ -312,6 +328,8 @@ pv_emulate_write(enum x86_segment seg,
     struct sh_emulate_ctxt *sh_ctxt =
         container_of(ctxt, struct sh_emulate_ctxt, ctxt);
     struct vcpu *v = current;
+    if ( !is_x86_user_segment(seg) )
+        return X86EMUL_UNHANDLEABLE;
     return v->arch.paging.mode->shadow.x86_emulate_write(
         v, offset, &val, bytes, sh_ctxt);
 }
@@ -327,6 +345,8 @@ pv_emulate_cmpxchg(enum x86_segment seg,
     struct sh_emulate_ctxt *sh_ctxt =
         container_of(ctxt, struct sh_emulate_ctxt, ctxt);
     struct vcpu *v = current;
+    if ( !is_x86_user_segment(seg) )
+        return X86EMUL_UNHANDLEABLE;
     return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
         v, offset, old, new, bytes, sh_ctxt);
 }
@@ -343,6 +363,8 @@ pv_emulate_cmpxchg8b(enum x86_segment se
     struct sh_emulate_ctxt *sh_ctxt =
         container_of(ctxt, struct sh_emulate_ctxt, ctxt);
     struct vcpu *v = current;
+    if ( !is_x86_user_segment(seg) )
+        return X86EMUL_UNHANDLEABLE;
     return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
         v, offset, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
 }
diff -r b01f5d834755 -r 2f870774a5e2 xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/arch/x86/x86_32/asm-offsets.c	Wed Feb 06 09:59:21 2008 +0000
@@ -84,7 +84,7 @@ void __dummy__(void)
     BLANK();
 
     OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
-    OFFSET(VCPU_hvm_guest_cr0, struct vcpu, arch.hvm_vcpu.guest_cr[0]);
+    OFFSET(VCPU_vmx_emul, struct vcpu, arch.hvm_vmx.vmxemul);
     OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
     BLANK();
 
diff -r b01f5d834755 -r 2f870774a5e2 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/arch/x86/x86_64/asm-offsets.c	Wed Feb 06 09:59:21 2008 +0000
@@ -103,7 +103,7 @@ void __dummy__(void)
     BLANK();
 
     OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
-    OFFSET(VCPU_hvm_guest_cr0, struct vcpu, arch.hvm_vcpu.guest_cr[0]);
+    OFFSET(VCPU_vmx_emul, struct vcpu, arch.hvm_vmx.vmxemul);
     OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
     BLANK();
 
diff -r b01f5d834755 -r 2f870774a5e2 xen/arch/x86/x86_emulate.c
--- a/xen/arch/x86/x86_emulate.c	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/arch/x86/x86_emulate.c	Wed Feb 06 09:59:21 2008 +0000
@@ -303,7 +303,11 @@ struct operand {
 #define EXC_OF  4
 #define EXC_BR  5
 #define EXC_UD  6
+#define EXC_TS 10
+#define EXC_NP 11
+#define EXC_SS 12
 #define EXC_GP 13
+#define EXC_PF 14
 
 /*
  * Instruction emulation:
@@ -500,12 +504,12 @@ do {
     if ( rc ) goto done;                                                \
 } while (0)
 
-#define generate_exception_if(p, e)                                     \
-({  if ( (p) ) {                                                        \
-        fail_if(ops->inject_hw_exception == NULL);                      \
-        rc = ops->inject_hw_exception(e, ctxt) ? : X86EMUL_EXCEPTION;   \
-        goto done;                                                      \
-    }                                                                   \
+#define generate_exception_if(p, e)                                       \
+({  if ( (p) ) {                                                          \
+        fail_if(ops->inject_hw_exception == NULL);                        \
+        rc = ops->inject_hw_exception(e, 0, ctxt) ? : X86EMUL_EXCEPTION;  \
+        goto done;                                                        \
+    }                                                                     \
 })
 
 /*
@@ -774,7 +778,7 @@ in_realmode(
 }
 
 static int
-load_seg(
+realmode_load_seg(
     enum x86_segment seg,
     uint16_t sel,
     struct x86_emulate_ctxt *ctxt,
@@ -783,18 +787,155 @@ load_seg(
     struct segment_register reg;
     int rc;
 
-    if ( !in_realmode(ctxt, ops) ||
-         (ops->read_segment == NULL) ||
+    if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 )
+        return rc;
+
+    reg.sel = sel;
+    reg.base = (uint32_t)sel << 4;
+
+    return ops->write_segment(seg, &reg, ctxt);
+}
+
+static int
+protmode_load_seg(
+    enum x86_segment seg,
+    uint16_t sel,
+    struct x86_emulate_ctxt *ctxt,
+    struct x86_emulate_ops *ops)
+{
+    struct segment_register desctab, cs, segr;
+    struct { uint32_t a, b; } desc;
+    unsigned long val;
+    uint8_t dpl, rpl, cpl;
+    int rc, fault_type = EXC_TS;
+
+    /* NULL selector? */
+    if ( (sel & 0xfffc) == 0 )
+    {
+        if ( (seg == x86_seg_cs) || (seg == x86_seg_ss) )
+            goto raise_exn;
+        memset(&segr, 0, sizeof(segr));
+        return ops->write_segment(seg, &segr, ctxt);
+    }
+
+    /* LDT descriptor must be in the GDT. */
+    if ( (seg == x86_seg_ldtr) && (sel & 4) )
+        goto raise_exn;
+
+    if ( (rc = ops->read_segment(x86_seg_cs, &cs, ctxt)) ||
+         (rc = ops->read_segment((sel & 4) ? x86_seg_ldtr : x86_seg_gdtr,
+                                 &desctab, ctxt)) )
+        return rc;
+
+    /* Check against descriptor table limit. */
+    if ( ((sel & 0xfff8) + 7) > desctab.limit )
+        goto raise_exn;
+
+    do {
+        if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8),
+                             &val, 4, ctxt)) )
+            return rc;
+        desc.a = val;
+        if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
+                             &val, 4, ctxt)) )
+            return rc;
+        desc.b = val;
+
+        /* Segment present in memory? */
+        if ( !(desc.b & (1u<<15)) )
+        {
+            fault_type = EXC_NP;
+            goto raise_exn;
+        }
+
+        /* LDT descriptor is a system segment. All others are code/data. */
+        if ( (desc.b & (1u<<12)) == ((seg == x86_seg_ldtr) << 12) )
+            goto raise_exn;
+
+        dpl = (desc.b >> 13) & 3;
+        rpl = sel & 3;
+        cpl = cs.sel & 3;
+
+        switch ( seg )
+        {
+        case x86_seg_cs:
+            /* Code segment? */
+            if ( !(desc.b & (1u<<11)) )
+                goto raise_exn;
+            /* Non-conforming segment: check DPL against RPL. */
+            if ( ((desc.b & (6u<<9)) != 6) && (dpl != rpl) )
+                goto raise_exn;
+            break;
+        case x86_seg_ss:
+            /* Writable data segment? */
+            if ( (desc.b & (5u<<9)) != (1u<<9) )
+                goto raise_exn;
+            if ( (dpl != cpl) || (dpl != rpl) )
+                goto raise_exn;
+            break;
+        case x86_seg_ldtr:
+            /* LDT system segment? */
+            if ( (desc.b & (15u<<8)) != (2u<<8) )
+                goto raise_exn;
+            goto skip_accessed_flag;
+        default:
+            /* Readable code or data segment? */
+            if ( (desc.b & (5u<<9)) == (4u<<9) )
+                goto raise_exn;
+            /* Non-conforming segment: check DPL against RPL and CPL. */
+            if ( ((desc.b & (6u<<9)) != 6) && ((dpl < cpl) || (dpl < rpl)) )
+                goto raise_exn;
+            break;
+        }
+
+        /* Ensure Accessed flag is set. */
+        rc = ((desc.b & 0x100) ? X86EMUL_OKAY :
+              ops->cmpxchg(
+                  x86_seg_none, desctab.base + (sel & 0xfff8) + 4, desc.b,
+                  desc.b | 0x100, 4, ctxt));
+    } while ( rc == X86EMUL_CMPXCHG_FAILED );
+
+    if ( rc )
+        return rc;
+
+    /* Force the Accessed flag in our local copy. */
+    desc.b |= 0x100;
+
+ skip_accessed_flag:
+    segr.base = (((desc.b <<  0) & 0xff000000u) |
+                 ((desc.b << 16) & 0x00ff0000u) |
+                 ((desc.a >> 16) & 0x0000ffffu));
+    segr.attr.bytes = (((desc.b >>  8) & 0x00ffu) |
+                       ((desc.b >> 12) & 0x0f00u));
+    segr.limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu);
+    if ( segr.attr.fields.g )
+        segr.limit = (segr.limit << 12) | 0xfffu;
+    segr.sel = sel;
+    return ops->write_segment(seg, &segr, ctxt);
+
+ raise_exn:
+    if ( ops->inject_hw_exception == NULL )
+        return X86EMUL_UNHANDLEABLE;
+    if ( (rc = ops->inject_hw_exception(fault_type, sel & 0xfffc, ctxt)) )
+        return rc;
+    return X86EMUL_EXCEPTION;
+}
+
+static int
+load_seg(
+    enum x86_segment seg,
+    uint16_t sel,
+    struct x86_emulate_ctxt *ctxt,
+    struct x86_emulate_ops *ops)
+{
+    if ( (ops->read_segment == NULL) ||
         (ops->write_segment == NULL) )
        return X86EMUL_UNHANDLEABLE;
 
-    if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 )
-        return rc;
-
-    reg.sel = sel;
-    reg.base = (uint32_t)sel << 4;
-
-    return ops->write_segment(seg, &reg, ctxt);
+    if ( in_realmode(ctxt, ops) )
+        return realmode_load_seg(seg, sel, ctxt, ops);
+
+    return protmode_load_seg(seg, sel, ctxt, ops);
 }
 
 void *
@@ -1858,7 +1999,7 @@ x86_emulate(
     if ( (_regs.eflags & EFLG_TF) && (rc == X86EMUL_OKAY) &&
          (ops->inject_hw_exception != NULL) )
-        rc = ops->inject_hw_exception(EXC_DB, ctxt) ? : X86EMUL_EXCEPTION;
+        rc = ops->inject_hw_exception(EXC_DB, 0, ctxt) ? : X86EMUL_EXCEPTION;
 
 done:
     return rc;
diff -r b01f5d834755 -r 2f870774a5e2 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Feb 06 09:59:21 2008 +0000
@@ -89,10 +89,20 @@ struct arch_vmx_struct {
     struct vmx_msr_entry *host_msr_area;
 
 #ifdef VMXASSIST
+
     unsigned long        vmxassist_enabled:1;
     unsigned long        irqbase_mode:1;
     unsigned char        pm_irqbase[2];
+
 #else
+
+    /* Are we emulating rather than VMENTERing? */
+#define VMXEMUL_REALMODE 1  /* Yes, because CR0.PE == 0   */
+#define VMXEMUL_BAD_CS   2  /* Yes, because CS.RPL != CPL */
+#define VMXEMUL_BAD_SS   4  /* Yes, because SS.RPL != CPL */
+    uint8_t vmxemul;
+
+    /* I/O request in flight to device model. */
     bool_t               real_mode_io_in_progress;
     bool_t               real_mode_io_completed;
     unsigned long        real_mode_io_data;
diff -r b01f5d834755 -r 2f870774a5e2 xen/include/asm-x86/x86_emulate.h
--- a/xen/include/asm-x86/x86_emulate.h	Wed Feb 06 09:58:38 2008 +0000
+++ b/xen/include/asm-x86/x86_emulate.h	Wed Feb 06 09:59:21 2008 +0000
@@ -39,8 +39,17 @@ enum x86_segment {
     x86_seg_tr,
     x86_seg_ldtr,
     x86_seg_gdtr,
-    x86_seg_idtr
+    x86_seg_idtr,
+    /*
+     * Dummy: used to emulate direct processor accesses to management
+     * structures (TSS, GDT, LDT, IDT, etc.) which use linear addressing
+     * (no segment component) and bypass usual segment- and page-level
+     * protection checks.
+     */
+    x86_seg_none
 };
+
+#define is_x86_user_segment(seg) ((unsigned)(seg) <= x86_seg_gs)
 
 /*
  * Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
@@ -333,6 +342,7 @@ struct x86_emulate_ops
     /* inject_hw_exception */
     int (*inject_hw_exception)(
         uint8_t vector,
+        uint16_t error_code,
         struct x86_emulate_ctxt *ctxt);
 
     /* inject_sw_interrupt */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
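
As a reading aid for the descriptor arithmetic at the end of protmode_load_seg() in the patch above, the following standalone C sketch shows how the two 32-bit words of a GDT/LDT entry unpack into a segment's base, limit and attribute bits. It is illustrative only and not part of the changeset; the seg_info struct and decode_descriptor() are names invented for this example.

/*
 * Illustrative only -- not part of the patch.  Mirrors the unpacking done
 * at the end of protmode_load_seg(): base from descriptor bits 16-39 and
 * 56-63, attributes from bits 40-47 and 52-55, and a 20-bit limit that is
 * scaled by 4kB when the granularity bit is set.
 */
#include <stdint.h>
#include <stdio.h>

struct seg_info {
    uint32_t base;
    uint32_t limit;
    uint16_t attr;   /* descriptor bits 40-47 and 52-55 */
};

static void decode_descriptor(uint32_t lo, uint32_t hi, struct seg_info *s)
{
    /* Base is split across descriptor bits 16-39 and 56-63. */
    s->base = ((hi <<  0) & 0xff000000u) |
              ((hi << 16) & 0x00ff0000u) |
              ((lo >> 16) & 0x0000ffffu);

    /* Type/S/DPL/P come from bits 40-47, AVL/L/D/G from bits 52-55. */
    s->attr = ((hi >>  8) & 0x00ffu) |
              ((hi >> 12) & 0x0f00u);

    /* 20-bit limit, scaled by 4kB when the granularity (G) bit is set. */
    s->limit = (hi & 0x000f0000u) | (lo & 0x0000ffffu);
    if ( s->attr & 0x800 )           /* G bit */
        s->limit = (s->limit << 12) | 0xfffu;
}

int main(void)
{
    /* A conventional flat 4GB ring-0 code descriptor: 0x00cf9a000000ffff. */
    struct seg_info s;
    decode_descriptor(0x0000ffffu, 0x00cf9a00u, &s);
    printf("base=%#x limit=%#x attr=%#x\n",
           (unsigned)s.base, (unsigned)s.limit, (unsigned)s.attr);
    return 0;
}

Decoding that example descriptor gives base 0, limit 0xffffffff (the 20-bit limit 0xfffff scaled up by the granularity bit) and attr 0xc9a (present, DPL 0, readable 32-bit code). This kind of decoding only matters here because, as the VMXEMUL_BAD_CS/VMXEMUL_BAD_SS comments in vmcs.h note, the guest cannot be entered under VMX while CS.RPL or SS.RPL differs from the (zero) CPL, so such transitions are kept in the emulator until the guest reloads those selectors.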