[Xen-changelog] [xen stable-4.2] x86: correct LDT checks
commit 0f72e5d7608e01a79f26a8601a3ea289fa52589f
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Oct 22 12:05:45 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Oct 22 12:05:45 2013 +0200

    x86: correct LDT checks

    - MMUEXT_SET_LDT should behave as similarly to the LLDT instruction as
      possible: fail only if the base address is non-canonical
    - instead LDT descriptor accesses should fault if the descriptor
      address ends up being non-canonical (by ensuring this we at once
      avoid reading an entry from the mach-to-phys table and consider it
      a page table entry)
    - fault propagation on using LDT selectors must distinguish #PF and
      #GP (the latter must be raised for a non-canonical descriptor
      address, which also applies to several other uses of
      propagate_page_fault(), and hence the problem is being fixed there)
    - map_ldt_shadow_page() should properly wrap addresses for 32-bit VMs

    At once remove the odd invocation of map_ldt_shadow_page() from the
    MMUEXT_SET_LDT handler: There's nothing really telling us that the
    first LDT page is going to be preferred over others.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
    master commit: 40d66baa46ca8a9ffa6df3e063a967d08ec92bcf
    master date: 2013-10-11 09:28:26 +0200
---
 xen/arch/x86/domain.c                |   20 ++++++--------------
 xen/arch/x86/mm.c                    |    9 ++++-----
 xen/arch/x86/traps.c                 |   29 ++++++++++++++++++++++++-----
 xen/include/asm-x86/mm.h             |    2 +-
 xen/include/asm-x86/paging.h         |    3 ++-
 xen/include/asm-x86/x86_32/uaccess.h |    1 +
 6 files changed, 38 insertions(+), 26 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 89dd3cb..68c435e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -779,12 +779,7 @@ int arch_set_info_guest(
                 fixup_guest_code_selector(d, c.nat->trap_ctxt[i].cs);
         }

-        /* LDT safety checks. */
-        if ( ((c.nat->ldt_base & (PAGE_SIZE-1)) != 0) ||
-             (c.nat->ldt_ents > 8192) ||
-             !array_access_ok(c.nat->ldt_base,
-                              c.nat->ldt_ents,
-                              LDT_ENTRY_SIZE) )
+        if ( !__addr_ok(c.nat->ldt_base) )
             return -EINVAL;
     }
 #ifdef CONFIG_COMPAT
@@ -798,16 +793,13 @@ int arch_set_info_guest(

         for ( i = 0; i < 256; i++ )
             fixup_guest_code_selector(d, c.cmp->trap_ctxt[i].cs);
-
-        /* LDT safety checks. */
-        if ( ((c.cmp->ldt_base & (PAGE_SIZE-1)) != 0) ||
-             (c.cmp->ldt_ents > 8192) ||
-             !compat_array_access_ok(c.cmp->ldt_base,
-                                     c.cmp->ldt_ents,
-                                     LDT_ENTRY_SIZE) )
-            return -EINVAL;
     }
 #endif
+
+    /* LDT safety checks. */
+    if ( ((c(ldt_base) & (PAGE_SIZE - 1)) != 0) ||
+         (c(ldt_ents) > 8192) )
+        return -EINVAL;
     }

     v->fpu_initialised = !!(flags & VGCF_I387_VALID);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index fcd7939..df687af 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -660,6 +660,8 @@ int map_ldt_shadow_page(unsigned int off)

     BUG_ON(unlikely(in_irq()));

+    if ( is_pv_32bit_domain(d) )
+        gva = (u32)gva;
     guest_get_eff_kern_l1e(v, gva, &l1e);
     if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
         return 0;
@@ -3495,9 +3497,8 @@ long do_mmuext_op(
                 MEM_LOG("ignoring SET_LDT hypercall from external domain");
                 okay = 0;
             }
-            else if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
-                      (ents > 8192) ||
-                      !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
+            else if ( ((ptr & (PAGE_SIZE - 1)) != 0) || !__addr_ok(ptr) ||
+                      (ents > 8192) )
             {
                 okay = 0;
                 MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
@@ -3510,8 +3511,6 @@ long do_mmuext_op(
                 curr->arch.pv_vcpu.ldt_base = ptr;
                 curr->arch.pv_vcpu.ldt_ents = ents;
                 load_LDT(curr);
-                if ( ents != 0 )
-                    (void)map_ldt_shadow_page(0);
             }
             break;
         }
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 2fc9927..fd8bd8c 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1107,12 +1107,26 @@ static void reserved_bit_page_fault(
     show_execution_state(regs);
 }

-void propagate_page_fault(unsigned long addr, u16 error_code)
+struct trap_bounce *propagate_page_fault(unsigned long addr, u16 error_code)
 {
     struct trap_info *ti;
     struct vcpu *v = current;
     struct trap_bounce *tb = &v->arch.pv_vcpu.trap_bounce;

+#ifdef __x86_64__
+    if ( unlikely(!is_canonical_address(addr)) )
+    {
+        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
+        tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
+        tb->error_code = 0;
+        tb->cs = ti->cs;
+        tb->eip = ti->address;
+        if ( TI_GET_IF(ti) )
+            tb->flags |= TBF_INTERRUPT;
+        return tb;
+    }
+#endif
+
     v->arch.pv_vcpu.ctrlreg[2] = addr;
     arch_set_cr2(v, addr);

@@ -1139,6 +1153,8 @@ void propagate_page_fault(unsigned long addr, u16 error_code)

     if ( unlikely(error_code & PFEC_reserved_bit) )
         reserved_bit_page_fault(addr, guest_cpu_user_regs());
+
+    return NULL;
 }

 static int handle_gdt_ldt_mapping_fault(
@@ -1172,13 +1188,16 @@ static int handle_gdt_ldt_mapping_fault(
     }
     else
     {
+        struct trap_bounce *tb;
+
         /* In hypervisor mode? Leave it to the #PF handler to fix up. */
         if ( !guest_mode(regs) )
             return 0;
-        /* In guest mode? Propagate #PF to guest, with adjusted %cr2. */
-        propagate_page_fault(
-            curr->arch.pv_vcpu.ldt_base + offset,
-            regs->error_code);
+        /* In guest mode? Propagate fault to guest, with adjusted %cr2. */
+        tb = propagate_page_fault(curr->arch.pv_vcpu.ldt_base + offset,
+                                  regs->error_code);
+        if ( tb )
+            tb->error_code = ((u16)offset & ~3) | 4;
     }
 }
 else
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index e783d8a..10a543e 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -602,7 +602,7 @@ int new_guest_cr3(unsigned long pfn);
 void make_cr3(struct vcpu *v, unsigned long mfn);
 void update_cr3(struct vcpu *v);
 int vcpu_destroy_pagetables(struct vcpu *);
-void propagate_page_fault(unsigned long addr, u16 error_code);
+struct trap_bounce *propagate_page_fault(unsigned long addr, u16 error_code);
 void *do_page_walk(struct vcpu *v, unsigned long addr);

 int __sync_local_execstate(void);
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index c432a97..ee35dff 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -408,7 +408,8 @@ guest_get_eff_l1e(struct vcpu *v, unsigned long addr, void *eff_l1e)
     if ( likely(!paging_mode_translate(v->domain)) )
     {
         ASSERT(!paging_mode_external(v->domain));
-        if ( __copy_from_user(eff_l1e,
+        if ( !__addr_ok(addr) ||
+             __copy_from_user(eff_l1e,
                               &__linear_l1_table[l1_linear_offset(addr)],
                               sizeof(l1_pgentry_t)) != 0 )
             *(l1_pgentry_t *)eff_l1e = l1e_empty();
diff --git a/xen/include/asm-x86/x86_32/uaccess.h b/xen/include/asm-x86/x86_32/uaccess.h
index d6a5230..9ac21c4 100644
--- a/xen/include/asm-x86/x86_32/uaccess.h
+++ b/xen/include/asm-x86/x86_32/uaccess.h
@@ -15,6 +15,7 @@
         :"1" (addr),"g" ((int)(size)),"r" (HYPERVISOR_VIRT_START)); \
     flag; })

+#define __addr_ok(addr) (likely((unsigned long)(addr) < HYPERVISOR_VIRT_START))
 #define access_ok(addr,size) (likely(__range_not_ok(addr,size) == 0))

 #define array_access_ok(addr,count,size) \
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.2

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
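
As an illustrative sketch only (not part of the patch above), the two checks the
patch leans on can be approximated in plain, self-contained C. The helper names
canonical() and gp_error_code_for_ldt() below are hypothetical stand-ins, not
Xen APIs: canonical() mirrors the idea behind Xen's is_canonical_address()
(bits 63..47 must all equal bit 47), and gp_error_code_for_ldt() mirrors the
((u16)offset & ~3) | 4 value that handle_gdt_ldt_mapping_fault() now installs
as the #GP error code for a faulting LDT descriptor access.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Canonical iff bits 63..47 are all copies of bit 47
 * (compare the two sign extensions). */
static bool canonical(uint64_t addr)
{
    return ((int64_t)addr >> 47) == ((int64_t)addr >> 63);
}

/* Selector-style #GP error code for an LDT descriptor fault: clear the low
 * two bits of the offset and set bit 2 (TI = LDT), as in the patch's
 * ((u16)offset & ~3) | 4. */
static uint16_t gp_error_code_for_ldt(uint16_t offset)
{
    return (uint16_t)((offset & ~3u) | 4u);
}

int main(void)
{
    printf("%d\n", canonical(0x00007fffffffffffULL));  /* 1: canonical */
    printf("%d\n", canonical(0x0000800000000000ULL));  /* 0: non-canonical */
    printf("%d\n", canonical(0xffff800000000000ULL));  /* 1: canonical */
    printf("%#x\n", gp_error_code_for_ldt(0x10));      /* 0x14: TI bit set */
    return 0;
}

In the patch itself, propagate_page_fault() raises the #GP (and returns the
trap_bounce) only when the descriptor address is non-canonical, after which
handle_gdt_ldt_mapping_fault() overwrites the error code with the selector-style
value; canonical addresses keep taking the ordinary #PF path.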