[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] [HVM] Add canonical address checks and generally clean up.
# HG changeset patch # User kfraser@xxxxxxxxxxxxxxxxxxxxx # Node ID f35f17d24a23798a0baf1977da6d50dae9865047 # Parent c032103da1012b33fb42f9c35615bc9d3926a8ed [HVM] Add canonical address checks and generally clean up. Based on a patch from Jan Beulich <jbeulich@xxxxxxxxxx> Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx> --- xen/arch/x86/hvm/svm/emulate.c | 11 -- xen/arch/x86/hvm/svm/svm.c | 128 +++++++++++++++++----------- xen/arch/x86/hvm/vmx/vmx.c | 172 ++++++++++++++++++++++---------------- xen/arch/x86/mm/shadow/common.c | 11 +- xen/include/asm-x86/hvm/hvm.h | 4 xen/include/asm-x86/x86_32/page.h | 2 xen/include/asm-x86/x86_64/page.h | 2 7 files changed, 199 insertions(+), 131 deletions(-) diff -r c032103da101 -r f35f17d24a23 xen/arch/x86/hvm/svm/emulate.c --- a/xen/arch/x86/hvm/svm/emulate.c Fri Dec 01 10:47:57 2006 +0000 +++ b/xen/arch/x86/hvm/svm/emulate.c Fri Dec 01 11:04:27 2006 +0000 @@ -127,17 +127,6 @@ static inline unsigned long DECODE_GPR_V *size = 0; \ return (unsigned long) -1; \ } - -#if 0 -/* - * hv_is_canonical - checks if the given address is canonical - */ -static inline u64 hv_is_canonical(u64 addr) -{ - u64 bits = addr & (u64)0xffff800000000000; - return (u64)((bits == (u64)0xffff800000000000) || (bits == (u64)0x0)); -} -#endif #define modrm operand [0] diff -r c032103da101 -r f35f17d24a23 xen/arch/x86/hvm/svm/svm.c --- a/xen/arch/x86/hvm/svm/svm.c Fri Dec 01 10:47:57 2006 +0000 +++ b/xen/arch/x86/hvm/svm/svm.c Fri Dec 01 11:04:27 2006 +0000 @@ -269,13 +269,11 @@ static int svm_long_mode_enabled(struct return test_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state); } -#define IS_CANO_ADDRESS(add) 1 - static inline int long_mode_do_msr_read(struct cpu_user_regs *regs) { u64 msr_content = 0; - struct vcpu *vc = current; - struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb; + struct vcpu *v = current; + struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; switch ((u32)regs->ecx) { @@ -284,17 +282,25 @@ static inline int long_mode_do_msr_read( 
msr_content &= ~EFER_SVME; break; +#ifdef __x86_64__ case MSR_FS_BASE: msr_content = vmcb->fs.base; - break; + goto check_long_mode; case MSR_GS_BASE: msr_content = vmcb->gs.base; - break; + goto check_long_mode; case MSR_SHADOW_GS_BASE: msr_content = vmcb->kerngsbase; - break; + check_long_mode: + if ( !svm_long_mode_enabled(v) ) + { + svm_inject_exception(v, TRAP_gp_fault, 1, 0); + return 0; + } + break; +#endif case MSR_STAR: msr_content = vmcb->star; @@ -326,25 +332,25 @@ static inline int long_mode_do_msr_write static inline int long_mode_do_msr_write(struct cpu_user_regs *regs) { u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32); + u32 ecx = regs->ecx; struct vcpu *v = current; struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; HVM_DBG_LOG(DBG_LEVEL_1, "msr %x msr_content %"PRIx64"\n", - (u32)regs->ecx, msr_content); - - switch ( (u32)regs->ecx ) + ecx, msr_content); + + switch ( ecx ) { case MSR_EFER: -#ifdef __x86_64__ /* offending reserved bit will cause #GP */ if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) ) { - printk("Trying to set reserved bit in EFER: %"PRIx64"\n", - msr_content); - svm_inject_exception(v, TRAP_gp_fault, 1, 0); - return 0; - } - + gdprintk(XENLOG_WARNING, "Trying to set reserved bit in " + "EFER: %"PRIx64"\n", msr_content); + goto gp_fault; + } + +#ifdef __x86_64__ /* LME: 0 -> 1 */ if ( msr_content & EFER_LME && !test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state)) @@ -353,10 +359,9 @@ static inline int long_mode_do_msr_write !test_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state) ) { - printk("Trying to set LME bit when " - "in paging mode or PAE bit is not set\n"); - svm_inject_exception(v, TRAP_gp_fault, 1, 0); - return 0; + gdprintk(XENLOG_WARNING, "Trying to set LME bit when " + "in paging mode or PAE bit is not set\n"); + goto gp_fault; } set_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state); } @@ -371,37 +376,38 @@ static inline int long_mode_do_msr_write vmcb->efer = 
msr_content | EFER_SVME; break; +#ifdef __x86_64__ case MSR_FS_BASE: case MSR_GS_BASE: + case MSR_SHADOW_GS_BASE: if ( !svm_long_mode_enabled(v) ) - goto exit_and_crash; - - if (!IS_CANO_ADDRESS(msr_content)) - { - HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n"); - svm_inject_exception(v, TRAP_gp_fault, 1, 0); - } - - if (regs->ecx == MSR_FS_BASE) + goto gp_fault; + + if ( !is_canonical_address(msr_content) ) + goto uncanonical_address; + + if ( ecx == MSR_FS_BASE ) vmcb->fs.base = msr_content; - else + else if ( ecx == MSR_GS_BASE ) vmcb->gs.base = msr_content; - break; - - case MSR_SHADOW_GS_BASE: - vmcb->kerngsbase = msr_content; - break; + else + vmcb->kerngsbase = msr_content; + break; +#endif case MSR_STAR: vmcb->star = msr_content; break; case MSR_LSTAR: - vmcb->lstar = msr_content; - break; - case MSR_CSTAR: - vmcb->cstar = msr_content; + if ( !is_canonical_address(msr_content) ) + goto uncanonical_address; + + if ( ecx == MSR_LSTAR ) + vmcb->lstar = msr_content; + else + vmcb->cstar = msr_content; break; case MSR_SYSCALL_MASK: @@ -414,10 +420,11 @@ static inline int long_mode_do_msr_write return 1; - exit_and_crash: - gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx); - domain_crash(v->domain); - return 1; /* handled */ + uncanonical_address: + HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write %x\n", ecx); + gp_fault: + svm_inject_exception(v, TRAP_gp_fault, 1, 0); + return 0; } @@ -1272,7 +1279,7 @@ static inline int svm_get_io_address( #endif /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit. - * l field combined with EFER_LMA -> longmode says whether it's 16 or 64 bit. + * l field combined with EFER_LMA says whether it's 16 or 64 bit. 
*/ asize = (long_mode)?64:((vmcb->cs.attr.fields.db)?32:16); @@ -1383,8 +1390,35 @@ static inline int svm_get_io_address( *addr += seg->base; } - else if (seg == &vmcb->fs || seg == &vmcb->gs) - *addr += seg->base; +#ifdef __x86_64__ + else + { + if (seg == &vmcb->fs || seg == &vmcb->gs) + *addr += seg->base; + + if (!is_canonical_address(*addr) || + !is_canonical_address(*addr + size - 1)) + { + svm_inject_exception(v, TRAP_gp_fault, 1, 0); + return 0; + } + if (*count > (1UL << 48) / size) + *count = (1UL << 48) / size; + if (!(regs->eflags & EF_DF)) + { + if (*addr + *count * size - 1 < *addr || + !is_canonical_address(*addr + *count * size - 1)) + *count = (*addr & ~((1UL << 48) - 1)) / size; + } + else + { + if ((*count - 1) * size > *addr || + !is_canonical_address(*addr + (*count - 1) * size)) + *count = (*addr & ~((1UL << 48) - 1)) / size + 1; + } + ASSERT(*count); + } +#endif return 1; } diff -r c032103da101 -r f35f17d24a23 xen/arch/x86/hvm/vmx/vmx.c --- a/xen/arch/x86/hvm/vmx/vmx.c Fri Dec 01 10:47:57 2006 +0000 +++ b/xen/arch/x86/hvm/vmx/vmx.c Fri Dec 01 11:04:27 2006 +0000 @@ -95,13 +95,7 @@ static void vmx_save_host_msrs(void) rdmsrl(msr_index[i], host_msr_state->msrs[i]); } -#define CASE_READ_MSR(address) \ - case MSR_ ## address: \ - msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_ ## address]; \ - break - -#define CASE_WRITE_MSR(address) \ - case MSR_ ## address: \ +#define WRITE_MSR(address) \ guest_msr_state->msrs[VMX_INDEX_MSR_ ## address] = msr_content; \ if ( !test_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags) )\ set_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags); \ @@ -109,7 +103,6 @@ static void vmx_save_host_msrs(void) set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags); \ break -#define IS_CANO_ADDRESS(add) 1 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs) { u64 msr_content = 0; @@ -123,27 +116,38 @@ static inline int long_mode_do_msr_read( break; case MSR_FS_BASE: - if ( 
!(vmx_long_mode_enabled(v)) ) - goto exit_and_crash; - msr_content = __vmread(GUEST_FS_BASE); - break; + goto check_long_mode; case MSR_GS_BASE: - if ( !(vmx_long_mode_enabled(v)) ) - goto exit_and_crash; - msr_content = __vmread(GUEST_GS_BASE); - break; + goto check_long_mode; case MSR_SHADOW_GS_BASE: msr_content = guest_msr_state->shadow_gs; - break; - - CASE_READ_MSR(STAR); - CASE_READ_MSR(LSTAR); - CASE_READ_MSR(CSTAR); - CASE_READ_MSR(SYSCALL_MASK); + check_long_mode: + if ( !(vmx_long_mode_enabled(v)) ) + { + vmx_inject_hw_exception(v, TRAP_gp_fault, 0); + return 0; + } + break; + + case MSR_STAR: + msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_STAR]; + break; + + case MSR_LSTAR: + msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_LSTAR]; + break; + + case MSR_CSTAR: + msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_CSTAR]; + break; + + case MSR_SYSCALL_MASK: + msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK]; + break; default: return 0; @@ -155,32 +159,28 @@ static inline int long_mode_do_msr_read( regs->edx = (u32)(msr_content >> 32); return 1; - - exit_and_crash: - gdprintk(XENLOG_ERR, "Fatal error reading MSR %lx\n", (long)regs->ecx); - domain_crash(v->domain); - return 1; /* handled */ } static inline int long_mode_do_msr_write(struct cpu_user_regs *regs) { u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32); + u32 ecx = regs->ecx; struct vcpu *v = current; struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state; struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state); HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%x msr_content 0x%"PRIx64"\n", - (u32)regs->ecx, msr_content); - - switch ( (u32)regs->ecx ) { + ecx, msr_content); + + switch ( ecx ) + { case MSR_EFER: /* offending reserved bit will cause #GP */ if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) ) { - printk("Trying to set reserved bit in EFER: %"PRIx64"\n", - msr_content); - vmx_inject_hw_exception(v, TRAP_gp_fault, 0); - return 0; + 
gdprintk(XENLOG_WARNING, "Trying to set reserved bit in " + "EFER: %"PRIx64"\n", msr_content); + goto gp_fault; } if ( (msr_content & EFER_LME) @@ -188,9 +188,9 @@ static inline int long_mode_do_msr_write { if ( unlikely(vmx_paging_enabled(v)) ) { - printk("Trying to set EFER.LME with paging enabled\n"); - vmx_inject_hw_exception(v, TRAP_gp_fault, 0); - return 0; + gdprintk(XENLOG_WARNING, + "Trying to set EFER.LME with paging enabled\n"); + goto gp_fault; } } else if ( !(msr_content & EFER_LME) @@ -198,9 +198,9 @@ static inline int long_mode_do_msr_write { if ( unlikely(vmx_paging_enabled(v)) ) { - printk("Trying to clear EFER.LME with paging enabled\n"); - vmx_inject_hw_exception(v, TRAP_gp_fault, 0); - return 0; + gdprintk(XENLOG_WARNING, + "Trying to clear EFER.LME with paging enabled\n"); + goto gp_fault; } } @@ -209,35 +209,40 @@ static inline int long_mode_do_msr_write case MSR_FS_BASE: case MSR_GS_BASE: + case MSR_SHADOW_GS_BASE: if ( !vmx_long_mode_enabled(v) ) - goto exit_and_crash; - - if ( !IS_CANO_ADDRESS(msr_content) ) - { - HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n"); - vmx_inject_hw_exception(v, TRAP_gp_fault, 0); - return 0; - } - - if ( regs->ecx == MSR_FS_BASE ) + goto gp_fault; + + if ( !is_canonical_address(msr_content) ) + goto uncanonical_address; + + if ( ecx == MSR_FS_BASE ) __vmwrite(GUEST_FS_BASE, msr_content); + else if ( ecx == MSR_GS_BASE ) + __vmwrite(GUEST_GS_BASE, msr_content); else - __vmwrite(GUEST_GS_BASE, msr_content); - - break; - - case MSR_SHADOW_GS_BASE: - if ( !(vmx_long_mode_enabled(v)) ) - goto exit_and_crash; - - v->arch.hvm_vmx.msr_state.shadow_gs = msr_content; - wrmsrl(MSR_SHADOW_GS_BASE, msr_content); - break; - - CASE_WRITE_MSR(STAR); - CASE_WRITE_MSR(LSTAR); - CASE_WRITE_MSR(CSTAR); - CASE_WRITE_MSR(SYSCALL_MASK); + { + v->arch.hvm_vmx.msr_state.shadow_gs = msr_content; + wrmsrl(MSR_SHADOW_GS_BASE, msr_content); + } + + break; + + case MSR_STAR: + WRITE_MSR(STAR); + + case MSR_LSTAR: + if ( 
!is_canonical_address(msr_content) ) + goto uncanonical_address; + WRITE_MSR(LSTAR); + + case MSR_CSTAR: + if ( !is_canonical_address(msr_content) ) + goto uncanonical_address; + WRITE_MSR(CSTAR); + + case MSR_SYSCALL_MASK: + WRITE_MSR(SYSCALL_MASK); default: return 0; @@ -245,10 +250,11 @@ static inline int long_mode_do_msr_write return 1; - exit_and_crash: - gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx); - domain_crash(v->domain); - return 1; /* handled */ + uncanonical_address: + HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write %x\n", ecx); + gp_fault: + vmx_inject_hw_exception(v, TRAP_gp_fault, 0); + return 0; } /* @@ -1283,6 +1289,32 @@ static void vmx_io_instruction(unsigned ASSERT(count); } } +#ifdef __x86_64__ + else + { + if ( !is_canonical_address(addr) || + !is_canonical_address(addr + size - 1) ) + { + vmx_inject_hw_exception(current, TRAP_gp_fault, 0); + return; + } + if ( count > (1UL << 48) / size ) + count = (1UL << 48) / size; + if ( !(regs->eflags & EF_DF) ) + { + if ( addr + count * size - 1 < addr || + !is_canonical_address(addr + count * size - 1) ) + count = (addr & ~((1UL << 48) - 1)) / size; + } + else + { + if ( (count - 1) * size > addr || + !is_canonical_address(addr + (count - 1) * size) ) + count = (addr & ~((1UL << 48) - 1)) / size + 1; + } + ASSERT(count); + } +#endif /* * Handle string pio instructions that cross pages or that diff -r c032103da101 -r f35f17d24a23 xen/arch/x86/mm/shadow/common.c --- a/xen/arch/x86/mm/shadow/common.c Fri Dec 01 10:47:57 2006 +0000 +++ b/xen/arch/x86/mm/shadow/common.c Fri Dec 01 11:04:27 2006 +0000 @@ -120,12 +120,17 @@ static int hvm_translate_linear_addr( */ addr = (uint32_t)(addr + dreg.base); } - else if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) ) + else { /* - * LONG MODE: FS and GS add a segment base. + * LONG MODE: FS and GS add segment base. Addresses must be canonical. 
*/ - addr += dreg.base; + + if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) ) + addr += dreg.base; + + if ( !is_canonical_address(addr) ) + goto gpf; } *paddr = addr; diff -r c032103da101 -r f35f17d24a23 xen/include/asm-x86/hvm/hvm.h --- a/xen/include/asm-x86/hvm/hvm.h Fri Dec 01 10:47:57 2006 +0000 +++ b/xen/include/asm-x86/hvm/hvm.h Fri Dec 01 11:04:27 2006 +0000 @@ -157,11 +157,15 @@ hvm_paging_enabled(struct vcpu *v) return hvm_funcs.paging_enabled(v); } +#ifdef __x86_64__ static inline int hvm_long_mode_enabled(struct vcpu *v) { return hvm_funcs.long_mode_enabled(v); } +#else +#define hvm_long_mode_enabled(v) 0 +#endif static inline int hvm_pae_enabled(struct vcpu *v) diff -r c032103da101 -r f35f17d24a23 xen/include/asm-x86/x86_32/page.h --- a/xen/include/asm-x86/x86_32/page.h Fri Dec 01 10:47:57 2006 +0000 +++ b/xen/include/asm-x86/x86_32/page.h Fri Dec 01 11:04:27 2006 +0000 @@ -6,6 +6,8 @@ #define VADDR_BITS 32 #define VADDR_MASK (~0UL) + +#define is_canonical_address(x) 1 #include <xen/config.h> #ifdef CONFIG_X86_PAE diff -r c032103da101 -r f35f17d24a23 xen/include/asm-x86/x86_64/page.h --- a/xen/include/asm-x86/x86_64/page.h Fri Dec 01 10:47:57 2006 +0000 +++ b/xen/include/asm-x86/x86_64/page.h Fri Dec 01 11:04:27 2006 +0000 @@ -23,6 +23,8 @@ #define VADDR_BITS 48 #define PADDR_MASK ((1UL << PADDR_BITS)-1) #define VADDR_MASK ((1UL << VADDR_BITS)-1) + +#define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63)) #ifndef __ASSEMBLY__ _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.