[Xen-changelog] [xen-unstable] x86: Enable Supervisor Mode Execution Protection (SMEP)
# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1307133540 -3600
# Node ID 0c0884fd8b494932a4b707e339cbe1b881d09103
# Parent  bcd2476c2e2d00dc6371e52fbff66fe3178b7944
x86: Enable Supervisor Mode Execution Protection (SMEP)

Newer Intel CPUs support SMEP (Supervisor Mode Execution Protection). SMEP
prevents software operating with CPL < 3 (supervisor mode) from fetching
instructions from any linear address with a valid translation for which the
U/S flag (bit 2) is 1 in every paging-structure entry controlling the
translation for the linear address.

This patch enables SMEP in Xen to protect the hypervisor from executing PV
guest instructions, i.e. code in mappings whose translation paging-structure
entries all have the U/S flag set.

Signed-off-by: Yang Wei <wei.y.yang@xxxxxxxxx>
Signed-off-by: Shan Haitao <haitao.shan@xxxxxxxxx>
Signed-off-by: Li Xin <xin.li@xxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxx>
---

diff -r bcd2476c2e2d -r 0c0884fd8b49 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c	Fri Jun 03 17:27:01 2011 +0100
+++ b/xen/arch/x86/setup.c	Fri Jun 03 21:39:00 2011 +0100
@@ -57,6 +57,10 @@
 static bool_t __initdata opt_watchdog;
 boolean_param("watchdog", opt_watchdog);
 
+/* smep: Enable/disable Supervisor Mode Execution Protection (default on). */
+static bool_t __initdata disable_smep;
+invbool_param("smep", disable_smep);
+
 /* **** Linux config option: propagated to domain0. */
 /* "acpi=off":    Disables both ACPI table parsing and interpreter. */
 /* "acpi=force":  Override the disable blacklist. */
@@ -1200,11 +1204,17 @@
     arch_init_memory();
 
     identify_cpu(&boot_cpu_data);
+
     if ( cpu_has_fxsr )
         set_in_cr4(X86_CR4_OSFXSR);
     if ( cpu_has_xmm )
         set_in_cr4(X86_CR4_OSXMMEXCPT);
 
+    if ( disable_smep )
+        setup_clear_cpu_cap(X86_FEATURE_SMEP);
+    if ( cpu_has_smep )
+        set_in_cr4(X86_CR4_SMEP);
+
     local_irq_enable();
 
 #ifdef CONFIG_X86_64
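[Editorial note: the hunk above registers "smep" as an inverted boolean
parameter, so passing e.g. smep=0 on the Xen command line should set
disable_smep and clear the feature bit before CR4 is programmed. The feature
itself is advertised in CPUID leaf 7, sub-leaf 0, EBX bit 7 -- the same bit
the patch defines below as X86_FEATURE_SMEP (7*32+ 7). As a quick sanity
check outside Xen, a minimal user-space sketch (not part of the patch,
assuming a recent GCC/Clang toolchain on x86) can query that bit:]

/*
 * Sketch: detect SMEP support from user space via CPUID.(EAX=07H,ECX=0H).
 * Not Xen code -- uses GCC's <cpuid.h> helper for illustration only.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* __get_cpuid_count() returns 0 if the requested leaf is unsupported. */
    if ( !__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) )
    {
        puts("CPUID leaf 7 not supported");
        return 1;
    }

    /* CPUID.(EAX=07H,ECX=0H):EBX.SMEP is bit 7. */
    printf("SMEP %s\n", (ebx & (1u << 7)) ? "supported" : "not supported");
    return 0;
}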
diff -r bcd2476c2e2d -r 0c0884fd8b49 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c	Fri Jun 03 17:27:01 2011 +0100
+++ b/xen/arch/x86/traps.c	Fri Jun 03 21:39:00 2011 +0100
@@ -1139,7 +1139,13 @@
     (((va) >= HYPERVISOR_VIRT_START))
 #endif
 
-static int __spurious_page_fault(
+enum pf_type {
+    real_fault,
+    smep_fault,
+    spurious_fault
+};
+
+static enum pf_type __page_fault_type(
     unsigned long addr, unsigned int error_code)
 {
     unsigned long mfn, cr3 = read_cr3();
@@ -1151,7 +1157,7 @@
 #endif
     l2_pgentry_t l2e, *l2t;
     l1_pgentry_t l1e, *l1t;
-    unsigned int required_flags, disallowed_flags;
+    unsigned int required_flags, disallowed_flags, page_user;
 
     /*
      * We do not take spurious page faults in IRQ handlers as we do not
@@ -1159,11 +1165,11 @@
      * map_domain_page() is not IRQ-safe.
      */
     if ( in_irq() )
-        return 0;
+        return real_fault;
 
     /* Reserved bit violations are never spurious faults. */
     if ( error_code & PFEC_reserved_bit )
-        return 0;
+        return real_fault;
 
     required_flags  = _PAGE_PRESENT;
     if ( error_code & PFEC_write_access )
@@ -1175,6 +1181,8 @@
     if ( error_code & PFEC_insn_fetch )
         disallowed_flags |= _PAGE_NX_BIT;
 
+    page_user = _PAGE_USER;
+
     mfn = cr3 >> PAGE_SHIFT;
 
 #if CONFIG_PAGING_LEVELS >= 4
@@ -1184,7 +1192,8 @@
     unmap_domain_page(l4t);
     if ( ((l4e_get_flags(l4e) & required_flags) != required_flags) ||
          (l4e_get_flags(l4e) & disallowed_flags) )
-        return 0;
+        return real_fault;
+    page_user &= l4e_get_flags(l4e);
 #endif
 
 #if CONFIG_PAGING_LEVELS >= 3
@@ -1197,13 +1206,14 @@
     unmap_domain_page(l3t);
 #if CONFIG_PAGING_LEVELS == 3
     if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
-        return 0;
+        return real_fault;
 #else
     if ( ((l3e_get_flags(l3e) & required_flags) != required_flags) ||
          (l3e_get_flags(l3e) & disallowed_flags) )
-        return 0;
+        return real_fault;
+    page_user &= l3e_get_flags(l3e);
     if ( l3e_get_flags(l3e) & _PAGE_PSE )
-        return 1;
+        goto leaf;
 #endif
 #endif
 
@@ -1213,9 +1223,10 @@
     unmap_domain_page(l2t);
     if ( ((l2e_get_flags(l2e) & required_flags) != required_flags) ||
          (l2e_get_flags(l2e) & disallowed_flags) )
-        return 0;
+        return real_fault;
+    page_user &= l2e_get_flags(l2e);
     if ( l2e_get_flags(l2e) & _PAGE_PSE )
-        return 1;
+        goto leaf;
 
     l1t = map_domain_page(mfn);
     l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
@@ -1223,26 +1234,36 @@
     unmap_domain_page(l1t);
     if ( ((l1e_get_flags(l1e) & required_flags) != required_flags) ||
          (l1e_get_flags(l1e) & disallowed_flags) )
-        return 0;
-
-    return 1;
+        return real_fault;
+    page_user &= l1e_get_flags(l1e);
+
+leaf:
+    /*
+     * Supervisor Mode Execution Protection (SMEP):
+     * Disallow supervisor execution from user-accessible mappings
+     */
+    if ( (read_cr4() & X86_CR4_SMEP) && page_user &&
+         ((error_code & (PFEC_insn_fetch|PFEC_user_mode)) == PFEC_insn_fetch) )
+        return smep_fault;
+
+    return spurious_fault;
 }
 
-static int spurious_page_fault(
+static enum pf_type spurious_page_fault(
     unsigned long addr, unsigned int error_code)
 {
     unsigned long flags;
-    int           is_spurious;
+    enum pf_type  pf_type;
 
     /*
      * Disabling interrupts prevents TLB flushing, and hence prevents
      * page tables from becoming invalid under our feet during the walk.
      */
     local_irq_save(flags);
-    is_spurious = __spurious_page_fault(addr, error_code);
+    pf_type = __page_fault_type(addr, error_code);
     local_irq_restore(flags);
 
-    return is_spurious;
+    return pf_type;
 }
 
 static int fixup_page_fault(unsigned long addr, struct cpu_user_regs *regs)
@@ -1317,6 +1338,7 @@
 {
     unsigned long addr, fixup;
     unsigned int error_code;
+    enum pf_type pf_type;
 
     addr = read_cr2();
 
@@ -1332,7 +1354,9 @@
 
     if ( unlikely(!guest_mode(regs)) )
     {
-        if ( spurious_page_fault(addr, error_code) )
+        pf_type = spurious_page_fault(addr, error_code);
+        BUG_ON(pf_type == smep_fault);
+        if ( pf_type != real_fault )
             return;
 
         if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
@@ -1354,9 +1378,17 @@
               error_code, _p(addr));
     }
 
-    if ( unlikely(current->domain->arch.suppress_spurious_page_faults
-                  && spurious_page_fault(addr, error_code)) )
-        return;
+    if ( unlikely(current->domain->arch.suppress_spurious_page_faults) )
+    {
+        pf_type = spurious_page_fault(addr, error_code);
+        if ( pf_type == smep_fault )
+        {
+            gdprintk(XENLOG_ERR, "Fatal SMEP fault\n");
+            domain_crash(current->domain);
+        }
+        if ( pf_type != real_fault )
+            return;
+    }
 
     propagate_page_fault(addr, regs->error_code);
 }
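[Editorial note: the key idea in __page_fault_type() above is that page_user
starts as _PAGE_USER and is AND-ed with the flags of every paging-structure
entry, so it stays non-zero only if the U/S bit is set at every level -- the
exact condition under which SMEP forbids a supervisor-mode instruction fetch.
The stand-alone C model below (not Xen code) illustrates just that final
classification step, assuming the walk already found a present, permitted
mapping; classify() is a hypothetical helper, and the flag values are the
standard x86 PTE/error-code bits:]

/*
 * Toy model of the SMEP classification in __page_fault_type().
 * Compiles as plain C; names mirror the patch for readability.
 */
#include <stdio.h>

#define _PAGE_USER       0x4        /* U/S flag, bit 2 of a PTE */
#define PFEC_user_mode   0x4        /* fault occurred at CPL 3 */
#define PFEC_insn_fetch  0x10       /* fault was an instruction fetch */
#define X86_CR4_SMEP     0x100000

enum pf_type { real_fault, smep_fault, spurious_fault };

static enum pf_type classify(unsigned long cr4, unsigned int error_code,
                             const unsigned int *flags, int levels)
{
    unsigned int page_user = _PAGE_USER;
    int i;

    /* AND across all levels: cleared if any entry is supervisor-only. */
    for ( i = 0; i < levels; i++ )
        page_user &= flags[i];

    /* Supervisor instruction fetch from a user-accessible mapping. */
    if ( (cr4 & X86_CR4_SMEP) && page_user &&
         ((error_code & (PFEC_insn_fetch | PFEC_user_mode)) == PFEC_insn_fetch) )
        return smep_fault;

    return spurious_fault;
}

int main(void)
{
    /* 4-level walk, U/S set everywhere: supervisor fetch => SMEP fault. */
    unsigned int user_mapping[4] = { 0x7, 0x7, 0x7, 0x7 };
    enum pf_type t = classify(X86_CR4_SMEP, PFEC_insn_fetch, user_mapping, 4);

    printf("%s\n", t == smep_fault ? "smep_fault" : "other");
    return 0;
}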
diff -r bcd2476c2e2d -r 0c0884fd8b49 xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h	Fri Jun 03 17:27:01 2011 +0100
+++ b/xen/include/asm-x86/cpufeature.h	Fri Jun 03 21:39:00 2011 +0100
@@ -141,8 +141,9 @@
 #define X86_FEATURE_TBM         (6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT     (6*32+22) /* topology extensions CPUID leafs */
 
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
 #define X86_FEATURE_FSGSBASE	(7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
+#define X86_FEATURE_SMEP	(7*32+ 7) /* Supervisor Mode Execution Protection */
 
 #define cpu_has(c, bit)		test_bit(bit, (c)->x86_capability)
 #define boot_cpu_has(bit)	test_bit(bit, boot_cpu_data.x86_capability)
@@ -202,6 +203,8 @@
 #define cpu_has_fsgsbase	boot_cpu_has(X86_FEATURE_FSGSBASE)
 #endif
 
+#define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
+
 #define cpu_has_ffxsr           ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) \
                                  && boot_cpu_has(X86_FEATURE_FFXSR))
 
diff -r bcd2476c2e2d -r 0c0884fd8b49 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h	Fri Jun 03 17:27:01 2011 +0100
+++ b/xen/include/asm-x86/domain.h	Fri Jun 03 21:39:00 2011 +0100
@@ -527,12 +527,14 @@
 /* Convert between guest-visible and real CR4 values. */
 #define pv_guest_cr4_to_real_cr4(v)                         \
     (((v)->arch.pv_vcpu.ctrlreg[4]                          \
-      | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE))    \
-      | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)          \
-      | ((xsave_enabled(v))? X86_CR4_OSXSAVE : 0))          \
-     & ~X86_CR4_DE)
-#define real_cr4_to_pv_guest_cr4(c) \
-    ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD | X86_CR4_OSXSAVE))
+      | (mmu_cr4_features                                   \
+         & (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_SMEP))      \
+      | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)          \
+      | ((xsave_enabled(v))? X86_CR4_OSXSAVE : 0))          \
+     & ~X86_CR4_DE)
+#define real_cr4_to_pv_guest_cr4(c)                         \
+    ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD        \
+             | X86_CR4_OSXSAVE | X86_CR4_SMEP))
 
 void domain_cpuid(struct domain *d,
                   unsigned int  input,
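[Editorial note: the domain.h hunk adds X86_CR4_SMEP to the set of
host-controlled CR4 bits: it is OR-ed into the real CR4 whenever a PV vCPU's
CR4 is loaded, and masked out again before the guest reads CR4 back, so SMEP
stays enabled without becoming guest-visible. The toy round-trip below (not
Xen source; the macros are simplified stand-ins for the patch's versions)
shows that the guest's view is unchanged:]

/* Toy model of the CR4 guest-view/real-value round-trip. */
#include <stdio.h>

#define X86_CR4_TSD      0x4        /* bit 2  */
#define X86_CR4_PSE      0x10       /* bit 4  */
#define X86_CR4_PGE      0x80       /* bit 7  */
#define X86_CR4_OSXSAVE  0x40000    /* bit 18 */
#define X86_CR4_SMEP     0x100000   /* bit 20 */

#define HOST_OWNED (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD \
                    | X86_CR4_OSXSAVE | X86_CR4_SMEP)

int main(void)
{
    unsigned long guest_cr4 = 0x20;  /* e.g. guest has CR4.PAE set */
    unsigned long mmu_cr4_features = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_SMEP;

    /* pv_guest_cr4_to_real_cr4 (simplified): fold in host-mandated bits. */
    unsigned long real_cr4 = guest_cr4 |
        (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_SMEP));

    /* real_cr4_to_pv_guest_cr4: strip them again for the guest's view. */
    unsigned long back = real_cr4 & ~HOST_OWNED;

    /* back == guest_cr4: SMEP never leaks into the guest-visible CR4. */
    printf("real CR4: %#lx, guest view: %#lx\n", real_cr4, back);
    return 0;
}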
diff -r bcd2476c2e2d -r 0c0884fd8b49 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h	Fri Jun 03 17:27:01 2011 +0100
+++ b/xen/include/asm-x86/processor.h	Fri Jun 03 21:39:00 2011 +0100
@@ -85,6 +85,7 @@
 #define X86_CR4_SMXE		0x4000  /* enable SMX */
 #define X86_CR4_FSGSBASE	0x10000 /* enable {rd,wr}{fs,gs}base */
 #define X86_CR4_OSXSAVE	0x40000 /* enable XSAVE/XRSTOR */
+#define X86_CR4_SMEP		0x100000 /* enable SMEP */
 
 /*
  * Trap/fault mnemonics.

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog