[Xen-changelog] [xen-unstable] x86: Fail CPU bringup cleanly if it cannot initialise HVM.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1274290815 -3600
# Node ID 095e4b964f990d780cb3896cd1a0cadf804647b0
# Parent  f71b641aa327b587886729958cc316ea13ba2181
x86: Fail CPU bringup cleanly if it cannot initialise HVM.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/acpi/power.c          |    2 
 xen/arch/x86/cpu/amd.c             |    4 -
 xen/arch/x86/cpu/intel.c           |    2 
 xen/arch/x86/hvm/hvm.c             |   42 +++++++++++----
 xen/arch/x86/hvm/svm/svm.c         |   51 +++++++-----------
 xen/arch/x86/hvm/vmx/vmcs.c        |   99 +++++++++++++++++++++++++++++--------
 xen/arch/x86/hvm/vmx/vmx.c         |   28 ++--------
 xen/arch/x86/smpboot.c             |   33 +++++++++--
 xen/include/asm-x86/hvm/hvm.h      |    2 
 xen/include/asm-x86/hvm/support.h  |    2 
 xen/include/asm-x86/hvm/vmx/vmcs.h |    1 
 11 files changed, 163 insertions(+), 103 deletions(-)

diff -r f71b641aa327 -r 095e4b964f99 xen/arch/x86/acpi/power.c
--- a/xen/arch/x86/acpi/power.c Wed May 19 18:38:19 2010 +0100
+++ b/xen/arch/x86/acpi/power.c Wed May 19 18:40:15 2010 +0100
@@ -198,7 +198,7 @@ static int enter_state(u32 state)
     local_irq_restore(flags);
     console_end_sync();
     acpi_sleep_post(state);
-    if ( !hvm_cpu_up() )
+    if ( hvm_cpu_up() )
         BUG();
 
 enable_cpu:
diff -r f71b641aa327 -r 095e4b964f99 xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c Wed May 19 18:38:19 2010 +0100
+++ b/xen/arch/x86/cpu/amd.c Wed May 19 18:40:15 2010 +0100
@@ -12,8 +12,6 @@
 
 #include "cpu.h"
 #include "amd.h"
-
-void start_svm(struct cpuinfo_x86 *c);
 
 /*
  * Pre-canned values for overriding the CPUID features
@@ -516,8 +514,6 @@ static void __devinit init_amd(struct cp
 		disable_c1_ramping();
 
 	set_cpuidmask(c);
-
-	start_svm(c);
 }
 
 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
diff -r f71b641aa327 -r 095e4b964f99 xen/arch/x86/cpu/intel.c
--- a/xen/arch/x86/cpu/intel.c Wed May 19 18:38:19 2010 +0100
+++ b/xen/arch/x86/cpu/intel.c Wed May 19 18:40:15 2010 +0100
@@ -218,8 +218,6 @@ static void __devinit init_intel(struct
 	     (cpuid_eax(0x00000006) & (1u<<2)))
 		set_bit(X86_FEATURE_ARAT, c->x86_capability);
 
-	start_vmx();
-
 	if ( !use_xsave )
 		clear_bit(X86_FEATURE_XSAVE, boot_cpu_data.x86_capability);
 
diff -r f71b641aa327 -r 095e4b964f99 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed May 19 18:38:19 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c Wed May 19 18:40:15 2010 +0100
@@ -100,12 +100,35 @@ static struct notifier_block cpu_nfb = {
     .notifier_call = cpu_callback
 };
 
-void hvm_enable(struct hvm_function_table *fns)
-{
+static int __init hvm_enable(void)
+{
+    extern struct hvm_function_table *start_svm(void);
+    extern struct hvm_function_table *start_vmx(void);
     extern int hvm_port80_allowed;
 
-    BUG_ON(hvm_enabled);
-    printk("HVM: %s enabled\n", fns->name);
+    struct hvm_function_table *fns = NULL;
+
+    switch ( boot_cpu_data.x86_vendor )
+    {
+    case X86_VENDOR_INTEL:
+        fns = start_vmx();
+        break;
+    case X86_VENDOR_AMD:
+        fns = start_svm();
+        break;
+    default:
+        break;
+    }
+
+    if ( fns == NULL )
+        return 0;
+
+    hvm_funcs = *fns;
+    hvm_enabled = 1;
+
+    printk("HVM: %s enabled\n", hvm_funcs.name);
+    if ( hvm_funcs.hap_supported )
+        printk("HVM: Hardware Assisted Paging detected.\n");
 
     /*
      * Allow direct access to the PC debug ports 0x80 and 0xed (they are
@@ -116,14 +139,11 @@ void hvm_enable(struct hvm_function_tabl
     __clear_bit(0x80, hvm_io_bitmap);
     __clear_bit(0xed, hvm_io_bitmap);
 
-    hvm_funcs = *fns;
-    hvm_enabled = 1;
-
-    if ( hvm_funcs.hap_supported )
-        printk("HVM: Hardware Assisted Paging detected.\n");
-
     register_cpu_notifier(&cpu_nfb);
-}
+
+    return 0;
+}
+presmp_initcall(hvm_enable);
 
 /*
  * Need to re-inject a given event? We avoid re-injecting software exceptions
diff -r f71b641aa327 -r 095e4b964f99 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed May 19 18:38:19 2010 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Wed May 19 18:40:15 2010 +0100
@@ -856,22 +856,23 @@ static void svm_init_erratum_383(struct
     amd_erratum383_found = 1;
 }
 
-static int svm_cpu_up(struct cpuinfo_x86 *c)
+static int svm_cpu_up(void)
 {
     u32 eax, edx, phys_hsa_lo, phys_hsa_hi;
     u64 phys_hsa;
-    int cpu = smp_processor_id();
+    int rc, cpu = smp_processor_id();
+    struct cpuinfo_x86 *c = &cpu_data[cpu];
 
     /* Check whether SVM feature is disabled in BIOS */
    rdmsr(MSR_K8_VM_CR, eax, edx);
     if ( eax & K8_VMCR_SVME_DISABLE )
     {
         printk("CPU%d: AMD SVM Extension is disabled in BIOS.\n", cpu);
-        return 0;
-    }
-
-    if ( svm_cpu_up_prepare(cpu) != 0 )
-        return 0;
+        return -EINVAL;
+    }
+
+    if ( (rc = svm_cpu_up_prepare(cpu)) != 0 )
+        return rc;
 
     write_efer(read_efer() | EFER_SVME);
 
@@ -905,39 +906,26 @@ static int svm_cpu_up(struct cpuinfo_x86
     else
     {
         if ( cpu_has_lmsl )
-            printk(XENLOG_WARNING "Inconsistent LMLSE support across CPUs!\n");
+            printk(XENLOG_WARNING "Inconsistent LMSLE support across CPUs!\n");
         cpu_has_lmsl = 0;
     }
 #endif
 
-    return 1;
-}
-
-void start_svm(struct cpuinfo_x86 *c)
-{
-    static bool_t bootstrapped;
-
-    if ( test_and_set_bool(bootstrapped) )
-    {
-        if ( hvm_enabled && !svm_cpu_up(c) )
-        {
-            printk("SVM: FATAL: failed to initialise CPU%d!\n",
-                   smp_processor_id());
-            BUG();
-        }
-        return;
-    }
-
+    return 0;
+}
+
+struct hvm_function_table * __init start_svm(void)
+{
     /* Xen does not fill x86_capability words except 0. */
     boot_cpu_data.x86_capability[5] = cpuid_ecx(0x80000001);
 
     if ( !test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability) )
-        return;
-
-    if ( !svm_cpu_up(c) )
+        return NULL;
+
+    if ( svm_cpu_up() )
     {
         printk("SVM: failed to initialise.\n");
-        return;
+        return NULL;
     }
 
     setup_vmcb_dump();
@@ -949,7 +937,7 @@ void start_svm(struct cpuinfo_x86 *c)
     svm_function_table.hap_1gb_pgtb = (CONFIG_PAGING_LEVELS == 4)?
         !!(cpuid_edx(0x80000001) & 0x04000000):0;
 
-    hvm_enable(&svm_function_table);
+    return &svm_function_table;
 }
 
 static void svm_do_nested_pgfault(paddr_t gpa)
@@ -1402,6 +1390,7 @@ static struct hvm_function_table __read_
     .name                 = "SVM",
     .cpu_up_prepare       = svm_cpu_up_prepare,
     .cpu_dead             = svm_cpu_dead,
+    .cpu_up               = svm_cpu_up,
     .cpu_down             = svm_cpu_down,
     .domain_initialise    = svm_domain_initialise,
     .domain_destroy       = svm_domain_destroy,
diff -r f71b641aa327 -r 095e4b964f99 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Wed May 19 18:38:19 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed May 19 18:40:15 2010 +0100
@@ -112,7 +112,14 @@ static u32 adjust_vmx_controls(u32 ctl_m
     return ctl;
 }
 
-static void vmx_init_vmcs_config(void)
+static bool_t cap_check(const char *name, u32 expected, u32 saw)
+{
+    if ( saw != expected )
+        printk("VMX %s: saw 0x%08x expected 0x%08x\n", name, saw, expected);
+    return saw != expected;
+}
+
+static int vmx_init_vmcs_config(void)
 {
     u32 vmx_basic_msr_low, vmx_basic_msr_high, min, opt;
     u32 _vmx_pin_based_exec_control;
@@ -121,6 +128,7 @@ static void vmx_init_vmcs_config(void)
     u8 ept_super_page_level_limit = 0;
     u32 _vmx_vmexit_control;
     u32 _vmx_vmentry_control;
+    bool_t mismatch = 0;
 
     rdmsr(MSR_IA32_VMX_BASIC, vmx_basic_msr_low, vmx_basic_msr_high);
 
@@ -243,27 +251,73 @@
     else
     {
         /* Globals are already initialised: re-check them. */
-        BUG_ON(vmcs_revision_id != vmx_basic_msr_low);
-        BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
-        BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
-        BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
-        BUG_ON(vmx_ept_super_page_level_limit > ept_super_page_level_limit);
-        BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
-        BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
-        BUG_ON(cpu_has_vmx_ins_outs_instr_info !=
-               !!(vmx_basic_msr_high & (1U<<22)));
+        mismatch |= cap_check(
+            "VMCS revision ID",
+            vmcs_revision_id, vmx_basic_msr_low);
+        mismatch |= cap_check(
+            "Pin-Based Exec Control",
+            vmx_pin_based_exec_control, _vmx_pin_based_exec_control);
+        mismatch |= cap_check(
+            "CPU-Based Exec Control",
+            vmx_cpu_based_exec_control, _vmx_cpu_based_exec_control);
+        mismatch |= cap_check(
+            "Secondary Exec Control",
+            vmx_secondary_exec_control, _vmx_secondary_exec_control);
+        mismatch |= cap_check(
+            "VMExit Control",
+            vmx_vmexit_control, _vmx_vmexit_control);
+        mismatch |= cap_check(
+            "VMEntry Control",
+            vmx_vmentry_control, _vmx_vmentry_control);
+        if ( vmx_ept_super_page_level_limit > ept_super_page_level_limit )
+        {
+            printk("EPT Super Page Limit: saw %u expected >= %u\n",
+                   ept_super_page_level_limit, vmx_ept_super_page_level_limit);
+            mismatch = 1;
+        }
+        if ( cpu_has_vmx_ins_outs_instr_info !=
+             !!(vmx_basic_msr_high & (1U<<22)) )
+        {
+            printk("VMX INS/OUTS Instruction Info: saw %d expected %d\n",
+                   !!(vmx_basic_msr_high & (1U<<22)),
+                   cpu_has_vmx_ins_outs_instr_info);
+            mismatch = 1;
+        }
+        if ( mismatch )
+        {
+            printk("VMX: Capabilities fatally differ between CPU%d and CPU0\n",
+                   smp_processor_id());
+            return -EINVAL;
+        }
     }
 
     /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
-    BUG_ON((vmx_basic_msr_high & 0x1fff) > PAGE_SIZE);
+    if ( (vmx_basic_msr_high & 0x1fff) > PAGE_SIZE )
+    {
+        printk("VMX: CPU%d VMCS size is too big (%u bytes)\n",
+               smp_processor_id(), vmx_basic_msr_high & 0x1fff);
+        return -EINVAL;
+    }
 
 #ifdef __x86_64__
     /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
-    BUG_ON(vmx_basic_msr_high & (1u<<16));
+    if ( vmx_basic_msr_high & (1u<<16) )
+    {
+        printk("VMX: CPU%d limits VMX structure pointers to 32 bits\n",
+               smp_processor_id());
+        return -EINVAL;
+    }
 #endif
 
     /* Require Write-Back (WB) memory type for VMCS accesses. */
-    BUG_ON(((vmx_basic_msr_high >> 18) & 15) != 6);
+    if ( ((vmx_basic_msr_high >> 18) & 15) != 6 )
+    {
+        printk("VMX: CPU%d has unexpected VMCS access type %u\n",
+               smp_processor_id(), (vmx_basic_msr_high >> 18) & 15);
+        return -EINVAL;
+    }
+
+    return 0;
 }
 
 static struct vmcs_struct *vmx_alloc_vmcs(void)
@@ -359,7 +413,7 @@ int vmx_cpu_up(void)
 int vmx_cpu_up(void)
 {
     u32 eax, edx;
-    int bios_locked, cpu = smp_processor_id();
+    int rc, bios_locked, cpu = smp_processor_id();
     u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1;
 
     BUG_ON(!(read_cr4() & X86_CR4_VMXE));
@@ -375,7 +429,7 @@ int vmx_cpu_up(void)
     {
         printk("CPU%d: some settings of host CR0 are "
                "not allowed in VMX operation.\n", cpu);
-        return 0;
+        return -EINVAL;
     }
 
     rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
@@ -388,7 +442,7 @@ int vmx_cpu_up(void)
                   : IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX)) )
         {
             printk("CPU%d: VMX disabled by BIOS.\n", cpu);
-            return 0;
+            return -EINVAL;
         }
     }
     else
@@ -400,12 +454,13 @@ int vmx_cpu_up(void)
         wrmsr(IA32_FEATURE_CONTROL_MSR, eax, 0);
     }
 
-    vmx_init_vmcs_config();
+    if ( (rc = vmx_init_vmcs_config()) != 0 )
+        return rc;
 
     INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
 
-    if ( vmx_cpu_up_prepare(cpu) != 0 )
-        return 0;
+    if ( (rc = vmx_cpu_up_prepare(cpu)) != 0 )
+        return rc;
 
     switch ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
     {
@@ -419,12 +474,12 @@ int vmx_cpu_up(void)
                    "in your BIOS configuration?\n", cpu);
             printk(" --> Disable TXT in your BIOS unless using a secure "
                    "bootloader.\n");
-            return 0;
+            return -EINVAL;
         }
         /* fall through */
     case -1: /* CF==1 or ZF==1 */
         printk("CPU%d: unexpected VMXON failure\n", cpu);
-        return 0;
+        return -EINVAL;
     case 0: /* success */
         break;
     default:
@@ -438,7 +493,7 @@ int vmx_cpu_up(void)
     if ( cpu_has_vmx_vpid )
         vpid_sync_all();
 
-    return 1;
+    return 0;
 }
 
 void vmx_cpu_down(void)
diff -r f71b641aa327 -r 095e4b964f99 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed May 19 18:38:19 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed May 19 18:40:15 2010 +0100
@@ -1413,35 +1413,22 @@ static struct hvm_function_table __read_
     .set_rdtsc_exiting    = vmx_set_rdtsc_exiting
 };
 
-void start_vmx(void)
-{
-    static bool_t bootstrapped;
-
+struct hvm_function_table * __init start_vmx(void)
+{
     vmx_save_host_msrs();
-
-    if ( test_and_set_bool(bootstrapped) )
-    {
-        if ( hvm_enabled && !vmx_cpu_up() )
-        {
-            printk("VMX: FATAL: failed to initialise CPU%d!\n",
-                   smp_processor_id());
-            BUG();
-        }
-        return;
-    }
 
     /* Xen does not fill x86_capability words except 0. */
     boot_cpu_data.x86_capability[4] = cpuid_ecx(1);
 
     if ( !test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability) )
-        return;
+        return NULL;
 
     set_in_cr4(X86_CR4_VMXE);
 
-    if ( !vmx_cpu_up() )
+    if ( vmx_cpu_up() )
     {
         printk("VMX: failed to initialise.\n");
-        return;
+        return NULL;
     }
 
     if ( cpu_has_vmx_ept )
@@ -1450,12 +1437,11 @@ void start_vmx(void)
         setup_ept_dump();
     }
 
-    vmx_function_table.hap_1gb_pgtb = ( vmx_ept_super_page_level_limit == 2 ) ?
-                                      1 : 0;
+    vmx_function_table.hap_1gb_pgtb = (vmx_ept_super_page_level_limit == 2);
 
     setup_vmcs_dump();
 
-    hvm_enable(&vmx_function_table);
+    return &vmx_function_table;
 }
 
 /*
diff -r f71b641aa327 -r 095e4b964f99 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c Wed May 19 18:38:19 2010 +0100
+++ b/xen/arch/x86/smpboot.c Wed May 19 18:40:15 2010 +0100
@@ -71,7 +71,8 @@ u32 x86_cpu_to_apicid[NR_CPUS] __read_mo
 
 static void map_cpu_to_logical_apicid(void);
 
-enum cpu_state {
+static int cpu_error;
+static enum cpu_state {
     CPU_STATE_DEAD = 0,    /* slave -> master: I am completely dead */
     CPU_STATE_INIT,        /* master -> slave: Early bringup phase 1 */
     CPU_STATE_CALLOUT,     /* master -> slave: Early bringup phase 2 */
@@ -133,7 +134,8 @@ static void smp_store_cpu_info(int id)
 
 void smp_callin(void)
 {
-    int i;
+    unsigned int cpu = smp_processor_id();
+    int i, rc;
 
     /* Wait 2s total for startup. */
     Dprintk("Waiting for CALLOUT.\n");
@@ -155,7 +157,16 @@ void smp_callin(void)
     map_cpu_to_logical_apicid();
 
     /* Save our processor parameters. */
-    smp_store_cpu_info(smp_processor_id());
+    smp_store_cpu_info(cpu);
+
+    if ( (rc = hvm_cpu_up()) != 0 )
+    {
+        extern void (*dead_idle) (void);
+        printk("CPU%d: Failed to initialise HVM. Not coming online.\n", cpu);
+        cpu_error = rc;
+        cpu_exit_clear(cpu);
+        (*dead_idle)();
+    }
 
     /* Allow the master to continue. */
     set_cpu_state(CPU_STATE_CALLIN);
@@ -507,7 +518,7 @@ static int do_boot_cpu(int apicid, int c
 static int do_boot_cpu(int apicid, int cpu)
 {
     unsigned long boot_error;
-    int timeout;
+    int timeout, rc = 0;
     unsigned long start_eip;
 
     /*
@@ -548,8 +559,8 @@ static int do_boot_cpu(int apicid, int c
         /* Wait 5s total for a response. */
         for ( timeout = 0; timeout < 50000; timeout++ )
         {
-            if ( cpu_state == CPU_STATE_CALLIN )
-                break; /* It has booted */
+            if ( cpu_state != CPU_STATE_CALLOUT )
+                break;
             udelay(100);
         }
 
@@ -559,6 +570,11 @@ static int do_boot_cpu(int apicid, int c
             Dprintk("OK.\n");
            print_cpu_info(cpu);
             Dprintk("CPU has booted.\n");
+        }
+        else if ( cpu_state == CPU_STATE_DEAD )
+        {
+            rmb();
+            rc = cpu_error;
         }
         else
         {
@@ -575,7 +591,10 @@ static int do_boot_cpu(int apicid, int c
     }
 
     if ( boot_error )
+    {
         cpu_exit_clear(cpu);
+        rc = -EIO;
+    }
 
     /* mark "stuck" area as not stuck */
     bootsym(trampoline_cpu_started) = 0;
@@ -583,7 +602,7 @@ static int do_boot_cpu(int apicid, int c
 
     smpboot_restore_warm_reset_vector();
 
-    return boot_error ? -EIO : 0;
+    return rc;
 }
 
 void cpu_exit_clear(unsigned int cpu)
diff -r f71b641aa327 -r 095e4b964f99 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Wed May 19 18:38:19 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h Wed May 19 18:40:15 2010 +0100
@@ -299,7 +299,7 @@ void hvm_set_rdtsc_exiting(struct domain
 
 static inline int hvm_cpu_up(void)
 {
-    return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 1);
+    return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 0);
 }
 
 static inline void hvm_cpu_down(void)
diff -r f71b641aa327 -r 095e4b964f99 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed May 19 18:38:19 2010 +0100
+++ b/xen/include/asm-x86/hvm/support.h Wed May 19 18:40:15 2010 +0100
@@ -65,8 +65,6 @@ extern unsigned int opt_hvm_debug_level;
 #endif
 
 extern unsigned long hvm_io_bitmap[];
-
-void hvm_enable(struct hvm_function_table *);
 
 enum hvm_copy_result {
     HVMCOPY_okay = 0,
diff -r f71b641aa327 -r 095e4b964f99 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h Wed May 19 18:38:19 2010 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Wed May 19 18:40:15 2010 +0100
@@ -23,7 +23,6 @@
 #include <asm/hvm/io.h>
 #include <asm/hvm/vpmu.h>
 
-extern void start_vmx(void);
 extern void vmcs_dump_vcpu(struct vcpu *v);
 extern void setup_vmcs_dump(void);
 extern int vmx_cpu_up_prepare(unsigned int cpu);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
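
For readers following the interface change: the thread running through every file above is that the bringup hooks, hvm_cpu_up(), svm_cpu_up(), vmx_cpu_up() and vmx_init_vmcs_config(), now return 0 on success and -errno on failure instead of a 1/0 boolean, which is why callers such as enter_state() test "if ( hvm_cpu_up() ) BUG();" and why smp_callin() propagates the code through cpu_error. A minimal stand-alone C sketch of that convention follows; fake_cpu_up() and the failing CPU number are invented for illustration and are not part of the changeset.

/* Sketch only: mimics the 0-on-success / -errno-on-failure convention. */
#include <errno.h>
#include <stdio.h>

static int fake_cpu_up(int cpu)
{
    /* Pretend CPU 3 has its virtualisation extension disabled in the BIOS. */
    return (cpu == 3) ? -EINVAL : 0;
}

int main(void)
{
    int cpu;

    for ( cpu = 0; cpu < 4; cpu++ )
    {
        int rc = fake_cpu_up(cpu);

        if ( rc != 0 )  /* non-zero now means failure, as in smp_callin() */
            printf("CPU%d: Failed to initialise HVM. Not coming online. (rc=%d)\n",
                   cpu, rc);
        else
            printf("CPU%d: online\n", cpu);
    }

    return 0;
}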