[Xen-changelog] [xen-unstable] [HVM][VMX] Move vmcs and I/O bitmap allocation into vmx_initialise_guest_resources()
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 82f481bda1c7c1c82545fb9d4aa0bab1509ab602
# Parent  4d2354be4aa691fc45ec8a33d3cc8e10b2b21a7c
[HVM][VMX] Move vmcs and I/O bitmap allocation into
vmx_initialise_guest_resources().

Signed-off-by: Xin B Li <xin.b.li@xxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c        |  195 ++++++++++++++++++-------------------
 xen/arch/x86/hvm/vmx/vmx.c         |  142 ++++++++++++++++----------
 xen/include/asm-x86/hvm/vmx/vmcs.h |   15 +-
 3 files changed, 196 insertions(+), 156 deletions(-)

diff -r 4d2354be4aa6 -r 82f481bda1c7 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Jul 05 10:32:33 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Jul 05 11:21:19 2006 +0100
@@ -41,34 +41,52 @@
 #include <asm/shadow_64.h>
 #endif
 
-int vmcs_size;
-
-struct vmcs_struct *vmx_alloc_vmcs(void)
+static int vmcs_size;
+static int vmcs_order;
+static u32 vmcs_revision_id;
+
+void vmx_init_vmcs_config(void)
+{
+    u32 vmx_msr_low, vmx_msr_high;
+
+    if ( vmcs_size )
+        return;
+
+    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
+
+    vmcs_revision_id = vmx_msr_low;
+
+    vmcs_size = vmx_msr_high & 0x1fff;
+    vmcs_order = get_order_from_bytes(vmcs_size);
+}
+
+static struct vmcs_struct *vmx_alloc_vmcs(void)
 {
     struct vmcs_struct *vmcs;
-    u32 vmx_msr_low, vmx_msr_high;
-
-    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
-    vmcs_size = vmx_msr_high & 0x1fff;
-    vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size));
-    memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
-
-    vmcs->vmcs_revision_id = vmx_msr_low;
+
+    if ( (vmcs = alloc_xenheap_pages(vmcs_order)) == NULL )
+    {
+        DPRINTK("Failed to allocate VMCS.\n");
+        return NULL;
+    }
+
+    memset(vmcs, 0, vmcs_size); /* don't remove this */
+    vmcs->vmcs_revision_id = vmcs_revision_id;
+
     return vmcs;
 }
 
-static void free_vmcs(struct vmcs_struct *vmcs)
-{
-    int order;
-
-    order = get_order_from_bytes(vmcs_size);
-    free_xenheap_pages(vmcs, order);
+static void vmx_free_vmcs(struct vmcs_struct *vmcs)
+{
+    free_xenheap_pages(vmcs, vmcs_order);
 }
 
 static void __vmx_clear_vmcs(void *info)
 {
     struct vcpu *v = info;
+
     __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+
     v->arch.hvm_vmx.active_cpu = -1;
     v->arch.hvm_vmx.launched = 0;
 }
@@ -128,11 +146,19 @@ void vmx_vmcs_exit(struct vcpu *v)
     vcpu_unpause(v);
 }
 
+struct vmcs_struct *vmx_alloc_host_vmcs(void)
+{
+    return vmx_alloc_vmcs();
+}
+
+void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
+{
+    vmx_free_vmcs(vmcs);
+}
+
 static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
 {
     int error = 0;
-    void *io_bitmap_a;
-    void *io_bitmap_b;
 
     error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
                        MONITOR_PIN_BASED_EXEC_CONTROLS);
@@ -141,19 +167,8 @@ static inline int construct_vmcs_control
     error |= __vmwrite(VM_ENTRY_CONTROLS,
                        MONITOR_VM_ENTRY_CONTROLS);
 
-    /* need to use 0x1000 instead of PAGE_SIZE */
-    io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
-    io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
-    memset(io_bitmap_a, 0xff, 0x1000);
-    /* don't bother debug port access */
-    clear_bit(PC_DEBUG_PORT, io_bitmap_a);
-    memset(io_bitmap_b, 0xff, 0x1000);
-
-    error |= __vmwrite(IO_BITMAP_A, (u64) virt_to_maddr(io_bitmap_a));
-    error |= __vmwrite(IO_BITMAP_B, (u64) virt_to_maddr(io_bitmap_b));
-
-    arch_vmx->io_bitmap_a = io_bitmap_a;
-    arch_vmx->io_bitmap_b = io_bitmap_b;
+    error |= __vmwrite(IO_BITMAP_A, (u64)virt_to_maddr(arch_vmx->io_bitmap_a));
+    error |= __vmwrite(IO_BITMAP_B, (u64)virt_to_maddr(arch_vmx->io_bitmap_b));
 
     return error;
 }
@@ -429,67 +444,52 @@ static inline int construct_vmcs_host(vo
 }
 
 /*
- * Need to extend to support full virtualization.
+ * the working VMCS pointer has been set properly
+ * just before entering this function.
  */
 static int construct_vmcs(struct vcpu *v, cpu_user_regs_t *regs)
 {
     struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
     int error;
-    long rc;
-
-    memset(arch_vmx, 0, sizeof(struct arch_vmx_struct));
-
-    spin_lock_init(&arch_vmx->vmcs_lock);
-
-    /*
-     * Create a new VMCS
-     */
-    if (!(arch_vmx->vmcs = vmx_alloc_vmcs())) {
-        printk("Failed to create a new VMCS\n");
+
+    if ( (error = construct_vmcs_controls(arch_vmx)) ) {
+        printk("construct_vmcs: construct_vmcs_controls failed.\n");
+        return error;
+    }
+
+    /* host selectors */
+    if ( (error = construct_vmcs_host()) ) {
+        printk("construct_vmcs: construct_vmcs_host failed.\n");
+        return error;
+    }
+
+    /* guest selectors */
+    if ( (error = construct_init_vmcs_guest(regs)) ) {
+        printk("construct_vmcs: construct_vmcs_guest failed.\n");
+        return error;
+    }
+
+    if ( (error = __vmwrite(EXCEPTION_BITMAP,
+                            MONITOR_DEFAULT_EXCEPTION_BITMAP)) ) {
+        printk("construct_vmcs: setting exception bitmap failed.\n");
+        return error;
+    }
+
+    if ( regs->eflags & EF_TF )
+        error = __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
+    else
+        error = __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
+
+    return error;
+}
+
+int vmx_create_vmcs(struct vcpu *v)
+{
+    if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
        return -ENOMEM;
-    }
-    __vmx_clear_vmcs(v);
-    vmx_load_vmcs(v);
-
-    if ((error = construct_vmcs_controls(arch_vmx))) {
-        printk("construct_vmcs: construct_vmcs_controls failed\n");
-        rc = -EINVAL;
-        goto err_out;
-    }
-
-    /* host selectors */
-    if ((error = construct_vmcs_host())) {
-        printk("construct_vmcs: construct_vmcs_host failed\n");
-        rc = -EINVAL;
-        goto err_out;
-    }
-
-    /* guest selectors */
-    if ((error = construct_init_vmcs_guest(regs))) {
-        printk("construct_vmcs: construct_vmcs_guest failed\n");
-        rc = -EINVAL;
-        goto err_out;
-    }
-
-    if ((error |= __vmwrite(EXCEPTION_BITMAP,
-                            MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
-        printk("construct_vmcs: setting Exception bitmap failed\n");
-        rc = -EINVAL;
-        goto err_out;
-    }
-
-    if (regs->eflags & EF_TF)
-        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
-    else
-        __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
 
     return 0;
-
-err_out:
-    vmx_destroy_vmcs(v);
-    return rc;
 }
 
 void vmx_destroy_vmcs(struct vcpu *v)
@@ -501,14 +501,14 @@ void vmx_destroy_vmcs(struct vcpu *v)
 
     vmx_clear_vmcs(v);
 
-    free_vmcs(arch_vmx->vmcs);
+    free_xenheap_pages(arch_vmx->io_bitmap_a, IO_BITMAP_ORDER);
+    free_xenheap_pages(arch_vmx->io_bitmap_b, IO_BITMAP_ORDER);
+
+    arch_vmx->io_bitmap_a = NULL;
+    arch_vmx->io_bitmap_b = NULL;
+
+    vmx_free_vmcs(arch_vmx->vmcs);
     arch_vmx->vmcs = NULL;
-
-    free_xenheap_pages(arch_vmx->io_bitmap_a, get_order_from_bytes(0x1000));
-    arch_vmx->io_bitmap_a = NULL;
-
-    free_xenheap_pages(arch_vmx->io_bitmap_b, get_order_from_bytes(0x1000));
-    arch_vmx->io_bitmap_b = NULL;
 }
 
 void vm_launch_fail(unsigned long eflags)
@@ -547,19 +547,20 @@ void arch_vmx_do_resume(struct vcpu *v)
 
 void arch_vmx_do_launch(struct vcpu *v)
 {
-    int error;
     cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;
 
-    error = construct_vmcs(v, regs);
-    if ( error < 0 )
+    vmx_load_vmcs(v);
+
+    if ( construct_vmcs(v, regs) < 0 )
     {
-        if (v->vcpu_id == 0) {
-            printk("Failed to construct a new VMCS for BSP.\n");
+        if ( v->vcpu_id == 0 ) {
+            printk("Failed to construct VMCS for BSP.\n");
         } else {
-            printk("Failed to construct a new VMCS for AP %d\n", v->vcpu_id);
+            printk("Failed to construct VMCS for AP %d.\n", v->vcpu_id);
        }
         domain_crash_synchronous();
     }
+
     vmx_do_launch(v);
     reset_stack_and_jump(vmx_asm_do_vmentry);
 }
diff -r 4d2354be4aa6 -r 82f481bda1c7 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Jul 05 10:32:33 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Jul 05 11:21:19 2006 +0100
@@ -54,34 +54,73 @@ static void vmx_ctxt_switch_from(struct
 static void vmx_ctxt_switch_from(struct vcpu *v);
 static void vmx_ctxt_switch_to(struct vcpu *v);
 
-void vmx_final_setup_guest(struct vcpu *v)
-{
+static int vmx_initialize_guest_resources(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    struct vcpu *vc;
+    void *io_bitmap_a, *io_bitmap_b;
+    int rc;
+
     v->arch.schedule_tail = arch_vmx_do_launch;
     v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
     v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
 
-    if ( v->vcpu_id == 0 )
-    {
-        struct domain *d = v->domain;
-        struct vcpu *vc;
-
+    if ( v->vcpu_id != 0 )
+        return 1;
+
+    for_each_vcpu ( d, vc )
+    {
         /* Initialize monitor page table */
-        for_each_vcpu(d, vc)
-            vc->arch.monitor_table = pagetable_null();
-
-        /*
-         * Required to do this once per domain
-         * XXX todo: add a seperate function to do these.
-         */
-        memset(&d->shared_info->evtchn_mask[0], 0xff,
-               sizeof(d->shared_info->evtchn_mask));
-
-        /* Put the domain in shadow mode even though we're going to be using
-         * the shared 1:1 page table initially. It shouldn't hurt */
-        shadow_mode_enable(d,
-                           SHM_enable|SHM_refcounts|
-                           SHM_translate|SHM_external|SHM_wr_pt_pte);
-    }
+        vc->arch.monitor_table = pagetable_null();
+
+        memset(&vc->arch.hvm_vmx, 0, sizeof(struct arch_vmx_struct));
+
+        if ( (rc = vmx_create_vmcs(vc)) != 0 )
+        {
+            DPRINTK("Failed to create VMCS for vcpu %d: err=%d.\n",
+                    vc->vcpu_id, rc);
+            return 0;
+        }
+
+        spin_lock_init(&vc->arch.hvm_vmx.vmcs_lock);
+
+        if ( (io_bitmap_a = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
+        {
+            DPRINTK("Failed to allocate io bitmap b for vcpu %d.\n",
+                    vc->vcpu_id);
+            return 0;
+        }
+
+        if ( (io_bitmap_b = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
+        {
+            DPRINTK("Failed to allocate io bitmap b for vcpu %d.\n",
+                    vc->vcpu_id);
+            return 0;
+        }
+
+        memset(io_bitmap_a, 0xff, 0x1000);
+        memset(io_bitmap_b, 0xff, 0x1000);
+
+        /* don't bother debug port access */
+        clear_bit(PC_DEBUG_PORT, io_bitmap_a);
+
+        vc->arch.hvm_vmx.io_bitmap_a = io_bitmap_a;
+        vc->arch.hvm_vmx.io_bitmap_b = io_bitmap_b;
+    }
+
+    /*
+     * Required to do this once per domain XXX todo: add a seperate function
+     * to do these.
+     */
+    memset(&d->shared_info->evtchn_mask[0], 0xff,
+           sizeof(d->shared_info->evtchn_mask));
+
+    /* Put the domain in shadow mode even though we're going to be using
+     * the shared 1:1 page table initially. It shouldn't hurt */
+    shadow_mode_enable(
+        d, SHM_enable|SHM_refcounts|SHM_translate|SHM_external|SHM_wr_pt_pte);
+
+    return 1;
 }
 
 static void vmx_relinquish_guest_resources(struct domain *d)
@@ -90,9 +129,9 @@ static void vmx_relinquish_guest_resourc
 
     for_each_vcpu ( d, v )
     {
+        vmx_destroy_vmcs(v);
         if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
             continue;
-        vmx_destroy_vmcs(v);
         free_monitor_pagetable(v);
         kill_timer(&v->arch.hvm_vmx.hlt_timer);
         if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
@@ -442,12 +481,6 @@ void stop_vmx(void)
 {
     if (read_cr4() & X86_CR4_VMXE)
         __vmxoff();
-}
-
-int vmx_initialize_guest_resources(struct vcpu *v)
-{
-    vmx_final_setup_guest(v);
-    return 1;
 }
 
 void vmx_migrate_timers(struct vcpu *v)
@@ -638,58 +671,61 @@ static int check_vmx_controls(u32 ctrls,
 
 int start_vmx(void)
 {
+    u32 eax, edx;
     struct vmcs_struct *vmcs;
-    u32 ecx;
-    u32 eax, edx;
-    u64 phys_vmcs;      /* debugging */
 
     /*
      * Xen does not fill x86_capability words except 0.
      */
-    ecx = cpuid_ecx(1);
-    boot_cpu_data.x86_capability[4] = ecx;
+    boot_cpu_data.x86_capability[4] = cpuid_ecx(1);
 
     if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
         return 0;
 
     rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
 
-    if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) {
-        if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) {
+    if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK )
+    {
+        if ( (eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0 )
+        {
             printk("VMX disabled by Feature Control MSR.\n");
             return 0;
         }
    }
-    else {
+    else
+    {
        wrmsr(IA32_FEATURE_CONTROL_MSR,
              IA32_FEATURE_CONTROL_MSR_LOCK |
              IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
    }
 
-    if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
-                            MSR_IA32_VMX_PINBASED_CTLS_MSR))
+    if ( !check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
+                             MSR_IA32_VMX_PINBASED_CTLS_MSR) )
         return 0;
-    if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
-                            MSR_IA32_VMX_PROCBASED_CTLS_MSR))
+    if ( !check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
+                             MSR_IA32_VMX_PROCBASED_CTLS_MSR) )
         return 0;
-    if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
-                            MSR_IA32_VMX_EXIT_CTLS_MSR))
+    if ( !check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
+                             MSR_IA32_VMX_EXIT_CTLS_MSR) )
         return 0;
-    if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
-                            MSR_IA32_VMX_ENTRY_CTLS_MSR))
+    if ( !check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
+                             MSR_IA32_VMX_ENTRY_CTLS_MSR) )
         return 0;
 
-    set_in_cr4(X86_CR4_VMXE);   /* Enable VMXE */
-
-    if (!(vmcs = vmx_alloc_vmcs())) {
-        printk("Failed to allocate VMCS\n");
+    set_in_cr4(X86_CR4_VMXE);
+
+    vmx_init_vmcs_config();
+
+    if ( (vmcs = vmx_alloc_host_vmcs()) == NULL )
+    {
+        printk("Failed to allocate host VMCS\n");
         return 0;
     }
 
-    phys_vmcs = (u64) virt_to_maddr(vmcs);
-
-    if (__vmxon(phys_vmcs)) {
+    if ( __vmxon(virt_to_maddr(vmcs)) )
+    {
         printk("VMXON failed\n");
+        vmx_free_host_vmcs(vmcs);
         return 0;
     }
diff -r 4d2354be4aa6 -r 82f481bda1c7 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jul 05 10:32:33 2006 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jul 05 11:21:19 2006 +0100
@@ -27,9 +27,7 @@ extern int start_vmx(void);
 extern int start_vmx(void);
 extern void stop_vmx(void);
 extern void vmcs_dump_vcpu(void);
-void vmx_final_setup_guest(struct vcpu *v);
-
-void vmx_enter_scheduler(void);
+extern void vmx_init_vmcs_config(void);
 
 enum {
     VMX_CPU_STATE_PAE_ENABLED=0,
@@ -45,8 +43,6 @@ struct vmcs_struct {
     u32 vmcs_revision_id;
     unsigned char data [0]; /* vmcs size is read from MSR */
 };
-
-extern int vmcs_size;
 
 enum {
     VMX_INDEX_MSR_LSTAR = 0,
@@ -63,6 +59,10 @@ struct vmx_msr_state {
     unsigned long msr_items[VMX_MSR_COUNT];
     unsigned long shadow_gs;
 };
+
+/* io bitmap is 4KBytes in size */
+#define IO_BITMAP_SIZE      0x1000
+#define IO_BITMAP_ORDER     (get_order_from_bytes(IO_BITMAP_SIZE))
 
 struct arch_vmx_struct {
     /* Virtual address of VMCS. */
@@ -101,7 +101,10 @@ struct arch_vmx_struct {
 
 void vmx_do_resume(struct vcpu *);
 
-struct vmcs_struct *vmx_alloc_vmcs(void);
+struct vmcs_struct *vmx_alloc_host_vmcs(void);
+void vmx_free_host_vmcs(struct vmcs_struct *vmcs);
+
+int vmx_create_vmcs(struct vcpu *v);
 void vmx_destroy_vmcs(struct vcpu *v);
 void vmx_vmcs_enter(struct vcpu *v);
 void vmx_vmcs_exit(struct vcpu *v);
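A note on the I/O bitmaps this patch now allocates per vcpu in vmx_initialize_guest_resources(): VT-x uses two 4KB bitmaps, A covering ports 0x0000-0x7fff and B covering ports 0x8000-0xffff, one bit per port, and a set bit forces a VM exit on access to that port. Below is a minimal standalone sketch of the same pattern the patch applies (fill both pages with 0xff so every port traps, then clear the bit for the PC debug port); the io_bitmap_allow() helper is hypothetical and not part of the changeset.

#include <stdint.h>
#include <string.h>

#define IO_BITMAP_SIZE  0x1000  /* 4KB per bitmap, as in the patch */
#define PC_DEBUG_PORT   0x80    /* POST/debug port the patch exempts */

/* Hypothetical helper: clear a port's bit so access no longer VM-exits. */
static void io_bitmap_allow(uint8_t *bitmap_a, uint8_t *bitmap_b,
                            uint16_t port)
{
    /* Bitmap A covers ports 0x0000-0x7fff, bitmap B covers 0x8000-0xffff. */
    uint8_t *bm = (port < 0x8000) ? bitmap_a : bitmap_b;
    uint16_t off = port & 0x7fff;

    bm[off / 8] &= (uint8_t)~(1u << (off % 8));
}

int main(void)
{
    static uint8_t io_bitmap_a[IO_BITMAP_SIZE];
    static uint8_t io_bitmap_b[IO_BITMAP_SIZE];

    /* Default policy, as in the patch: every bit set, every port traps. */
    memset(io_bitmap_a, 0xff, IO_BITMAP_SIZE);
    memset(io_bitmap_b, 0xff, IO_BITMAP_SIZE);

    /* "don't bother debug port access" -- the clear_bit() equivalent. */
    io_bitmap_allow(io_bitmap_a, io_bitmap_b, PC_DEBUG_PORT);

    return 0;
}

Allocating the bitmaps (and the VMCS, via vmx_create_vmcs()) once at vcpu-initialisation time, and freeing them symmetrically in vmx_destroy_vmcs(), leaves construct_vmcs() free of allocations, which is why the patch can drop its err_out teardown path.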