
[Xen-changelog] [xen-unstable] [HVM][VMX] Cleanups and fixes to VMCS lifecycle.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID d7543cff88ae4b9eb8a561dc72e6e126223ef720
# Parent  be05097d5d69925a72405201140df8da4c1cfa5c
[HVM][VMX] Cleanups and fixes to VMCS lifecycle.
 1. Maintain a 'launched' software flag to select between
    VMLAUNCH and VMRESUME.
 2. Take more care with VMPTRLD/VMCLEAR.
Also various other fixes (e.g., safe testing of
condition codes after executing a VMX instruction).
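
For reference, VMX instructions signal failure through EFLAGS: CF==1 means
VMfailInvalid and ZF==1 means VMfailValid, so "not above" (CF or ZF set) is
the failure test. The old helpers executed the instruction and then read the
flags with __save_flags(), which is unsafe because compiler-generated
instructions in between can clobber EFLAGS. The new helpers capture the flags
inside the same asm block, either as a return code ("setna %b0 ; neg %0") or,
where no caller could recover anyway, by crashing on the spot
("ja 1f ; ud2 ; 1:"). A standalone sketch of the return-code idiom, using CMP
as a stand-in flag-setter (illustrative only, not the patched code):

    #include <stdio.h>

    /* Illustrative only: CMP raises CF/ZF just as a failing VMX
     * instruction would; "not above" means CF==1 || ZF==1. */
    static inline int flags_to_rc(unsigned long a, unsigned long b)
    {
        int rc = 0;
        __asm__ __volatile__ ( "cmp %2,%1\n\t"
                               "setna %b0\n\t"   /* 1 if CF==1 or ZF==1 */
                               "neg %0"          /* ... then 1 -> -1    */
                               : "+q" (rc)
                               : "r" (a), "r" (b)
                               : "cc" );
        return rc;
    }

    int main(void)
    {
        /* 2 - 1 leaves CF==ZF==0 ("above"): rc 0.  1 - 2 borrows: rc -1. */
        printf("%d %d\n", flags_to_rc(2, 1), flags_to_rc(1, 2));
        return 0;
    }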

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/ia64/vmx/vmx_init.c        |    3 
 xen/arch/x86/hvm/vmx/vmcs.c         |  177 ++++++++++++++++++------------------
 xen/arch/x86/hvm/vmx/vmx.c          |   89 ++----------------
 xen/arch/x86/hvm/vmx/x86_32/exits.S |   74 +++++----------
 xen/arch/x86/hvm/vmx/x86_64/exits.S |   75 +++++----------
 xen/arch/x86/x86_32/asm-offsets.c   |    3 
 xen/arch/x86/x86_64/asm-offsets.c   |    3 
 xen/include/asm-ia64/vmx_vpd.h      |    3 
 xen/include/asm-x86/hvm/vmx/vmcs.h  |   56 ++++++-----
 xen/include/asm-x86/hvm/vmx/vmx.h   |   74 ++++++---------
 10 files changed, 231 insertions(+), 326 deletions(-)

diff -r be05097d5d69 -r d7543cff88ae xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Sun Jun 11 09:54:35 2006 +0100
+++ b/xen/arch/ia64/vmx/vmx_init.c      Sun Jun 11 14:33:16 2006 +0100
@@ -288,9 +288,6 @@ vmx_final_setup_guest(struct vcpu *v)
        /* v->arch.schedule_tail = arch_vmx_do_launch; */
        vmx_create_vp(v);
 
-       /* Set this ed to be vmx */
-       set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags);
-
        /* Physical mode emulation initialization, including
        * emulation ID allocation and related memory request
        */
diff -r be05097d5d69 -r d7543cff88ae xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Sun Jun 11 09:54:35 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Sun Jun 11 14:33:16 2006 +0100
@@ -42,7 +42,7 @@
 
 int vmcs_size;
 
-struct vmcs_struct *alloc_vmcs(void)
+struct vmcs_struct *vmx_alloc_vmcs(void)
 {
     struct vmcs_struct *vmcs;
     u32 vmx_msr_low, vmx_msr_high;
@@ -64,47 +64,63 @@ static void free_vmcs(struct vmcs_struct
     free_xenheap_pages(vmcs, order);
 }
 
-static int load_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
-{
-    int error;
-
-    if ((error = __vmptrld(phys_ptr))) {
-        clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
-        return error;
-    }
-    set_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
-    return 0;
-}
-
-static void vmx_smp_clear_vmcs(void *info)
-{
-    struct vcpu *v = (struct vcpu *)info;
-
-    ASSERT(hvm_guest(v));
-
-    if (v->arch.hvm_vmx.launch_cpu == smp_processor_id())
-        __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-}
-
-void vmx_request_clear_vmcs(struct vcpu *v)
-{
-    ASSERT(hvm_guest(v));
-
-    if (v->arch.hvm_vmx.launch_cpu == smp_processor_id())
-        __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+static void __vmx_clear_vmcs(void *info)
+{
+    struct vcpu *v = info;
+    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+    v->arch.hvm_vmx.active_cpu = -1;
+    v->arch.hvm_vmx.launched   = 0;
+}
+
+static void vmx_clear_vmcs(struct vcpu *v)
+{
+    unsigned int cpu = v->arch.hvm_vmx.active_cpu;
+
+    if ( (cpu == -1) || (cpu == smp_processor_id()) )
+        __vmx_clear_vmcs(v);
     else
-        smp_call_function(vmx_smp_clear_vmcs, v, 1, 1);
-}
-
-#if 0
-static int store_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
-{
-    /* take the current VMCS */
-    __vmptrst(phys_ptr);
-    clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
-    return 0;
-}
-#endif
+        on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
+}
+
+static void vmx_load_vmcs(struct vcpu *v)
+{
+    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+    v->arch.hvm_vmx.active_cpu = smp_processor_id();
+}
+
+void vmx_vmcs_enter(struct vcpu *v)
+{
+    /*
+     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
+     * vmx_vmcs_enter/exit critical regions. This leads to some XXX TODOs XXX:
+     *  1. Move construct_vmcs() much earlier, to domain creation or
+     *     context initialisation.
+     *  2. VMPTRLD as soon as we context-switch to a HVM VCPU.
+     *  3. VMCS destruction needs to happen later (from domain_destroy()).
+     */
+    if ( v == current )
+        return;
+
+    vcpu_pause(v);
+    spin_lock(&v->arch.hvm_vmx.vmcs_lock);
+
+    vmx_clear_vmcs(v);
+    vmx_load_vmcs(v);
+}
+
+void vmx_vmcs_exit(struct vcpu *v)
+{
+    if ( v == current )
+        return;
+
+    /* Don't confuse arch_vmx_do_resume (for @v or @current!) */
+    vmx_clear_vmcs(v);
+    if ( hvm_guest(current) )
+        vmx_load_vmcs(current);
+
+    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
+    vcpu_unpause(v);
+}
 
 static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
 {
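
For reference, the intended usage of the new vmx_vmcs_enter()/vmx_vmcs_exit()
bracket added above, as vmcs_dump() later in this file demonstrates. A minimal
sketch; read_guest_rip() is a hypothetical caller, GUEST_RIP is a real VMCS
field used elsewhere in this patch:

    static unsigned long read_guest_rip(struct vcpu *v)
    {
        unsigned long rip;

        vmx_vmcs_enter(v);          /* v != current: pause, lock, VMPTRLD */
        __vmread(GUEST_RIP, &rip);  /* v's VMCS is now current on this CPU */
        vmx_vmcs_exit(v);           /* VMCLEAR v; reload current's VMCS */

        return rip;
    }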
@@ -247,7 +263,6 @@ static void vmx_do_launch(struct vcpu *v
     __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));
 
     v->arch.schedule_tail = arch_vmx_do_resume;
-    v->arch.hvm_vmx.launch_cpu = smp_processor_id();
 
     /* init guest tsc to start from 0 */
     set_guest_time(v, 0);
@@ -410,53 +425,49 @@ static inline int construct_vmcs_host(vo
 /*
  * Need to extend to support full virtualization.
  */
-static int construct_vmcs(struct arch_vmx_struct *arch_vmx,
+static int construct_vmcs(struct vcpu *v,
                           cpu_user_regs_t *regs)
 {
+    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
     int error;
     long rc;
-    u64 vmcs_phys_ptr;
 
     memset(arch_vmx, 0, sizeof(struct arch_vmx_struct));
+
+    spin_lock_init(&arch_vmx->vmcs_lock);
+    arch_vmx->active_cpu = -1;
 
     /*
      * Create a new VMCS
      */
-    if (!(arch_vmx->vmcs = alloc_vmcs())) {
+    if (!(arch_vmx->vmcs = vmx_alloc_vmcs())) {
         printk("Failed to create a new VMCS\n");
-        rc = -ENOMEM;
-        goto err_out;
-    }
-    vmcs_phys_ptr = (u64) virt_to_maddr(arch_vmx->vmcs);
-
-    if ((error = __vmpclear(vmcs_phys_ptr))) {
-        printk("construct_vmcs: VMCLEAR failed\n");
-        rc = -EINVAL;
-        goto err_out;
-    }
-    if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
-        printk("construct_vmcs: load_vmcs failed: VMCS = %lx\n",
-               (unsigned long) vmcs_phys_ptr);
-        rc = -EINVAL;
-        goto err_out;
-    }
+        return -ENOMEM;
+    }
+
+    vmx_clear_vmcs(v);
+    vmx_load_vmcs(v);
+
     if ((error = construct_vmcs_controls(arch_vmx))) {
         printk("construct_vmcs: construct_vmcs_controls failed\n");
         rc = -EINVAL;
         goto err_out;
     }
+
     /* host selectors */
     if ((error = construct_vmcs_host())) {
         printk("construct_vmcs: construct_vmcs_host failed\n");
         rc = -EINVAL;
         goto err_out;
     }
+
     /* guest selectors */
     if ((error = construct_init_vmcs_guest(regs))) {
         printk("construct_vmcs: construct_vmcs_guest failed\n");
         rc = -EINVAL;
         goto err_out;
     }
+
     if ((error |= __vmwrite(EXCEPTION_BITMAP,
                             MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
         printk("construct_vmcs: setting Exception bitmap failed\n");
@@ -472,12 +483,16 @@ static int construct_vmcs(struct arch_vm
     return 0;
 
 err_out:
-    destroy_vmcs(arch_vmx);
+    vmx_destroy_vmcs(v);
     return rc;
 }
 
-void destroy_vmcs(struct arch_vmx_struct *arch_vmx)
-{
+void vmx_destroy_vmcs(struct vcpu *v)
+{
+    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
+
+    vmx_clear_vmcs(v);
+
     free_vmcs(arch_vmx->vmcs);
     arch_vmx->vmcs = NULL;
 
@@ -506,22 +521,20 @@ void vm_resume_fail(unsigned long eflags
 
 void arch_vmx_do_resume(struct vcpu *v)
 {
-    if ( v->arch.hvm_vmx.launch_cpu == smp_processor_id() )
+    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
     {
-        load_vmcs(&v->arch.hvm_vmx, virt_to_maddr(v->arch.hvm_vmx.vmcs));
-        vmx_do_resume(v);
-        reset_stack_and_jump(vmx_asm_do_resume);
+        vmx_load_vmcs(v);
     }
     else
     {
-        vmx_request_clear_vmcs(v);
-        load_vmcs(&v->arch.hvm_vmx, virt_to_maddr(v->arch.hvm_vmx.vmcs));
+        vmx_clear_vmcs(v);
+        vmx_load_vmcs(v);
         vmx_migrate_timers(v);
         vmx_set_host_env(v);
-        vmx_do_resume(v);
-        v->arch.hvm_vmx.launch_cpu = smp_processor_id();
-        reset_stack_and_jump(vmx_asm_do_relaunch);
-    }
+    }
+
+    vmx_do_resume(v);
+    reset_stack_and_jump(vmx_asm_do_vmentry);
 }
 
 void arch_vmx_do_launch(struct vcpu *v)
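
Note the migration case handled by arch_vmx_do_resume() above: a VMCS must be
VMCLEARed on the CPU where it is active before it can be VMPTRLDed elsewhere,
and VMCLEAR also forces the next entry back to VMLAUNCH. A hedged trace of the
rescheduling sequence:

    /* Hedged trace of a VCPU migrating from CPU0 to CPU1:
     *
     *   CPU0: VCPU runs; its VMCS is active here
     *         (active_cpu == 0, launched == 1)
     *   ...scheduler moves the VCPU to CPU1...
     *   CPU1: arch_vmx_do_resume(): active_cpu (0) != smp_processor_id() (1)
     *         -> vmx_clear_vmcs(v):  IPI to CPU0, VMCLEAR there;
     *                                active_cpu = -1, launched = 0
     *         -> vmx_load_vmcs(v):   VMPTRLD on CPU1; active_cpu = 1
     *         -> vmx_migrate_timers(v), vmx_set_host_env(v)
     *         -> next entry sees launched == 0 and uses VMLAUNCH again
     */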
@@ -529,7 +542,7 @@ void arch_vmx_do_launch(struct vcpu *v)
     int error;
     cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;
 
-    error = construct_vmcs(&v->arch.hvm_vmx, regs);
+    error = construct_vmcs(v, regs);
     if ( error < 0 )
     {
         if (v->vcpu_id == 0) {
@@ -540,7 +553,7 @@ void arch_vmx_do_launch(struct vcpu *v)
         domain_crash_synchronous();
     }
     vmx_do_launch(v);
-    reset_stack_and_jump(vmx_asm_do_launch);
+    reset_stack_and_jump(vmx_asm_do_vmentry);
 }
 
 
@@ -613,17 +626,9 @@ static void vmcs_dump(unsigned char ch)
             }
             printk("\tVCPU %d\n", v->vcpu_id);
 
-            if (v != current) {
-                vcpu_pause(v);
-                __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-            }
-
+            vmx_vmcs_enter(v);
             vmcs_dump_vcpu();
-
-            if (v != current) {
-                __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
-                vcpu_unpause(v);
-            }
+            vmx_vmcs_exit(v);
         }
     }
 
diff -r be05097d5d69 -r d7543cff88ae xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Sun Jun 11 09:54:35 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Sun Jun 11 14:33:16 2006 +0100
@@ -91,8 +91,7 @@ static void vmx_relinquish_guest_resourc
     {
         if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
             continue;
-        vmx_request_clear_vmcs(v);
-        destroy_vmcs(&v->arch.hvm_vmx);
+        vmx_destroy_vmcs(v);
         free_monitor_pagetable(v);
         kill_timer(&v->arch.hvm_vmx.hlt_timer);
         if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
@@ -402,54 +401,10 @@ void vmx_migrate_timers(struct vcpu *v)
         migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
 }
 
-struct vmx_cpu_guest_regs_callback_info {
-    struct vcpu *v;
-    struct cpu_user_regs *regs;
-    unsigned long *crs;
-};
-
-static void vmx_store_cpu_guest_regs(
-    struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs);
-
-static void vmx_load_cpu_guest_regs(
-    struct vcpu *v, struct cpu_user_regs *regs);
-
-static void vmx_store_cpu_guest_regs_callback(void *data)
-{
-    struct vmx_cpu_guest_regs_callback_info *info = data;
-    vmx_store_cpu_guest_regs(info->v, info->regs, info->crs);
-}
-
-static void vmx_load_cpu_guest_regs_callback(void *data)
-{
-    struct vmx_cpu_guest_regs_callback_info *info = data;
-    vmx_load_cpu_guest_regs(info->v, info->regs);
-}
-
 static void vmx_store_cpu_guest_regs(
     struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
 {
-    if ( v != current )
-    {
-        /* Non-current VCPUs must be paused to get a register snapshot. */
-        ASSERT(atomic_read(&v->pausecnt) != 0);
-
-        if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
-        {
-            /* Get register details from remote CPU. */
-            struct vmx_cpu_guest_regs_callback_info info = {
-                .v = v, .regs = regs, .crs = crs };
-            cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
-            on_selected_cpus(cpumask, vmx_store_cpu_guest_regs_callback,
-                             &info, 1, 1);
-            return;
-        }
-
-        /* Register details are on this CPU. Load the correct VMCS. */
-        __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-    }
-
-    ASSERT(v->arch.hvm_vmx.launch_cpu == smp_processor_id());
+    vmx_vmcs_enter(v);
 
     if ( regs != NULL )
     {
@@ -471,9 +426,7 @@ static void vmx_store_cpu_guest_regs(
         __vmread(CR4_READ_SHADOW, &crs[4]);
     }
 
-    /* Reload current VCPU's VMCS if it was temporarily unloaded. */
-    if ( (v != current) && hvm_guest(current) )
-        __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
+    vmx_vmcs_exit(v);
 }
 
 /*
@@ -517,26 +470,7 @@ static void fixup_vm86_seg_bases(struct 
 
 void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
 {
-    if ( v != current )
-    {
-        /* Non-current VCPUs must be paused to set the register snapshot. */
-        ASSERT(atomic_read(&v->pausecnt) != 0);
-
-        if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
-        {
-            struct vmx_cpu_guest_regs_callback_info info = {
-                .v = v, .regs = regs };
-            cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
-            on_selected_cpus(cpumask, vmx_load_cpu_guest_regs_callback,
-                             &info, 1, 1);
-            return;
-        }
-
-        /* Register details are on this CPU. Load the correct VMCS. */
-        __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-    }
-
-    ASSERT(v->arch.hvm_vmx.launch_cpu == smp_processor_id());
+    vmx_vmcs_enter(v);
 
     __vmwrite(GUEST_SS_SELECTOR, regs->ss);
     __vmwrite(GUEST_DS_SELECTOR, regs->ds);
@@ -557,9 +491,7 @@ void vmx_load_cpu_guest_regs(struct vcpu
     __vmwrite(GUEST_CS_SELECTOR, regs->cs);
     __vmwrite(GUEST_RIP, regs->eip);
 
-    /* Reload current VCPU's VMCS if it was temporarily unloaded. */
-    if ( (v != current) && hvm_guest(current) )
-        __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
+    vmx_vmcs_exit(v);
 }
 
 int vmx_realmode(struct vcpu *v)
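
Note: with the callback machinery gone, a read-modify-write of a descheduled
VCPU's register state reduces to the two bracketed accessors. A hedged sketch
(insn_len is hypothetical; pausing, locking and the VMPTRLD/VMCLEAR dance all
happen inside via vmx_vmcs_enter()/vmx_vmcs_exit()):

    struct cpu_user_regs regs;
    unsigned long crs[8];

    vmx_store_cpu_guest_regs(v, &regs, crs);   /* snapshot regs + CR shadows */
    regs.eip += insn_len;                      /* modify the snapshot */
    vmx_load_cpu_guest_regs(v, &regs);         /* write it back to the VMCS */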
@@ -688,16 +620,19 @@ int start_vmx(void)
 
     set_in_cr4(X86_CR4_VMXE);   /* Enable VMXE */
 
-    if (!(vmcs = alloc_vmcs())) {
+    if (!(vmcs = vmx_alloc_vmcs())) {
         printk("Failed to allocate VMCS\n");
         return 0;
     }
 
     phys_vmcs = (u64) virt_to_maddr(vmcs);
 
-    if (!(__vmxon(phys_vmcs))) {
-        printk("VMXON is done\n");
-    }
+    if (__vmxon(phys_vmcs)) {
+        printk("VMXON failed\n");
+        return 0;
+    }
+
+    printk("VMXON is done\n");
 
     vmx_save_init_msrs();
 
diff -r be05097d5d69 -r d7543cff88ae xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S       Sun Jun 11 09:54:35 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S       Sun Jun 11 14:33:16 2006 +0100
@@ -78,69 +78,49 @@
         addl $(NR_SKIPPED_REGS*4), %esp
 
         ALIGN
-
 ENTRY(vmx_asm_vmexit_handler)
         /* selectors are restored/saved by VMX */
         HVM_SAVE_ALL_NOSEGREGS
         call vmx_trace_vmexit
         call vmx_vmexit_handler
-        jmp vmx_asm_do_resume
+        jmp vmx_asm_do_vmentry
 
-.macro vmx_asm_common launch, initialized
-1:
-/* vmx_test_all_events */
-        .if \initialized
+        ALIGN
+vmx_process_softirqs:
+        sti       
+        call do_softirq
+        jmp vmx_asm_do_vmentry
+
+        ALIGN
+ENTRY(vmx_asm_do_vmentry)
         GET_CURRENT(%ebx)
-/*test_all_events:*/
-        xorl %ecx,%ecx
-        notl %ecx
         cli                             # tests must not race interrupts
-/*test_softirqs:*/  
+
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
-        test %ecx,irq_stat(%eax,1)
-        jnz 2f
+        cmpl $0,irq_stat(%eax,1)
+        jnz  vmx_process_softirqs
 
-/* vmx_restore_all_guest */
         call vmx_intr_assist
         call vmx_load_cr2
         call vmx_trace_vmentry
-        .endif
+
+        cmpl $0,VCPU_vmx_launched(%ebx)
+        je   vmx_launch
+
+/*vmx_resume:*/
         HVM_RESTORE_ALL_NOSEGREGS
-        /* 
-         * Check if we are going back to VMX-based VM
-         * By this time, all the setups in the VMCS must be complete.
-         */
-        .if \launch
+        /* VMRESUME */
+        .byte 0x0f,0x01,0xc3
+        pushf
+        call vm_resume_fail
+        ud2
+
+vmx_launch:
+        movl $1,VCPU_vmx_launched(%ebx)
+        HVM_RESTORE_ALL_NOSEGREGS
         /* VMLAUNCH */
         .byte 0x0f,0x01,0xc2
         pushf
         call vm_launch_fail
-        .else
-        /* VMRESUME */
-        .byte 0x0f,0x01,0xc3
-        pushf
-        call vm_resume_fail
-        .endif
-        /* Should never reach here */
-        hlt
-
-        ALIGN
-        .if \initialized
-2:
-/* vmx_process_softirqs */
-        sti       
-        call do_softirq
-        jmp 1b
-        ALIGN
-        .endif
-.endm
-
-ENTRY(vmx_asm_do_launch)
-    vmx_asm_common 1, 0
-
-ENTRY(vmx_asm_do_resume)
-    vmx_asm_common 0, 1
-
-ENTRY(vmx_asm_do_relaunch)
-    vmx_asm_common 1, 1
+        ud2
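
For readers who prefer C, the control flow of the unified entry point above,
in hedged pseudo-C (VMLAUNCH(), VMRESUME() and vm_entry_fail() stand in for
the raw opcodes and the vm_launch_fail()/vm_resume_fail() calls; restoring
guest registers via HVM_RESTORE_ALL_NOSEGREGS is elided):

    for ( ; ; )
    {
        struct vcpu *v = current;

        local_irq_disable();            /* tests must not race interrupts */
        if ( softirq_pending(v->processor) )
        {
            local_irq_enable();
            do_softirq();
            continue;                   /* redo all the checks */
        }

        vmx_intr_assist();
        vmx_load_cr2();
        vmx_trace_vmentry();

        if ( v->arch.hvm_vmx.launched )
            VMRESUME();                 /* returns only on failure */
        else
        {
            v->arch.hvm_vmx.launched = 1;
            VMLAUNCH();                 /* returns only on failure */
        }
        vm_entry_fail();                /* never returns (ud2 follows) */
    }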
diff -r be05097d5d69 -r d7543cff88ae xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S       Sun Jun 11 09:54:35 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S       Sun Jun 11 14:33:16 2006 +0100
@@ -88,68 +88,51 @@
         popq %rdi;                              \
         addq $(NR_SKIPPED_REGS*8), %rsp;
 
+        ALIGN
 ENTRY(vmx_asm_vmexit_handler)
         /* selectors are restored/saved by VMX */
         HVM_SAVE_ALL_NOSEGREGS
         call vmx_trace_vmexit
         call vmx_vmexit_handler
-        jmp vmx_asm_do_resume
+        jmp vmx_asm_do_vmentry
 
-.macro vmx_asm_common launch, initialized 
-1:
-        .if \initialized
-/* vmx_test_all_events */
+        ALIGN
+vmx_process_softirqs:
+        sti       
+        call do_softirq
+        jmp vmx_asm_do_vmentry
+
+        ALIGN
+ENTRY(vmx_asm_do_vmentry)
         GET_CURRENT(%rbx)
-/* test_all_events: */
         cli                             # tests must not race interrupts
-/*test_softirqs:*/  
+
         movl  VCPU_processor(%rbx),%eax
         shl   $IRQSTAT_shift,%rax
-        leaq  irq_stat(%rip), %rdx
-        testl $~0,(%rdx,%rax,1)
-        jnz  2f 
+        leaq  irq_stat(%rip),%rdx
+        cmpl  $0,(%rdx,%rax,1)
+        jnz   vmx_process_softirqs
 
-/* vmx_restore_all_guest */
         call vmx_intr_assist
         call vmx_load_cr2
         call vmx_trace_vmentry
-        .endif
-        /* 
-         * Check if we are going back to VMX-based VM
-         * By this time, all the setups in the VMCS must be complete.
-         */
+
+        cmpl $0,VCPU_vmx_launched(%rbx)
+        je   vmx_launch
+
+/*vmx_resume:*/
         HVM_RESTORE_ALL_NOSEGREGS
-        .if \launch
+        /* VMRESUME */
+        .byte 0x0f,0x01,0xc3
+        pushfq
+        call vm_resume_fail
+        ud2
+
+vmx_launch:
+        movl $1,VCPU_vmx_launched(%rbx)
+        HVM_RESTORE_ALL_NOSEGREGS
         /* VMLAUNCH */
         .byte 0x0f,0x01,0xc2
         pushfq
         call vm_launch_fail
-        .else
-        /* VMRESUME */
-        .byte 0x0f,0x01,0xc3
-        pushfq
-        call vm_resume_fail
-        .endif
-        /* Should never reach here */
-        hlt
-
-        ALIGN
-
-        .if \initialized
-2:
-/* vmx_process_softirqs */
-        sti       
-        call do_softirq
-        jmp 1b
-        ALIGN
-        .endif
-.endm
-
-ENTRY(vmx_asm_do_launch)
-      vmx_asm_common 1, 0
-
-ENTRY(vmx_asm_do_resume)
-      vmx_asm_common 0, 1
-
-ENTRY(vmx_asm_do_relaunch)
-      vmx_asm_common 1, 1
+        ud2
diff -r be05097d5d69 -r d7543cff88ae xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Sun Jun 11 09:54:35 2006 +0100
+++ b/xen/arch/x86/x86_32/asm-offsets.c Sun Jun 11 14:33:16 2006 +0100
@@ -86,6 +86,9 @@ void __dummy__(void)
     OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc);
     BLANK();
 
+    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
+    BLANK();
+
     OFFSET(VMCB_rax, struct vmcb_struct, rax);
     OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset);
     BLANK();
diff -r be05097d5d69 -r d7543cff88ae xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Sun Jun 11 09:54:35 2006 +0100
+++ b/xen/arch/x86/x86_64/asm-offsets.c Sun Jun 11 14:33:16 2006 +0100
@@ -80,6 +80,9 @@ void __dummy__(void)
     OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc);
     BLANK();
 
+    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
+    BLANK();
+
     OFFSET(VMCB_rax, struct vmcb_struct, rax);
     OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset);
     BLANK();
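
Note: VCPU_vmx_launched reaches the assembly via the usual asm-offsets trick.
Roughly (a simplified sketch; Xen's actual DEFINE() may differ in detail):

    /* asm-offsets.c is compiled with -S and the "->" lines are scraped
     * out of the generated assembly into asm-offsets.h, so exits.S can
     * say "cmpl $0,VCPU_vmx_launched(%ebx)". */
    #define DEFINE(_sym, _val) \
        __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
    #define OFFSET(_sym, _str, _mem) DEFINE(_sym, offsetof(_str, _mem))

    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);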
diff -r be05097d5d69 -r d7543cff88ae xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h    Sun Jun 11 09:54:35 2006 +0100
+++ b/xen/include/asm-ia64/vmx_vpd.h    Sun Jun 11 14:33:16 2006 +0100
@@ -104,9 +104,6 @@ struct arch_vmx_struct {
 
 #define VMX_DOMAIN(d)   d->arch.arch_vmx.flags
 
-#define ARCH_VMX_VMCS_LOADED    0       /* VMCS has been loaded and active */
-#define ARCH_VMX_VMCS_LAUNCH    1       /* Needs VMCS launch */
-#define ARCH_VMX_VMCS_RESUME    2       /* Needs VMCS resume */
 #define ARCH_VMX_IO_WAIT        3       /* Waiting for I/O completion */
 #define ARCH_VMX_INTR_ASSIST    4       /* Need DM's assist to issue intr */
 #define ARCH_VMX_CONTIG_MEM    5       /* Need contiguous machine pages */
diff -r be05097d5d69 -r d7543cff88ae xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Sun Jun 11 09:54:35 2006 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Sun Jun 11 14:33:16 2006 +0100
@@ -65,34 +65,46 @@ struct vmx_msr_state {
 };
 
 struct arch_vmx_struct {
-    struct vmcs_struct      *vmcs;  /* VMCS pointer in virtual. */
-    unsigned int            launch_cpu; /* VMCS is valid on this CPU. */
-    u32                     exec_control; /* cache of cpu execution control */
-    u32                     vector_injected; /* if there is vector installed in the INTR_INFO_FIELD */
-    unsigned long           flags;  /* VMCS flags */
-    unsigned long           cpu_cr0; /* copy of guest CR0 */
-    unsigned long           cpu_shadow_cr0; /* copy of guest read shadow CR0 */
-    unsigned long           cpu_cr2; /* save CR2 */
-    unsigned long           cpu_cr3;
-    unsigned long           cpu_state;
-    unsigned long           cpu_based_exec_control;
-    struct vmx_msr_state    msr_content;
-    void                    *io_bitmap_a, *io_bitmap_b;
-    struct timer            hlt_timer;  /* hlt ins emulation wakeup timer */
+    /* Virtual address of VMCS. */
+    struct vmcs_struct  *vmcs;
+
+    /* Protects remote usage of VMCS (VMPTRLD/VMCLEAR). */
+    spinlock_t           vmcs_lock;
+
+    /*
+     * Activation and launch status of this VMCS.
+     *  - Activated on a CPU by VMPTRLD. Deactivated by VMCLEAR.
+     *  - Launched on active CPU by VMLAUNCH when current VMCS.
+     */
+    int                  active_cpu;
+    int                  launched;
+
+    /* Cache of cpu execution control. */
+    u32                  exec_control;
+
+    /* If there is vector installed in the INTR_INFO_FIELD. */
+    u32                  vector_injected;
+
+    unsigned long        cpu_cr0; /* copy of guest CR0 */
+    unsigned long        cpu_shadow_cr0; /* copy of guest read shadow CR0 */
+    unsigned long        cpu_cr2; /* save CR2 */
+    unsigned long        cpu_cr3;
+    unsigned long        cpu_state;
+    unsigned long        cpu_based_exec_control;
+    struct vmx_msr_state msr_content;
+    void                *io_bitmap_a, *io_bitmap_b;
+    struct timer         hlt_timer;  /* hlt ins emulation wakeup timer */
 };
 
 #define vmx_schedule_tail(next)         \
     (next)->thread.arch_vmx.arch_vmx_schedule_tail((next))
 
-#define ARCH_VMX_VMCS_LOADED    0       /* VMCS has been loaded and active */
-#define ARCH_VMX_VMCS_LAUNCH    1       /* Needs VMCS launch */
-#define ARCH_VMX_VMCS_RESUME    2       /* Needs VMCS resume */
-
 void vmx_do_resume(struct vcpu *);
-struct vmcs_struct *alloc_vmcs(void);
-void destroy_vmcs(struct arch_vmx_struct *arch_vmx);
-
-extern void vmx_request_clear_vmcs(struct vcpu *v);
+
+struct vmcs_struct *vmx_alloc_vmcs(void);
+void vmx_destroy_vmcs(struct vcpu *v);
+void vmx_vmcs_enter(struct vcpu *v);
+void vmx_vmcs_exit(struct vcpu *v);
 
 #define VMCS_USE_HOST_ENV       1
 #define VMCS_USE_SEPARATE_ENV   0
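
Note: the comment block above defines a small state machine over
(active_cpu, launched). A hedged summary of the invariants as assertions
(illustrative; no such checker exists in the patch):

    static void vmcs_check_invariants(struct arch_vmx_struct *arch_vmx)
    {
        /* VMCLEARed everywhere: no active CPU, and launch state reset. */
        if ( arch_vmx->active_cpu == -1 )
            ASSERT(!arch_vmx->launched);

        /* 'launched' selects VMRESUME over VMLAUNCH; it can only be set
         * while the VMCS is active on some CPU, and __vmx_clear_vmcs()
         * resets both fields together. */
        if ( arch_vmx->launched )
            ASSERT(arch_vmx->active_cpu != -1);
    }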
diff -r be05097d5d69 -r d7543cff88ae xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Sun Jun 11 09:54:35 2006 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Sun Jun 11 14:33:16 2006 +0100
@@ -27,8 +27,7 @@
 #include <asm/i387.h>
 
 extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
-extern void vmx_asm_do_resume(void);
-extern void vmx_asm_do_launch(void);
+extern void vmx_asm_do_vmentry(void);
 extern void vmx_intr_assist(void);
 extern void vmx_migrate_timers(struct vcpu *v);
 extern void arch_vmx_do_launch(struct vcpu *);
@@ -200,22 +199,18 @@ extern unsigned int cpu_rev;
 #define MODRM_EAX_07    ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */
 #define MODRM_EAX_ECX   ".byte 0xc1\n" /* [EAX], [ECX] */
 
-static inline int __vmptrld (u64 addr)
-{
-    unsigned long eflags;
+static inline void __vmptrld(u64 addr)
+{
     __asm__ __volatile__ ( VMPTRLD_OPCODE
                            MODRM_EAX_06
+                           /* CF==1 or ZF==1 --> crash (ud2) */
+                           "ja 1f ; ud2 ; 1:\n"
                            :
                            : "a" (&addr) 
                            : "memory");
-
-    __save_flags(eflags);
-    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
-        return -1;
-    return 0;
-}
-
-static inline void __vmptrst (u64 addr)
+}
+
+static inline void __vmptrst(u64 addr)
 {
     __asm__ __volatile__ ( VMPTRST_OPCODE
                            MODRM_EAX_07
@@ -224,31 +219,30 @@ static inline void __vmptrst (u64 addr)
                            : "memory");
 }
 
-static inline int __vmpclear (u64 addr)
-{
-    unsigned long eflags;
-
+static inline void __vmpclear(u64 addr)
+{
     __asm__ __volatile__ ( VMCLEAR_OPCODE
                            MODRM_EAX_06
+                           /* CF==1 or ZF==1 --> crash (ud2) */
+                           "ja 1f ; ud2 ; 1:\n"
                            :
                            : "a" (&addr) 
                            : "memory");
-    __save_flags(eflags);
-    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
-        return -1;
-    return 0;
 }
 
 #define __vmread(x, ptr) ___vmread((x), (ptr), sizeof(*(ptr)))
 
-static always_inline int ___vmread (const unsigned long field,  void *ptr, const int size)
-{
-    unsigned long eflags;
+static always_inline int ___vmread(
+    const unsigned long field, void *ptr, const int size)
+{
     unsigned long ecx = 0;
+    int rc;
 
     __asm__ __volatile__ ( VMREAD_OPCODE
-                           MODRM_EAX_ECX       
-                           : "=c" (ecx)
+                           MODRM_EAX_ECX
+                           /* CF==1 or ZF==1 --> rc = -1 */
+                           "setna %b0 ; neg %0"
+                           : "=r" (rc), "=c" (ecx)
                            : "a" (field)
                            : "memory");
 
@@ -270,10 +264,7 @@ static always_inline int ___vmread (cons
         break;
     }
 
-    __save_flags(eflags);
-    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
-        return -1;
-    return 0;
+    return rc;
 }
 
 
@@ -315,17 +306,16 @@ static always_inline void __vmread_vcpu(
 
 static inline int __vmwrite (unsigned long field, unsigned long value)
 {
-    unsigned long eflags;
     struct vcpu *v = current;
+    int rc;
 
     __asm__ __volatile__ ( VMWRITE_OPCODE
                            MODRM_EAX_ECX
-                           :
+                           /* CF==1 or ZF==1 --> rc = -1 */
+                           "setna %b0 ; neg %0"
+                           : "=r" (rc)
                            : "a" (field) , "c" (value)
                            : "memory");
-    __save_flags(eflags);
-    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
-        return -1;
 
     switch(field) {
     case CR0_READ_SHADOW:
@@ -335,7 +325,7 @@ static inline int __vmwrite (unsigned lo
        break;
     }
 
-    return 0;
+    return rc;
 }
 
 static inline int __vm_set_bit(unsigned long field, unsigned long mask)
@@ -370,17 +360,17 @@ static inline void __vmxoff (void)
 
 static inline int __vmxon (u64 addr)
 {
-    unsigned long eflags;
+    int rc;
 
     __asm__ __volatile__ ( VMXON_OPCODE
                            MODRM_EAX_06
-                           :
+                           /* CF==1 or ZF==1 --> rc = -1 */
+                           "setna %b0 ; neg %0"
+                           : "=r" (rc)
                            : "a" (&addr) 
                            : "memory");
-    __save_flags(eflags);
-    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
-        return -1;
-    return 0;
+
+    return rc;
 }
 
 /* Make sure that xen intercepts any FP accesses from current */
