[Xen-changelog] Support VCPU migration for VMX guests.

# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 57b3fdca5daed68d14c073be1dc5b813a2841ed4
# Parent  f972da9a2dda8a0ac844a6a4f683c1e0974cb227
Support VCPU migration for VMX guests.

Add a hook to support CPU migration for VMX domains.

Reorganize the low-level asm code to support relaunching a VMCS on a
different logical CPU.

Signed-off-by: Yunhong Jiang <yunhong.jiang@xxxxxxxxx>
Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>

diff -r f972da9a2dda -r 57b3fdca5dae xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c    Thu Aug 11 21:19:45 2005
+++ b/xen/arch/ia64/domain.c    Thu Aug 11 21:38:58 2005
@@ -1398,3 +1398,12 @@
 {
        vcpu_pend_interrupt(dom0->vcpu[0],irq);
 }
+
+void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
+{
+       if ( v->processor == newcpu )
+               return;
+
+       set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+       v->processor = newcpu;
+}
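
The generic half of the hook just marks the VCPU as migrated and updates its
home processor; on ia64 that is the whole story. A user-space C sketch of the
contract (the struct layout, the bit index, and the non-atomic set_bit below
are simplified stand-ins for illustration; Xen's real set_bit is an atomic
bitop on vcpu_flags):

    #include <stdio.h>

    #define _VCPUF_cpu_migrated 2   /* bit index chosen for the sketch */

    struct vcpu {
        int processor;
        unsigned long vcpu_flags;
    };

    /* Non-atomic stand-in for Xen's locked set_bit(). */
    static void set_bit(int nr, unsigned long *flags)
    {
        *flags |= 1UL << nr;
    }

    void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
    {
        if (v->processor == newcpu)
            return;
        set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
        v->processor = newcpu;
    }

    int main(void)
    {
        struct vcpu v = { .processor = 0, .vcpu_flags = 0 };
        vcpu_migrate_cpu(&v, 3);
        printf("processor=%d migrated=%lu\n", v.processor,
               (v.vcpu_flags >> _VCPUF_cpu_migrated) & 1);
        return 0;
    }
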
diff -r f972da9a2dda -r 57b3fdca5dae xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu Aug 11 21:19:45 2005
+++ b/xen/arch/x86/domain.c     Thu Aug 11 21:38:58 2005
@@ -295,26 +295,23 @@
         l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
 }
 
+void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
+{
+    if ( v->processor == newcpu )
+        return;
+
+    set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+    v->processor = newcpu;
+
+    if ( VMX_DOMAIN(v) )
+    {
+        __vmpclear(virt_to_phys(v->arch.arch_vmx.vmcs));
+        v->arch.schedule_tail = arch_vmx_do_relaunch;
+    }
+}
+
 #ifdef CONFIG_VMX
 static int vmx_switch_on;
-
-void arch_vmx_do_resume(struct vcpu *v) 
-{
-    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
-
-    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
-    vmx_do_resume(v);
-    reset_stack_and_jump(vmx_asm_do_resume);
-}
-
-void arch_vmx_do_launch(struct vcpu *v) 
-{
-    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
-
-    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
-    vmx_do_launch(v);
-    reset_stack_and_jump(vmx_asm_do_launch);
-}
 
 static int vmx_final_setup_guest(
     struct vcpu *v, struct vcpu_guest_context *ctxt)
@@ -346,7 +343,7 @@
 
     v->arch.schedule_tail = arch_vmx_do_launch;
 
-#if defined (__i386)
+#if defined (__i386__)
     v->domain->arch.vmx_platform.real_mode_data = 
         (unsigned long *) regs->esi;
 #endif
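
The x86 version adds the VMX-specific step. A VMCS can be current on at most
one logical CPU, so before the VCPU may run elsewhere the old CPU's cached
VMCS state has to be flushed back to memory with VMCLEAR (__vmpclear).
VMCLEAR also clears the VMCS's launched flag, and a cleared VMCS must be
entered with VMLAUNCH rather than VMRESUME; that is why schedule_tail is
redirected to arch_vmx_do_relaunch. A minimal runnable sketch of that
invariant (the enum and helper names are invented for illustration):

    #include <stdio.h>

    /* Per the VMX rules: VMCLEAR clears a VMCS's "launched" flag, and a
     * VMCS whose launched flag is clear must be entered with VMLAUNCH. */
    enum vmcs_state { VMCS_CLEAR, VMCS_LAUNCHED };

    static enum vmcs_state vmcs = VMCS_LAUNCHED;  /* guest has run before */

    static void vmclear(void) { vmcs = VMCS_CLEAR; }  /* __vmpclear */

    static const char *next_entry(void)
    {
        return (vmcs == VMCS_LAUNCHED) ? "VMRESUME" : "VMLAUNCH";
    }

    int main(void)
    {
        printf("before migration: %s\n", next_entry()); /* VMRESUME */
        vmclear();                                      /* migration */
        printf("after migration:  %s\n", next_entry()); /* VMLAUNCH */
        return 0;
    }
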
diff -r f972da9a2dda -r 57b3fdca5dae xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c   Thu Aug 11 21:19:45 2005
+++ b/xen/arch/x86/vmx_vmcs.c   Thu Aug 11 21:38:58 2005
@@ -198,7 +198,7 @@
     host_env.idtr_limit = desc.size;
     host_env.idtr_base = desc.address;
     error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
- 
+
     __asm__ __volatile__ ("sgdt  (%0) \n" :: "a"(&desc) : "memory");
     host_env.gdtr_limit = desc.size;
     host_env.gdtr_base = desc.address;
@@ -210,7 +210,6 @@
     host_env.tr_base = (unsigned long) &init_tss[cpu];
     error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
     error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
-
 }
 
 void vmx_do_launch(struct vcpu *v) 
@@ -544,6 +543,36 @@
     __vmx_bug(guest_cpu_user_regs());
 }
 
+void arch_vmx_do_resume(struct vcpu *v) 
+{
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+
+    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+    vmx_do_resume(v);
+    reset_stack_and_jump(vmx_asm_do_resume);
+}
+
+void arch_vmx_do_launch(struct vcpu *v) 
+{
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+
+    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+    vmx_do_launch(v);
+    reset_stack_and_jump(vmx_asm_do_launch);
+}
+
+void arch_vmx_do_relaunch(struct vcpu *v)
+{
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+
+    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+    vmx_do_resume(v);
+    vmx_set_host_env(v);
+    v->arch.schedule_tail = arch_vmx_do_resume;
+
+    reset_stack_and_jump(vmx_asm_do_relaunch);
+}
+
 #endif /* CONFIG_VMX */
 
 /*
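
arch_vmx_do_relaunch runs exactly once, on the first switch-in after a
migration: it loads the VMCS on the new CPU, calls vmx_set_host_env() to
refresh the per-CPU host fields (the IDTR/GDTR/TR bases written above), and
re-arms schedule_tail with the normal resume path before entering the guest
via vmx_asm_do_relaunch. The one-shot tail-pointer pattern, modeled as a
runnable sketch with simplified types:

    #include <stdio.h>

    struct vcpu;
    typedef void (*tail_fn)(struct vcpu *);
    struct vcpu { tail_fn schedule_tail; };

    static void do_resume(struct vcpu *v)
    {
        (void)v;
        printf("normal path: VMRESUME\n");
    }

    static void do_relaunch(struct vcpu *v)
    {
        printf("reload VMCS, refresh per-CPU host state, VMLAUNCH\n");
        v->schedule_tail = do_resume;  /* one-shot: re-arm normal path */
    }

    int main(void)
    {
        struct vcpu v = { .schedule_tail = do_relaunch };
        v.schedule_tail(&v);  /* first switch-in after migration */
        v.schedule_tail(&v);  /* every later switch-in */
        return 0;
    }
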
diff -r f972da9a2dda -r 57b3fdca5dae xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S       Thu Aug 11 21:19:45 2005
+++ b/xen/arch/x86/x86_32/entry.S       Thu Aug 11 21:38:58 2005
@@ -108,31 +108,26 @@
         pushl %ecx; \
         pushl %ebx;
 
+#define VMX_RESTORE_ALL_NOSEGREGS   \
+        popl %ebx;  \
+        popl %ecx;  \
+        popl %edx;  \
+        popl %esi;  \
+        popl %edi;  \
+        popl %ebp;  \
+        popl %eax;  \
+        addl $(NR_SKIPPED_REGS*4), %esp
+
 ENTRY(vmx_asm_vmexit_handler)
         /* selectors are restored/saved by VMX */
         VMX_SAVE_ALL_NOSEGREGS
         call vmx_vmexit_handler
         jmp vmx_asm_do_resume
 
-ENTRY(vmx_asm_do_launch)
-        popl %ebx
-        popl %ecx
-        popl %edx
-        popl %esi
-        popl %edi
-        popl %ebp
-        popl %eax
-        addl $(NR_SKIPPED_REGS*4), %esp
-        /* VMLUANCH */
-        .byte 0x0f,0x01,0xc2
-        pushf
-        call vm_launch_fail
-        hlt
-        
-        ALIGN
-        
-ENTRY(vmx_asm_do_resume)
-vmx_test_all_events:
+.macro vmx_asm_common launch initialized
+1:
+/* vmx_test_all_events */
+        .if \initialized
         GET_CURRENT(%ebx)
 /*test_all_events:*/
         xorl %ecx,%ecx
@@ -142,34 +137,50 @@
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
         test %ecx,irq_stat(%eax,1)
-        jnz  vmx_process_softirqs
-
-vmx_restore_all_guest:
+        jnz 2f
+
+/* vmx_restore_all_guest */
         call load_cr2
+        .endif
+        VMX_RESTORE_ALL_NOSEGREGS
         /* 
          * Check if we are going back to VMX-based VM
          * By this time, all the setups in the VMCS must be complete.
          */
-        popl %ebx
-        popl %ecx
-        popl %edx
-        popl %esi
-        popl %edi
-        popl %ebp
-        popl %eax
-        addl $(NR_SKIPPED_REGS*4), %esp
+        .if \launch
+        /* VMLAUNCH */
+        .byte 0x0f,0x01,0xc2
+        pushf
+        call vm_launch_fail
+        .else
         /* VMRESUME */
         .byte 0x0f,0x01,0xc3
         pushf
         call vm_resume_fail
+        .endif
         /* Should never reach here */
         hlt
 
         ALIGN
-vmx_process_softirqs:
+        .if \initialized
+2:
+/* vmx_process_softirqs */
         sti       
         call do_softirq
-        jmp  vmx_test_all_events
+        jmp 1b
+        ALIGN
+        .endif
+.endm
+
+ENTRY(vmx_asm_do_launch)
+    vmx_asm_common 1 0
+
+ENTRY(vmx_asm_do_resume)
+    vmx_asm_common 0 1
+
+ENTRY(vmx_asm_do_relaunch)
+    vmx_asm_common 1 1
+
 #endif
 
         ALIGN
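
The refactor folds the launch and resume paths into a single GAS macro,
vmx_asm_common, parameterized by `launch' (enter with VMLAUNCH vs. VMRESUME)
and `initialized' (whether there is live guest state worth testing events
against before entry). The .if directives select the code at assembly time,
and the numeric local labels 1:/2: (referenced as 1b/2f) replace the old
named labels, which would collide once the macro body is expanded three
times. A C model of the control flow the three expansions share (printfs
stand in for the real work):

    #include <stdio.h>

    /* C model of the GAS macro: one body, three expansions. */
    static void vmx_asm_common(int launch, int initialized)
    {
        if (initialized) {
            /* 1: test events/softirqs, then restore guest state */
            printf("  check softirqs, call load_cr2\n");
        }
        printf("  VMX_RESTORE_ALL_NOSEGREGS\n");
        printf("  %s\n", launch ? "VMLAUNCH" : "VMRESUME");
    }

    int main(void)
    {
        printf("vmx_asm_do_launch:\n");   vmx_asm_common(1, 0);
        printf("vmx_asm_do_resume:\n");   vmx_asm_common(0, 1);
        printf("vmx_asm_do_relaunch:\n"); vmx_asm_common(1, 1);
        return 0;
    }
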
diff -r f972da9a2dda -r 57b3fdca5dae xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Thu Aug 11 21:19:45 2005
+++ b/xen/arch/x86/x86_64/entry.S       Thu Aug 11 21:38:58 2005
@@ -194,39 +194,34 @@
         pushq %r14; \
         pushq %r15; \
 
+#define VMX_RESTORE_ALL_NOSEGREGS \
+        popq %r15; \
+        popq %r14; \
+        popq %r13; \
+        popq %r12; \
+        popq %rbp; \
+        popq %rbx; \
+        popq %r11; \
+        popq %r10; \
+        popq %r9;  \
+        popq %r8;  \
+        popq %rax; \
+        popq %rcx; \
+        popq %rdx; \
+        popq %rsi; \
+        popq %rdi; \
+        addq $(NR_SKIPPED_REGS*8), %rsp; \
+
 ENTRY(vmx_asm_vmexit_handler)
         /* selectors are restored/saved by VMX */
         VMX_SAVE_ALL_NOSEGREGS
         call vmx_vmexit_handler
         jmp vmx_asm_do_resume
 
-ENTRY(vmx_asm_do_launch)
-        popq %r15
-        popq %r14
-        popq %r13
-        popq %r12
-        popq %rbp
-        popq %rbx
-        popq %r11
-        popq %r10
-        popq %r9
-        popq %r8
-        popq %rax
-        popq %rcx
-        popq %rdx
-        popq %rsi
-        popq %rdi
-        addq $(NR_SKIPPED_REGS*8), %rsp
-        /* VMLUANCH */
-        .byte 0x0f,0x01,0xc2
-        pushfq
-        call vm_launch_fail
-        hlt
-        
-        ALIGN
-        
-ENTRY(vmx_asm_do_resume)
-vmx_test_all_events:
+.macro vmx_asm_common launch initialized 
+1:
+        .if \initialized
+/* vmx_test_all_events */
         GET_CURRENT(%rbx)
 /* test_all_events: */
         cli                             # tests must not race interrupts
@@ -235,42 +230,51 @@
         shl   $IRQSTAT_shift,%rax
         leaq  irq_stat(%rip), %rdx
         testl $~0,(%rdx,%rax,1)
-        jnz   vmx_process_softirqs
-
-vmx_restore_all_guest:
+        jnz  2f 
+
+/* vmx_restore_all_guest */
         call load_cr2
+        .endif
         /* 
          * Check if we are going back to VMX-based VM
          * By this time, all the setups in the VMCS must be complete.
          */
-        popq %r15
-        popq %r14
-        popq %r13
-        popq %r12
-        popq %rbp
-        popq %rbx
-        popq %r11
-        popq %r10
-        popq %r9
-        popq %r8
-        popq %rax
-        popq %rcx
-        popq %rdx
-        popq %rsi
-        popq %rdi
-        addq $(NR_SKIPPED_REGS*8), %rsp
+        VMX_RESTORE_ALL_NOSEGREGS
+        .if \launch
+        /* VMLAUNCH */
+        .byte 0x0f,0x01,0xc2
+        pushfq
+        call vm_launch_fail
+        .else
         /* VMRESUME */
         .byte 0x0f,0x01,0xc3
         pushfq
         call vm_resume_fail
+        .endif
         /* Should never reach here */
         hlt
 
         ALIGN
-vmx_process_softirqs:
+
+        .if \initialized
+2:
+/* vmx_process_softirqs */
         sti       
         call do_softirq
-        jmp  vmx_test_all_events
+        jmp 1b
+        ALIGN
+        .endif
+.endm
+
+ENTRY(vmx_asm_do_launch)
+      vmx_asm_common 1 0
+
+ENTRY(vmx_asm_do_resume)
+      vmx_asm_common 0 1
+
+ENTRY(vmx_asm_do_relaunch)
+      vmx_asm_common 1 1
+
 #endif
 
         ALIGN
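
The x86_64 macro has the same shape; only the register list and the stack
adjustment differ. In both files the VM-entry instructions are emitted as
raw .byte sequences, presumably because assemblers of the day lacked the VMX
mnemonics: VMLAUNCH encodes as 0F 01 C2 and VMRESUME as 0F 01 C3. A trivial
reference sketch that only prints the encodings (it never executes them):

    #include <stdio.h>

    /* VM-entry opcodes per the Intel SDM; entry.S emits them with
     * .byte for lack of assembler support. */
    static const unsigned char vmlaunch[] = { 0x0f, 0x01, 0xc2 };
    static const unsigned char vmresume[] = { 0x0f, 0x01, 0xc3 };

    int main(void)
    {
        printf("VMLAUNCH = %02x %02x %02x\n",
               vmlaunch[0], vmlaunch[1], vmlaunch[2]);
        printf("VMRESUME = %02x %02x %02x\n",
               vmresume[0], vmresume[1], vmresume[2]);
        return 0;
    }
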
diff -r f972da9a2dda -r 57b3fdca5dae xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c     Thu Aug 11 21:19:45 2005
+++ b/xen/common/dom0_ops.c     Thu Aug 11 21:38:58 2005
@@ -293,17 +293,17 @@
         v->cpumap = cpumap;
 
         if ( cpumap == CPUMAP_RUNANYWHERE )
+        {
             clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
+        }
         else
         {
             /* pick a new cpu from the usable map */
             int new_cpu = (int)find_first_set_bit(cpumap) % num_online_cpus();
 
             vcpu_pause(v);
-            if ( v->processor != new_cpu )
-                set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+            vcpu_migrate_cpu(v, new_cpu);
             set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
-            v->processor = new_cpu;
             vcpu_unpause(v);
         }
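
The dom0 op now delegates to vcpu_migrate_cpu() instead of open-coding the
flag-and-field update, so the VMX side effects above also fire on the
pinning path. The CPU choice itself is the lowest set bit of the affinity
map, wrapped to the online count; a sketch with GCC's __builtin_ctzl
standing in for Xen's find_first_set_bit (the mask must be non-zero):

    #include <stdio.h>

    /* "pick a new cpu from the usable map": lowest set bit of the
     * affinity mask, wrapped to the number of online CPUs. */
    static int pick_cpu(unsigned long cpumap, int num_online_cpus)
    {
        return (int)(__builtin_ctzl(cpumap) % num_online_cpus);
    }

    int main(void)
    {
        /* mask 0x14 = CPUs {2,4}, 4 CPUs online -> CPU 2 */
        printf("%d\n", pick_cpu(0x14UL, 4));
        return 0;
    }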
 
diff -r f972da9a2dda -r 57b3fdca5dae xen/include/asm-x86/vmx.h
--- a/xen/include/asm-x86/vmx.h Thu Aug 11 21:19:45 2005
+++ b/xen/include/asm-x86/vmx.h Thu Aug 11 21:38:58 2005
@@ -35,6 +35,7 @@
 
 extern void arch_vmx_do_launch(struct vcpu *);
 extern void arch_vmx_do_resume(struct vcpu *);
+extern void arch_vmx_do_relaunch(struct vcpu *);
 
 extern int vmcs_size;
 extern unsigned int cpu_rev;
diff -r f972da9a2dda -r 57b3fdca5dae xen/include/asm-x86/vmx_vmcs.h
--- a/xen/include/asm-x86/vmx_vmcs.h    Thu Aug 11 21:19:45 2005
+++ b/xen/include/asm-x86/vmx_vmcs.h    Thu Aug 11 21:38:58 2005
@@ -93,6 +93,7 @@
 
 void vmx_do_launch(struct vcpu *); 
 void vmx_do_resume(struct vcpu *); 
+void vmx_set_host_env(struct vcpu *);
 
 struct vmcs_struct *alloc_vmcs(void);
 void free_vmcs(struct vmcs_struct *);
diff -r f972da9a2dda -r 57b3fdca5dae xen/include/xen/domain.h
--- a/xen/include/xen/domain.h  Thu Aug 11 21:19:45 2005
+++ b/xen/include/xen/domain.h  Thu Aug 11 21:38:58 2005
@@ -15,7 +15,9 @@
 extern void arch_do_boot_vcpu(struct vcpu *v);
 
 extern int  arch_set_info_guest(
-    struct vcpu *d, struct vcpu_guest_context *c);
+    struct vcpu *v, struct vcpu_guest_context *c);
+
+extern void vcpu_migrate_cpu(struct vcpu *v, int newcpu);
 
 extern void free_perdomain_pt(struct domain *d);
 
