
[Xen-changelog] [xen-unstable] Merge



# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1174391780 0
# Node ID 61a4a4795be57d12d2c1d9de5a12b8c847e77205
# Parent  09f2e758a198d8a28172ded2e849d98c82d296e7
# Parent  cabf9e221cd506b1f23609a8a696a08124901d7a
Merge
---
 xen/arch/x86/hvm/svm/svm.c          |   52 +++++++++++++-----------------------
 xen/arch/x86/hvm/svm/vmcb.c         |   14 ++-------
 xen/arch/x86/hvm/svm/x86_32/exits.S |   40 +++++++++------------------
 xen/arch/x86/hvm/svm/x86_64/exits.S |   34 ++++++++---------------
 xen/include/asm-x86/hvm/svm/svm.h   |    2 -
 xen/include/asm-x86/hvm/svm/vmcb.h  |    2 -
 6 files changed, 48 insertions(+), 96 deletions(-)
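
In short, this merge drops the separate SVM launch path (arch_svm_do_launch / svm_do_launch / svm_asm_do_launch) and routes every schedule tail through a single resume path, with the nested-paging host CR3 now set once at VMCB construction time. A minimal sketch of the new resume tail, lifted from the svm.c hunk below (helpers such as hvm_migrate_timers() and hvm_do_resume() are declared elsewhere in the tree):

static void arch_svm_do_resume(struct vcpu *v)
{
    /* launch_core starts out as -1, so the first run after vcpu creation,
     * like any run on a new core, migrates the vcpu's timers here. */
    if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
    {
        v->arch.hvm_svm.launch_core = smp_processor_id();
        hvm_migrate_timers(v);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(svm_asm_do_resume);
}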

diff -r 09f2e758a198 -r 61a4a4795be5 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Mar 20 11:10:52 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Mar 20 11:56:20 2007 +0000
@@ -485,7 +485,6 @@ int svm_vmcb_restore(struct vcpu *v, str
          * first.
          */
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
-        /* current!=vcpu as not called by arch_vmx_do_launch */
         mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
         if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) ) 
             goto bad_cr3;
@@ -921,17 +920,6 @@ static void svm_load_cpu_guest_regs(
     svm_load_cpu_user_regs(v, regs);
 }
 
-static void arch_svm_do_launch(struct vcpu *v) 
-{
-    svm_do_launch(v);
-
-    if ( paging_mode_hap(v->domain) ) {
-        v->arch.hvm_svm.vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
-    }
-
-    reset_stack_and_jump(svm_asm_do_launch);
-}
-
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
     svm_save_dr(v);
@@ -953,15 +941,29 @@ static void svm_ctxt_switch_to(struct vc
     svm_restore_dr(v);
 }
 
+static void arch_svm_do_resume(struct vcpu *v) 
+{
+    if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
+    {
+        v->arch.hvm_svm.launch_core = smp_processor_id();
+        hvm_migrate_timers(v);
+    }
+
+    hvm_do_resume(v);
+    reset_stack_and_jump(svm_asm_do_resume);
+}
+
 static int svm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
 
-    v->arch.schedule_tail    = arch_svm_do_launch;
+    v->arch.schedule_tail    = arch_svm_do_resume;
     v->arch.ctxt_switch_from = svm_ctxt_switch_from;
     v->arch.ctxt_switch_to   = svm_ctxt_switch_to;
 
     v->arch.hvm_svm.saved_irq_vector = -1;
+
+    v->arch.hvm_svm.launch_core = -1;
 
     if ( (rc = svm_create_vmcb(v)) != 0 )
     {
@@ -1026,10 +1028,12 @@ void svm_npt_detect(void)
 
     /* check CPUID for nested paging support */
     cpuid(0x8000000A, &eax, &ebx, &ecx, &edx);
-    if ( edx & 0x01 ) { /* nested paging */
+    if ( edx & 0x01 ) /* nested paging */
+    {
         hap_capable_system = 1;
     }
-    else if ( opt_hap_enabled ) {
+    else if ( opt_hap_enabled )
+    {
         printk(" nested paging is not supported by this CPU.\n");
         hap_capable_system = 0; /* no nested paging, we disable flag. */
     }
@@ -1085,24 +1089,6 @@ int start_svm(void)
     hvm_enable(&svm_function_table);
 
     return 1;
-}
-
-void arch_svm_do_resume(struct vcpu *v) 
-{
-    /* pinning VCPU to a different core? */
-    if ( v->arch.hvm_svm.launch_core == smp_processor_id()) {
-        hvm_do_resume( v );
-        reset_stack_and_jump( svm_asm_do_resume );
-    }
-    else {
-        if (svm_dbg_on)
-            printk("VCPU core pinned: %d to %d\n", 
-                   v->arch.hvm_svm.launch_core, smp_processor_id() );
-        v->arch.hvm_svm.launch_core = smp_processor_id();
-        hvm_migrate_timers( v );
-        hvm_do_resume( v );
-        reset_stack_and_jump( svm_asm_do_resume );
-    }
 }
 
 static int svm_do_nested_pgfault(paddr_t gpa, struct cpu_user_regs *regs)
diff -r 09f2e758a198 -r 61a4a4795be5 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Tue Mar 20 11:10:52 2007 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Tue Mar 20 11:56:20 2007 +0000
@@ -196,11 +196,13 @@ static int construct_vmcb(struct vcpu *v
 
     arch_svm->vmcb->exception_intercepts = MONITOR_DEFAULT_EXCEPTION_BITMAP;
 
-    if ( paging_mode_hap(v->domain) ) {
+    if ( paging_mode_hap(v->domain) )
+    {
         vmcb->cr0 = arch_svm->cpu_shadow_cr0;
         vmcb->np_enable = 1; /* enable nested paging */
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
         vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_PG;
+        vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
     }
 
     return 0;
@@ -245,16 +247,6 @@ void svm_destroy_vmcb(struct vcpu *v)
     }
 
     arch_svm->vmcb = NULL;
-}
-
-void svm_do_launch(struct vcpu *v)
-{
-    hvm_stts(v);
-
-    /* current core is the one we intend to perform the VMRUN on */
-    v->arch.hvm_svm.launch_core = smp_processor_id();
-
-    v->arch.schedule_tail = arch_svm_do_resume;
 }
 
 static void svm_dump_sel(char *name, svm_segment_register_t *s)
diff -r 09f2e758a198 -r 61a4a4795be5 xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S       Tue Mar 20 11:10:52 2007 +0000
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S       Tue Mar 20 11:56:20 2007 +0000
@@ -80,15 +80,24 @@
         popl %eax;  \
         addl $(NR_SKIPPED_REGS*4), %esp
 
-        ALIGN
-
 #define VMRUN  .byte 0x0F,0x01,0xD8
 #define VMLOAD .byte 0x0F,0x01,0xDA
 #define VMSAVE .byte 0x0F,0x01,0xDB
 #define STGI   .byte 0x0F,0x01,0xDC
 #define CLGI   .byte 0x0F,0x01,0xDD
 
-ENTRY(svm_asm_do_launch)
+ENTRY(svm_asm_do_resume)
+        GET_CURRENT(%ebx)
+        xorl %ecx,%ecx
+        notl %ecx
+        cli                             # tests must not race interrupts
+        movl VCPU_processor(%ebx),%eax
+        shl  $IRQSTAT_shift,%eax
+        test %ecx,irq_stat(%eax,1)
+        jnz  svm_process_softirqs
+        call svm_intr_assist
+        call svm_load_cr2
+
         CLGI                
         sti
         GET_CURRENT(%ebx)
@@ -135,30 +144,7 @@ svm_stgi_label:
         jmp  svm_asm_do_resume
 
         ALIGN
-
-ENTRY(svm_asm_do_resume)
-svm_test_all_events:
-        GET_CURRENT(%ebx)
-/*test_all_events:*/
-        xorl %ecx,%ecx
-        notl %ecx
-        cli                             # tests must not race interrupts
-/*test_softirqs:*/  
-        movl VCPU_processor(%ebx),%eax
-        shl  $IRQSTAT_shift,%eax
-        test %ecx,irq_stat(%eax,1)
-        jnz  svm_process_softirqs
-svm_restore_all_guest:
-        call svm_intr_assist
-        call svm_load_cr2
-        /* 
-         * Check if we are going back to AMD-V based VM
-         * By this time, all the setups in the VMCB must be complete.
-         */
-        jmp svm_asm_do_launch
-
-        ALIGN
 svm_process_softirqs:
         sti       
         call do_softirq
-        jmp  svm_test_all_events
+        jmp  svm_asm_do_resume
diff -r 09f2e758a198 -r 61a4a4795be5 xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S       Tue Mar 20 11:10:52 2007 +0000
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S       Tue Mar 20 11:56:20 2007 +0000
@@ -98,7 +98,17 @@
 #define STGI   .byte 0x0F,0x01,0xDC
 #define CLGI   .byte 0x0F,0x01,0xDD
 
-ENTRY(svm_asm_do_launch)
+ENTRY(svm_asm_do_resume)
+        GET_CURRENT(%rbx)
+        cli                             # tests must not race interrupts
+        movl VCPU_processor(%rbx),%eax
+        shl  $IRQSTAT_shift, %rax
+        leaq irq_stat(%rip), %rdx
+        testl $~0, (%rdx, %rax, 1)
+        jnz  svm_process_softirqs
+        call svm_intr_assist
+        call svm_load_cr2
+
         CLGI                
         sti
         GET_CURRENT(%rbx)
@@ -150,28 +160,8 @@ svm_stgi_label:
         call svm_vmexit_handler
         jmp  svm_asm_do_resume
 
-ENTRY(svm_asm_do_resume)
-svm_test_all_events:
-        GET_CURRENT(%rbx)
-/*test_all_events:*/
-        cli                             # tests must not race interrupts
-/*test_softirqs:*/
-        movl  VCPU_processor(%rbx),%eax
-        shl   $IRQSTAT_shift, %rax
-        leaq  irq_stat(%rip), %rdx
-        testl $~0, (%rdx, %rax, 1)
-        jnz   svm_process_softirqs
-svm_restore_all_guest:
-        call svm_intr_assist
-        call svm_load_cr2
-        /*
-         * Check if we are going back to AMD-V based VM
-         * By this time, all the setups in the VMCB must be complete.
-         */
-        jmp svm_asm_do_launch
-
         ALIGN
 svm_process_softirqs:
         sti
         call do_softirq
-        jmp  svm_test_all_events
+        jmp  svm_asm_do_resume
diff -r 09f2e758a198 -r 61a4a4795be5 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Tue Mar 20 11:10:52 2007 +0000
+++ b/xen/include/asm-x86/hvm/svm/svm.h Tue Mar 20 11:56:20 2007 +0000
@@ -29,8 +29,6 @@
 #include <asm/i387.h>
 
 extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
-extern void svm_do_launch(struct vcpu *v);
-extern void arch_svm_do_resume(struct vcpu *v);
 
 extern u64 root_vmcb_pa[NR_CPUS];
 
diff -r 09f2e758a198 -r 61a4a4795be5 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Mar 20 11:10:52 2007 +0000
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Mar 20 11:56:20 2007 +0000
@@ -447,7 +447,7 @@ struct arch_svm_struct {
     u32                 *msrpm;
     u64                 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
     int                 saved_irq_vector;
-    u32                 launch_core;
+    int                 launch_core;
     
     unsigned long       flags;            /* VMCB flags */
     unsigned long       cpu_shadow_cr0;   /* Guest value for CR0 */
