[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] SVM patch to fix guest time, including 64bit msr code - allowing 64bit



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID b7facd6aa72efa087db768af65fc2dca371a8eb4
# Parent  72b469303d6dde2156a207eca3e8460e0c1c92d1
SVM patch to fix guest time, including 64-bit MSR code - allowing 64-bit
Linux guests to enable the APIC (i.e., apic=1 now works in the guest config file).

Signed-off-by: Tom Woller <thomas.woller@xxxxxxx>

diff -r 72b469303d6d -r b7facd6aa72e xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c       Thu Mar 23 09:47:44 2006
+++ b/xen/arch/x86/hvm/svm/intr.c       Thu Mar 23 09:50:34 2006
@@ -44,6 +44,58 @@
  */
 #define BSP_CPU(v)    (!(v->vcpu_id))
 
+u64 svm_get_guest_time(struct vcpu *v)
+{
+    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
+    u64    host_tsc;
+    
+    rdtscll(host_tsc);
+    return host_tsc + vpit->cache_tsc_offset;
+}
+
+void svm_set_guest_time(struct vcpu *v, u64 gtime)
+{
+    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
+    u64    host_tsc;
+   
+    rdtscll(host_tsc);
+    
+    vpit->cache_tsc_offset = gtime - host_tsc;
+    v->arch.hvm_svm.vmcb->tsc_offset = vpit->cache_tsc_offset;
+}
+
+static inline void
+interrupt_post_injection(struct vcpu * v, int vector, int type)
+{
+    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
+
+    if ( is_pit_irq(v, vector, type) ) {
+        if ( !vpit->first_injected ) {
+            vpit->pending_intr_nr = 0;
+            vpit->last_pit_gtime = svm_get_guest_time(v);
+            vpit->scheduled = NOW() + vpit->period;
+            set_timer(&vpit->pit_timer, vpit->scheduled);
+            vpit->first_injected = 1;
+        } else {
+            vpit->pending_intr_nr--;
+        }
+        vpit->inject_point = NOW();
+
+        vpit->last_pit_gtime += vpit->period;
+        svm_set_guest_time(v, vpit->last_pit_gtime);
+    }
+
+    switch(type)
+    {
+    case VLAPIC_DELIV_MODE_EXT:
+        break;
+
+    default:
+        vlapic_post_injection(v, vector, type);
+        break;
+    }
+}
+
 static inline int svm_inject_extint(struct vcpu *v, int trap, int error_code)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -62,45 +114,6 @@
     vmcb->vintr = intr;
 //  printf( "IRQ = %d\n", trap );
     return 0;
-}
-
-void svm_set_tsc_shift(struct vcpu *v, struct hvm_virpit *vpit)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    u64    drift;
-
-    if ( vpit->first_injected )
-        drift = vpit->period_cycles * vpit->pending_intr_nr;
-    else
-        drift = 0;
-    vmcb->tsc_offset = ( 0 - drift );
-}
-
-static inline void
-interrupt_post_injection(struct vcpu * v, int vector, int type)
-{
-    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
-
-    if ( is_pit_irq(v, vector, type) ) {
-            if ( !vpit->first_injected ) {
-                vpit->first_injected = 1;
-                vpit->pending_intr_nr = 0;
-            }
-            else if (vpit->pending_intr_nr) {
-                --vpit->pending_intr_nr;
-            }
-            vpit->inject_point = NOW();
-            svm_set_tsc_shift (v, vpit);
-    }
-
-    switch(type)
-    {
-    case VLAPIC_DELIV_MODE_EXT:
-        break;
-
-    default:
-        vlapic_post_injection(v, vector, type);
-    }
 }
 
 asmlinkage void svm_intr_assist(void) 
diff -r 72b469303d6d -r b7facd6aa72e xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Mar 23 09:47:44 2006
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Mar 23 09:50:34 2006
@@ -670,8 +670,18 @@
     reset_stack_and_jump(svm_asm_do_launch);
 }
 
+static void svm_freeze_time(struct vcpu *v)
+{
+    struct hvm_virpit *vpit = &v->domain->arch.hvm_domain.vpit;
+    
+    v->domain->arch.hvm_domain.guest_time = svm_get_guest_time(v);
+    if ( vpit->first_injected )
+        stop_timer(&(vpit->pit_timer));
+}
+
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
+    svm_freeze_time(v);
 }
 
 static void svm_ctxt_switch_to(struct vcpu *v)
@@ -911,7 +921,7 @@
 
     if (input == 1)
     {
-        if ( hvm_apic_support(v->domain) &&
+        if ( !hvm_apic_support(v->domain) ||
                 !vlapic_global_enabled((VLAPIC(v))) )
             clear_bit(X86_FEATURE_APIC, &edx);
            
@@ -1693,7 +1703,7 @@
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int  inst_len;
-    int64_t tsc_sum;
+    u64 msr_content=0;
 
     ASSERT(vmcb);
 
@@ -1708,24 +1718,27 @@
         inst_len = __get_instruction_length(vmcb, INSTR_RDMSR, NULL);
 
         regs->edx = 0;
-        switch (regs->ecx)
+        switch (regs->ecx) {
+        case MSR_IA32_TIME_STAMP_COUNTER:
         {
+            struct hvm_virpit *vpit;
+
+            rdtscll(msr_content);
+            vpit = &(v->domain->arch.hvm_domain.vpit);
+            msr_content += vpit->cache_tsc_offset;
+            break;
+        }
         case MSR_IA32_SYSENTER_CS:
-            regs->eax = vmcb->sysenter_cs;
+            msr_content = vmcb->sysenter_cs;
             break;
         case MSR_IA32_SYSENTER_ESP: 
-            regs->eax = vmcb->sysenter_esp;
+            msr_content = vmcb->sysenter_esp;
             break;
         case MSR_IA32_SYSENTER_EIP:     
-            regs->eax = vmcb->sysenter_eip;
+            msr_content = vmcb->sysenter_eip;
             break;
-        case MSR_IA32_TIME_STAMP_COUNTER:
-            __asm__ __volatile__("rdtsc" : "=a" (regs->eax), "=d" (regs->edx));
-            tsc_sum = regs->edx;
-            tsc_sum = (tsc_sum << 32) + regs->eax;
-            tsc_sum += (int64_t) vmcb->tsc_offset;
-            regs->eax = tsc_sum & 0xFFFFFFFF;
-            regs->edx = (tsc_sum >> 32) & 0xFFFFFFFF;
+        case MSR_IA32_APICBASE:
+            msr_content = VLAPIC(v) ? VLAPIC(v)->apic_base_msr : 0;
             break;
         default:
             if (long_mode_do_msr_read(regs))
@@ -1733,21 +1746,30 @@
             rdmsr_safe(regs->ecx, regs->eax, regs->edx);
             break;
         }
+        regs->eax = msr_content & 0xFFFFFFFF;
+        regs->edx = msr_content >> 32;
     }
     else
     {
         inst_len = __get_instruction_length(vmcb, INSTR_WRMSR, NULL);
+        msr_content = (regs->eax & 0xFFFFFFFF) | ((u64)regs->edx << 32);
 
         switch (regs->ecx)
         {
+        case MSR_IA32_TIME_STAMP_COUNTER:
+            svm_set_guest_time(v, msr_content);
+            break;
         case MSR_IA32_SYSENTER_CS:
-            vmcb->sysenter_cs = regs->eax;
+            vmcb->sysenter_cs = msr_content;
             break;
         case MSR_IA32_SYSENTER_ESP: 
-            vmcb->sysenter_esp = regs->eax;
+            vmcb->sysenter_esp = msr_content;
             break;
         case MSR_IA32_SYSENTER_EIP:     
-            vmcb->sysenter_eip = regs->eax;
+            vmcb->sysenter_eip = msr_content;
+            break;
+        case MSR_IA32_APICBASE:
+            vlapic_msr_set(VLAPIC(v), msr_content);
             break;
         default:
             long_mode_do_msr_write(regs);
diff -r 72b469303d6d -r b7facd6aa72e xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Thu Mar 23 09:47:44 2006
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Thu Mar 23 09:50:34 2006
@@ -467,6 +467,8 @@
     v->arch.hvm_svm.injecting_event  = 0;
     v->arch.hvm_svm.saved_irq_vector = -1;
 
+    svm_set_guest_time(v, 0);
+       
     if (svm_dbg_on)
         svm_dump_vmcb(__func__, vmcb);
 }
@@ -494,15 +496,16 @@
     struct hvm_virpit *vpit = &d->arch.hvm_domain.vpit;
 
     svm_stts(v);
+    
+    /* pick up the elapsed PIT ticks and re-enable pit_timer */
+    if ( vpit->first_injected) {
+        svm_set_guest_time(v, v->domain->arch.hvm_domain.guest_time);
+        pickup_deactive_ticks(vpit);
+    }
 
     if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
          test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
         hvm_wait_io();
-
-    /* pick up the elapsed PIT ticks and re-enable pit_timer */
-    if ( vpit->first_injected )
-        pickup_deactive_ticks(vpit);
-    svm_set_tsc_shift(v, vpit);
 
     /* We can't resume the guest if we're waiting on I/O */
     ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
diff -r 72b469303d6d -r b7facd6aa72e xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Thu Mar 23 09:47:44 2006
+++ b/xen/include/asm-x86/hvm/svm/svm.h Thu Mar 23 09:50:34 2006
@@ -48,6 +48,8 @@
 extern void svm_stts(struct vcpu *v); 
 extern void svm_do_launch(struct vcpu *v);
 extern void svm_do_resume(struct vcpu *v);
+extern void svm_set_guest_time(struct vcpu *v, u64 gtime);
+extern u64 svm_get_guest_time(struct vcpu *v);
 extern void arch_svm_do_resume(struct vcpu *v);
 extern int load_vmcb(struct arch_svm_struct *arch_svm, u64 phys_hsa);
 /* For debugging. Remove when no longer needed. */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, which monitors our
servers 24x7x365, backed by RackSpace's Fanatical Support®.