
[Xen-changelog] [xen-unstable] [IA64] Fix SMP Windows boot failure



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 4816a891b3d69491c0f2e063dc59d8118d40b8e9
# Parent  51be39239c47cf8d22937544cfcb83567f87afaa
[IA64] Fix SMP Windows boot failure

Sometimes SMP Windows cannot boot; the root cause is that a guest timer
interrupt is lost.

This patch fixes the following issues:
1. Windows uses a different way to synchronize the itc.
2. Previously, when the guest timer fired while the guest ITV was masked,
   Xen discarded the guest timer interrupt. That is not correct for
   Windows, which may still expect that timer interrupt later (see the
   sketch after this list).
3. Windows may set the timer differently in some situations: it first
   sets itm (which may be smaller than the current itc) and then sets
   itc (which is smaller than itm). With this patch, Xen supports this
   way of setting the timer.
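
As an aside for readers of the vlsapic.c hunks below, here is a minimal,
self-contained C sketch of the idea behind item 2: instead of dropping a
timer tick that arrives while the guest's ITV is masked, remember it in a
pending flag and inject it once the guest unmasks ITV. The names used here
(fake_vcpu, deliver_timer_irq, and so on) are illustrative only and are not
the Xen data structures or functions; in the actual patch the state lives in
the new vtime.pending field and is handled in vtm_timer_fn() and
vtm_set_itv().

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct fake_vcpu {
    bool    itv_masked;   /* guest has masked its timer vector        */
    uint8_t itv_vector;   /* vector programmed in the guest's ITV     */
    bool    pending;      /* a tick arrived while ITV was masked      */
};

static void deliver_timer_irq(struct fake_vcpu *v)
{
    printf("inject timer vector 0x%x\n", v->itv_vector);
}

/* Called when the host timer backing the guest's ITM fires. */
static void timer_fired(struct fake_vcpu *v)
{
    if (!v->itv_masked)
        deliver_timer_irq(v);
    else
        v->pending = true;    /* do not discard: the guest may expect it */
}

/* Called when the guest rewrites its ITV (possibly unmasking it). */
static void guest_sets_itv(struct fake_vcpu *v, bool masked, uint8_t vector)
{
    v->itv_masked = masked;
    v->itv_vector = vector;
    if (!masked && v->pending) {
        deliver_timer_irq(v);   /* replay the tick that was held back */
        v->pending = false;
    }
}

int main(void)
{
    struct fake_vcpu v = { .itv_masked = true, .itv_vector = 0xef };

    timer_fired(&v);                  /* masked: tick is remembered       */
    guest_sets_itv(&v, false, 0xef);  /* unmask: held tick is injected    */
    return 0;
}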

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
---
 xen/arch/ia64/asm-offsets.c   |    1 
 xen/arch/ia64/vmx/optvfault.S |   17 +++---
 xen/arch/ia64/vmx/vlsapic.c   |  119 ++++++++++++++++++++++--------------------
 xen/include/asm-ia64/vtm.h    |    3 -
 4 files changed, 77 insertions(+), 63 deletions(-)

diff -r 51be39239c47 -r 4816a891b3d6 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c       Fri Nov 10 11:19:51 2006 -0700
+++ b/xen/arch/ia64/asm-offsets.c       Fri Nov 10 11:19:57 2006 -0700
@@ -38,6 +38,7 @@ void foo(void)
 
        BLANK();
        DEFINE(VCPU_VTM_OFFSET_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.vtm_offset));
+       DEFINE(VCPU_VTM_LAST_ITC_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.last_itc));
        DEFINE(VCPU_VRR0_OFS, offsetof(struct vcpu, arch.arch_vmx.vrr[0]));
 #ifdef   VTI_DEBUG
        DEFINE(IVT_CUR_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_current));
diff -r 51be39239c47 -r 4816a891b3d6 xen/arch/ia64/vmx/optvfault.S
--- a/xen/arch/ia64/vmx/optvfault.S     Fri Nov 10 11:19:51 2006 -0700
+++ b/xen/arch/ia64/vmx/optvfault.S     Fri Nov 10 11:19:57 2006 -0700
@@ -29,17 +29,22 @@ GLOBAL_ENTRY(vmx_asm_mov_from_ar)
     br.many vmx_virtualization_fault_back
 #endif
     add r18=VCPU_VTM_OFFSET_OFS,r21
+    add r16=VCPU_VTM_LAST_ITC_OFS,r21
+    extr.u r17=r25,6,7
+    ;;
+    ld8 r18=[r18]
     mov r19=ar.itc
-    extr.u r17=r25,6,7
-    ;;
-    ld8 r18=[r18]
+    mov r24=b0
+    ;;
+    ld8 r16=[r16]
+    add r19=r19,r18
     movl r20=asm_mov_to_reg
     ;;
     adds r30=vmx_resume_to_guest-asm_mov_to_reg,r20
     shladd r17=r17,4,r20
-    mov r24=b0
-    ;;
-    add r19=r19,r18
+    cmp.gtu p6,p0=r16,r19
+    ;;
+    (p6) mov r19=r16
     mov b0=r17
     br.sptk.few b0
     ;;
diff -r 51be39239c47 -r 4816a891b3d6 xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c       Fri Nov 10 11:19:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vlsapic.c       Fri Nov 10 11:19:57 2006 -0700
@@ -119,14 +119,11 @@ static uint64_t now_itc(vtime_t *vtm)
         if ( vtm->vtm_local_drift ) {
 //          guest_itc -= vtm->vtm_local_drift;
         }       
-        if ( (long)(guest_itc - vtm->last_itc) > 0 ) {
+        if (guest_itc >= vtm->last_itc)
             return guest_itc;
-
-        }
-        else {
+        else
             /* guest ITC backwarded due after LP switch */
             return vtm->last_itc;
-        }
 }
 
 /*
@@ -134,33 +131,42 @@ static uint64_t now_itc(vtime_t *vtm)
  */
 static void vtm_reset(VCPU *vcpu)
 {
-    uint64_t    cur_itc;
-    vtime_t     *vtm;
-    
-    vtm=&(vcpu->arch.arch_vmx.vtm);
-    vtm->vtm_offset = 0;
+    int i;
+    u64 vtm_offset;
+    VCPU *v;
+    struct domain *d = vcpu->domain;
+    vtime_t *vtm = &VMX(vcpu, vtm);
+
+    if (vcpu->vcpu_id == 0) {
+        vtm_offset = 0UL - ia64_get_itc();
+        for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
+            if ((v = d->vcpu[i]) != NULL) {
+                VMX(v, vtm).vtm_offset = vtm_offset;
+                VMX(v, vtm).last_itc = 0;
+            }
+        }
+    }
     vtm->vtm_local_drift = 0;
     VCPU(vcpu, itm) = 0;
     VCPU(vcpu, itv) = 0x10000;
-    cur_itc = ia64_get_itc();
-    vtm->last_itc = vtm->vtm_offset + cur_itc;
+    vtm->last_itc = 0;
 }
 
 /* callback function when vtm_timer expires */
 static void vtm_timer_fn(void *data)
 {
-    vtime_t *vtm;
-    VCPU    *vcpu = data;
-    u64            cur_itc,vitv;
+    VCPU *vcpu = data;
+    vtime_t *vtm = &VMX(vcpu, vtm);
+    u64 vitv;
 
     vitv = VCPU(vcpu, itv);
-    if ( !ITV_IRQ_MASK(vitv) ){
-        vmx_vcpu_pend_interrupt(vcpu, vitv & 0xff);
+    if (!ITV_IRQ_MASK(vitv)) {
+        vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(vitv));
         vcpu_unblock(vcpu);
-    }
-    vtm=&(vcpu->arch.arch_vmx.vtm);
-    cur_itc = now_itc(vtm);
-    update_last_itc(vtm,cur_itc);  // pseudo read to update vITC
+    } else
+        vtm->pending = 1;
+
+    update_last_itc(vtm, VCPU(vcpu, itm));  // update vITC
 }
 
 void vtm_init(VCPU *vcpu)
@@ -168,7 +174,7 @@ void vtm_init(VCPU *vcpu)
     vtime_t     *vtm;
     uint64_t    itc_freq;
     
-    vtm=&(vcpu->arch.arch_vmx.vtm);
+    vtm = &VMX(vcpu, vtm);
 
     itc_freq = local_cpu_data->itc_freq;
     vtm->cfg_max_jump=itc_freq*MAX_JUMP_STEP/1000;
@@ -182,36 +188,38 @@ void vtm_init(VCPU *vcpu)
  */
 uint64_t vtm_get_itc(VCPU *vcpu)
 {
-    uint64_t    guest_itc;
-    vtime_t    *vtm;
-
-    vtm=&(vcpu->arch.arch_vmx.vtm);
+    uint64_t guest_itc;
+    vtime_t *vtm = &VMX(vcpu, vtm);
+
     guest_itc = now_itc(vtm);
-    update_last_itc(vtm, guest_itc);  // update vITC
     return guest_itc;
 }
 
 
 void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
 {
-    uint64_t    vitm, vitv;
-    vtime_t     *vtm;
-    vitm = VCPU(vcpu,itm);
-    vitv = VCPU(vcpu,itv);
-    vtm=&(vcpu->arch.arch_vmx.vtm);
-    if(vcpu->vcpu_id == 0){
-        vtm->vtm_offset = new_itc - ia64_get_itc();
-        vtm->last_itc = new_itc;
-    }
-    else{
-        vtm->vtm_offset = vcpu->domain->vcpu[0]->arch.arch_vmx.vtm.vtm_offset;
-        new_itc=vtm->vtm_offset + ia64_get_itc();
-        vtm->last_itc = new_itc;
-    }
-    if(vitm < new_itc){
-        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
+    int i;
+    uint64_t vitm, vtm_offset;
+    vtime_t *vtm;
+    VCPU *v;
+    struct domain *d = vcpu->domain;
+
+    vitm = VCPU(vcpu, itm);
+    vtm = &VMX(vcpu, vtm);
+    if (vcpu->vcpu_id == 0) {
+        vtm_offset = new_itc - ia64_get_itc();
+        for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
+            if ((v = d->vcpu[i]) != NULL) {
+                VMX(v, vtm).vtm_offset = vtm_offset;
+                VMX(v, vtm).last_itc = 0;
+            }
+        }
+    }
+    vtm->last_itc = 0;
+    if (vitm <= new_itc)
         stop_timer(&vtm->vtm_timer);
-    }
+    else
+        vtm_set_itm(vcpu, vitm);
 }
 
 
@@ -223,16 +231,16 @@ void vtm_set_itm(VCPU *vcpu, uint64_t va
 {
     vtime_t *vtm;
     uint64_t   vitv, cur_itc, expires;
+
     vitv = VCPU(vcpu, itv);
-    vtm=&(vcpu->arch.arch_vmx.vtm);
-    // TODO; need to handle VHPI in future
-    vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
-    VCPU(vcpu,itm)=val;
-    if (val >= vtm->last_itc) {
+    vtm = &VMX(vcpu, vtm);
+    VCPU(vcpu, itm) = val;
+    if (val > vtm->last_itc) {
         cur_itc = now_itc(vtm);
         if (time_before(val, cur_itc))
             val = cur_itc;
         expires = NOW() + cycle_to_ns(val-cur_itc) + TIMER_SLOP;
+        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
         set_timer(&vtm->vtm_timer, expires);
     }else{
         stop_timer(&vtm->vtm_timer);
@@ -242,14 +250,13 @@ void vtm_set_itm(VCPU *vcpu, uint64_t va
 
 void vtm_set_itv(VCPU *vcpu, uint64_t val)
 {
-    uint64_t    olditv;
-    olditv = VCPU(vcpu, itv);
+    vtime_t *vtm = &VMX(vcpu, vtm);
+
     VCPU(vcpu, itv) = val;
-    if(ITV_IRQ_MASK(val)){
-        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(olditv));
-    }else if(ITV_VECTOR(olditv)!=ITV_VECTOR(val)){
-        if (vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(olditv)))
-            vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
+
+    if (!ITV_IRQ_MASK(val) && vtm->pending) {
+        vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
+        vtm->pending = 0;
     }
 }
 
diff -r 51be39239c47 -r 4816a891b3d6 xen/include/asm-ia64/vtm.h
--- a/xen/include/asm-ia64/vtm.h        Fri Nov 10 11:19:51 2006 -0700
+++ b/xen/include/asm-ia64/vtm.h        Fri Nov 10 11:19:57 2006 -0700
@@ -33,7 +33,8 @@ typedef struct vtime {
 typedef struct vtime {
        long        vtm_offset; // guest ITC = host ITC + vtm_offset
        uint64_t    vtm_local_drift;
-       uint64_t   last_itc;
+       uint64_t    last_itc;
+       uint64_t    pending;
        /* 
         * Local drift (temporary) after guest suspension
         * In case of long jump amount of ITC after suspension, 
