[Xen-changelog] [xen-unstable] hvm: Clean out save/restore debug tracing.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1207753051 -3600
# Node ID 3cac47973e15e674d648e1c9d1be5bba9dd63207
# Parent  aee133a8e5e72bc9a6da4bb1619931992da3b6ff
hvm: Clean out save/restore debug tracing.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/i8254.c   |   56 ++-----------------------------
 xen/arch/x86/hvm/svm/svm.c |    5 --
 xen/arch/x86/hvm/vioapic.c |   33 +-----------------
 xen/arch/x86/hvm/vlapic.c  |   81 ++++++++++++++++-----------------------------
 xen/arch/x86/hvm/vmx/vmx.c |   23 ------------
 xen/arch/x86/hvm/vpic.c    |   28 ---------------
 6 files changed, 35 insertions(+), 191 deletions(-)

diff -r aee133a8e5e7 -r 3cac47973e15 xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c  Wed Apr 09 15:25:16 2008 +0100
+++ b/xen/arch/x86/hvm/i8254.c  Wed Apr 09 15:57:31 2008 +0100
@@ -401,50 +401,6 @@ void pit_stop_channel0_irq(PITState *pit
     spin_unlock(&pit->lock);
 }
 
-#ifdef HVM_DEBUG_SUSPEND
-static void pit_info(PITState *pit)
-{
-    struct hvm_hw_pit_channel *s;
-    struct periodic_time *pt;
-    int i;
-
-    for ( i = 0; i < 3; i++ )
-    {
-        printk("*****pit channel %d's state:*****\n", i);
-        s = &pit->hw.channels[i];
-        printk("pit 0x%x.\n", s->count);
-        printk("pit 0x%x.\n", s->latched_count);
-        printk("pit 0x%x.\n", s->count_latched);
-        printk("pit 0x%x.\n", s->status_latched);
-        printk("pit 0x%x.\n", s->status);
-        printk("pit 0x%x.\n", s->read_state);
-        printk("pit 0x%x.\n", s->write_state);
-        printk("pit 0x%x.\n", s->write_latch);
-        printk("pit 0x%x.\n", s->rw_mode);
-        printk("pit 0x%x.\n", s->mode);
-        printk("pit 0x%x.\n", s->bcd);
-        printk("pit 0x%x.\n", s->gate);
-        printk("pit %"PRId64"\n", pit->count_load_time[i]);
-
-    }
-
-    pt = &pit->pt0;
-    printk("pit channel 0 periodic timer:\n", i);
-    printk("pt %d.\n", pt->enabled);
-    printk("pt %d.\n", pt->one_shot);
-    printk("pt %d.\n", pt->irq);
-    printk("pt %d.\n", pt->first_injected);
-    printk("pt %d.\n", pt->pending_intr_nr);
-    printk("pt %d.\n", pt->period);
-    printk("pt %"PRId64"\n", pt->period_cycles);
-    printk("pt %"PRId64"\n", pt->last_plt_gtime);
-}
-#else
-static void pit_info(PITState *pit)
-{
-}
-#endif
-
 static int pit_save(struct domain *d, hvm_domain_context_t *h)
 {
     PITState *pit = domain_vpit(d);
@@ -452,9 +408,6 @@ static int pit_save(struct domain *d, hv
 
     spin_lock(&pit->lock);
     
-    pit_info(pit);
-
-    /* Save the PIT hardware state */
     rc = hvm_save_entry(PIT, 0, h, &pit->hw);
 
     spin_unlock(&pit->lock);
@@ -469,21 +422,20 @@ static int pit_load(struct domain *d, hv
 
     spin_lock(&pit->lock);
 
-    /* Restore the PIT hardware state */
     if ( hvm_load_entry(PIT, h, &pit->hw) )
     {
         spin_unlock(&pit->lock);
         return 1;
     }
     
-    /* Recreate platform timers from hardware state.  There will be some 
+    /*
+     * Recreate platform timers from hardware state.  There will be some 
      * time jitter here, but the wall-clock will have jumped massively, so 
-     * we hope the guest can handle it. */
+     * we hope the guest can handle it.
+     */
     pit->pt0.last_plt_gtime = hvm_get_guest_time(d->vcpu[0]);
     for ( i = 0; i < 3; i++ )
         pit_load_count(pit, i, pit->hw.channels[i].count);
-
-    pit_info(pit);
 
     spin_unlock(&pit->lock);
 
diff -r aee133a8e5e7 -r 3cac47973e15 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Apr 09 15:25:16 2008 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Apr 09 15:57:31 2008 +0100
@@ -255,11 +255,6 @@ static int svm_vmcb_restore(struct vcpu 
     svm_update_guest_cr(v, 2);
     svm_update_guest_cr(v, 4);
 
-#ifdef HVM_DEBUG_SUSPEND
-    printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
-           __func__, c->cr3, c->cr0, c->cr4);
-#endif
-
     vmcb->sysenter_cs =  c->sysenter_cs;
     vmcb->sysenter_esp = c->sysenter_esp;
     vmcb->sysenter_eip = c->sysenter_eip;
diff -r aee133a8e5e7 -r 3cac47973e15 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c        Wed Apr 09 15:25:16 2008 +0100
+++ b/xen/arch/x86/hvm/vioapic.c        Wed Apr 09 15:57:31 2008 +0100
@@ -477,45 +477,16 @@ void vioapic_update_EOI(struct domain *d
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
 
-#ifdef HVM_DEBUG_SUSPEND
-static void ioapic_info(struct hvm_hw_vioapic *s)
-{
-    int i;
-    printk("*****ioapic state:*****\n");
-    printk("ioapic 0x%x.\n", s->ioregsel);
-    printk("ioapic 0x%x.\n", s->id);
-    printk("ioapic 0x%lx.\n", s->base_address);
-    for (i = 0; i < VIOAPIC_NUM_PINS; i++) {
-        printk("ioapic redirtbl[%d]:0x%"PRIx64"\n", i, s->redirtbl[i].bits);
-    }
-
-}
-#else
-static void ioapic_info(struct hvm_hw_vioapic *s)
-{
-}
-#endif
-
-
 static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
 {
     struct hvm_hw_vioapic *s = domain_vioapic(d);
-    ioapic_info(s);
-
-    /* save io-apic state*/
-    return ( hvm_save_entry(IOAPIC, 0, h, s) );
+    return hvm_save_entry(IOAPIC, 0, h, s);
 }
 
 static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
 {
     struct hvm_hw_vioapic *s = domain_vioapic(d);
-    
-    /* restore ioapic state */
-    if ( hvm_load_entry(IOAPIC, h, s) != 0 )
-        return -EINVAL;
-
-    ioapic_info(s);
-    return 0;
+    return hvm_load_entry(IOAPIC, h, s);
 }
 
 HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
diff -r aee133a8e5e7 -r 3cac47973e15 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Wed Apr 09 15:25:16 2008 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Wed Apr 09 15:57:31 2008 +0100
@@ -791,75 +791,54 @@ void vlapic_reset(struct vlapic *vlapic)
     vlapic->hw.disabled |= VLAPIC_SW_DISABLED;
 }
 
-#ifdef HVM_DEBUG_SUSPEND
-static void lapic_info(struct vlapic *s)
-{
-    printk("*****lapic state:*****\n");
-    printk("lapic 0x%"PRIx64".\n", s->hw.apic_base_msr);
-    printk("lapic 0x%x.\n", s->hw.disabled);
-    printk("lapic 0x%x.\n", s->hw.timer_divisor);
-}
-#else
-static void lapic_info(struct vlapic *s)
-{
-}
-#endif
-
 /* rearm the actimer if needed, after a HVM restore */
 static void lapic_rearm(struct vlapic *s)
 {
-    unsigned long tmict;
-
-    tmict = vlapic_get_reg(s, APIC_TMICT);
-    if ( tmict > 0 )
-    {
-        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
-                            (uint32_t)tmict * s->hw.timer_divisor;
-        uint32_t lvtt = vlapic_get_reg(s, APIC_LVTT);
-
-        s->pt.irq = lvtt & APIC_VECTOR_MASK;
-        create_periodic_time(vlapic_vcpu(s), &s->pt, period, s->pt.irq,
-                             !vlapic_lvtt_period(s), vlapic_pt_cb,
-                             &s->timer_last_update);
-        s->timer_last_update = s->pt.last_plt_gtime;
-
-        printk("lapic_load to rearm the actimer:"
-               "bus cycle is %uns, "
-               "saved tmict count %lu, period %"PRIu64"ns, irq=%"PRIu8"\n",
-               APIC_BUS_CYCLE_NS, tmict, period, s->pt.irq);
-    }
-
-    lapic_info(s);
+    unsigned long tmict = vlapic_get_reg(s, APIC_TMICT);
+    uint64_t period;
+
+    if ( (tmict = vlapic_get_reg(s, APIC_TMICT)) == 0 )
+        return;
+
+    period = ((uint64_t)APIC_BUS_CYCLE_NS *
+              (uint32_t)tmict * s->hw.timer_divisor);
+    s->pt.irq = vlapic_get_reg(s, APIC_LVTT) & APIC_VECTOR_MASK;
+    create_periodic_time(vlapic_vcpu(s), &s->pt, period, s->pt.irq,
+                         !vlapic_lvtt_period(s), vlapic_pt_cb,
+                         &s->timer_last_update);
+    s->timer_last_update = s->pt.last_plt_gtime;
 }
 
 static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct vlapic *s;
-
-    for_each_vcpu(d, v)
+    int rc = 0;
+
+    for_each_vcpu ( d, v )
     {
         s = vcpu_vlapic(v);
-        lapic_info(s);
-
-        if ( hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw) != 0 )
-            return 1; 
-    }
-    return 0;
+        if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) != 0 )
+            break;
+    }
+
+    return rc;
 }
 
 static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct vlapic *s;
-
-    for_each_vcpu(d, v)
+    int rc = 0;
+
+    for_each_vcpu ( d, v )
     {
         s = vcpu_vlapic(v);
-        if ( hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs) != 0 )
-            return 1; 
-    }
-    return 0;
+        if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 )
+            break;
+    }
+
+    return rc;
 }
 
 static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
@@ -880,8 +859,6 @@ static int lapic_load_hidden(struct doma
     if ( hvm_load_entry(LAPIC, h, &s->hw) != 0 ) 
         return -EINVAL;
 
-    lapic_info(s);
-
     vmx_vlapic_msr_changed(v);
 
     return 0;
diff -r aee133a8e5e7 -r 3cac47973e15 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Apr 09 15:25:16 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Apr 09 15:57:31 2008 +0100
@@ -561,11 +561,6 @@ static int vmx_vmcs_restore(struct vcpu 
     vmx_update_guest_cr(v, 2);
     vmx_update_guest_cr(v, 4);
 
-#ifdef HVM_DEBUG_SUSPEND
-    printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
-           __func__, c->cr3, c->cr0, c->cr4);
-#endif
-
     v->arch.hvm_vcpu.guest_efer = c->msr_efer;
     vmx_update_guest_efer(v);
 
@@ -596,20 +591,6 @@ static int vmx_vmcs_restore(struct vcpu 
     return 0;
 }
 
-#if defined(__x86_64__) && defined(HVM_DEBUG_SUSPEND)
-static void dump_msr_state(struct vmx_msr_state *m)
-{
-    int i = 0;
-    printk("**** msr state ****\n");
-    printk("shadow_gs=0x%lx, flags=0x%lx, msr_items:", m->shadow_gs, m->flags);
-    for ( i = 0; i < VMX_MSR_COUNT; i++ )
-        printk("0x%lx,", m->msrs[i]);
-    printk("\n");
-}
-#else
-#define dump_msr_state(m) ((void)0)
-#endif
-
 static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
 #ifdef __x86_64__
@@ -627,8 +608,6 @@ static void vmx_save_cpu_state(struct vc
 #endif
 
     data->tsc = hvm_get_guest_time(v);
-
-    dump_msr_state(guest_state);
 }
 
 static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
@@ -647,8 +626,6 @@ static void vmx_load_cpu_state(struct vc
 #endif
 
     hvm_set_guest_time(v, data->tsc);
-
-    dump_msr_state(guest_state);
 }
 
 
diff -r aee133a8e5e7 -r 3cac47973e15 xen/arch/x86/hvm/vpic.c
--- a/xen/arch/x86/hvm/vpic.c   Wed Apr 09 15:25:16 2008 +0100
+++ b/xen/arch/x86/hvm/vpic.c   Wed Apr 09 15:57:31 2008 +0100
@@ -363,32 +363,6 @@ static int vpic_intercept_elcr_io(
     return 1;
 }
 
-#ifdef HVM_DEBUG_SUSPEND
-static void vpic_info(struct hvm_hw_vpic *s)
-{
-    printk("*****pic state:*****\n");
-    printk("pic 0x%x.\n", s->irr);
-    printk("pic 0x%x.\n", s->imr);
-    printk("pic 0x%x.\n", s->isr);
-    printk("pic 0x%x.\n", s->irq_base);
-    printk("pic 0x%x.\n", s->init_state);
-    printk("pic 0x%x.\n", s->priority_add);
-    printk("pic 0x%x.\n", s->readsel_isr);
-    printk("pic 0x%x.\n", s->poll);
-    printk("pic 0x%x.\n", s->auto_eoi);
-    printk("pic 0x%x.\n", s->rotate_on_auto_eoi);
-    printk("pic 0x%x.\n", s->special_fully_nested_mode);
-    printk("pic 0x%x.\n", s->special_mask_mode);
-    printk("pic 0x%x.\n", s->elcr);
-    printk("pic 0x%x.\n", s->int_output);
-    printk("pic 0x%x.\n", s->is_master);
-}
-#else
-static void vpic_info(struct hvm_hw_vpic *s)
-{
-}
-#endif
-
 static int vpic_save(struct domain *d, hvm_domain_context_t *h)
 {
     struct hvm_hw_vpic *s;
@@ -398,7 +372,6 @@ static int vpic_save(struct domain *d, h
     for ( i = 0; i < 2 ; i++ )
     {
         s = &d->arch.hvm_domain.vpic[i];
-        vpic_info(s);
         if ( hvm_save_entry(PIC, i, h, s) )
             return 1;
     }
@@ -421,7 +394,6 @@ static int vpic_load(struct domain *d, h
     if ( hvm_load_entry(PIC, h, s) != 0 )
         return -EINVAL;
 
-    vpic_info(s);
     return 0;
 }
 
