[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen master] xen: Switch to new TRACE() API
commit 26da08059eef35e6ce1a308dbe276e8ef6a6c5b1 Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> AuthorDate: Sat Sep 18 00:31:27 2021 +0100 Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> CommitDate: Mon May 20 18:29:44 2024 +0100 xen: Switch to new TRACE() API (Almost) no functional change. * In irq_move_cleanup_interrupt(), use the 'me' local variable rather than calling smp_processor_id() again. This manifests as a minor code improvement. * In vlapic_update_timer() and lapic_rearm(), introduce a new 'timer_period' local variable to simplify the expressions used for both the trace and create_periodic_time() calls. All other differences in the compiled binary are to do with line numbers changing. Some conversion notes: * HVMTRACE_LONG_[234]D() and TRACE_2_LONG_[234]D() were latently buggy. They blindly discard extra parameters, but luckily no users are impacted. They are also obfuscated wrappers, depending on exactly one or two parameters being TRC_PAR_LONG() to compile successfully. * HVMTRACE_LONG_1D() behaves unlike its named companions, and takes exactly one 64bit parameter which it splits manually. Its one user, vmx_cr_access()'s LMSW path, gets adjusted. * TRACE_?D() and TRACE_2_LONG_*() change to TRACE_TIME() as cycles is always enabled. * HVMTRACE_ND() is opencoded for VMENTRY/VMEXIT records to include cycles. These are converted to TRACE_TIME(), with the old modifier parameter expressed as an OR at the callsite. One callsite, svm_vmenter_helper() had a nested tb_init_done check, which is dropped. (The optimiser also spotted this, which is why it doesn't manifest as a binary difference.) * All uses of *LONG() are either opencoded or swapped to using a struct, to avoid MISRA issues. * All HVMTRACE_?D() change to TRACE() as cycles is explicitly skipped. 
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxx> --- xen/arch/x86/acpi/cpu_idle.c | 12 +++++----- xen/arch/x86/compat.c | 4 ++-- xen/arch/x86/cpu/mwait-idle.c | 6 ++--- xen/arch/x86/emul-i8254.c | 14 +++++------ xen/arch/x86/hvm/emulate.c | 8 +++---- xen/arch/x86/hvm/hpet.c | 6 ++--- xen/arch/x86/hvm/hvm.c | 16 ++++++------- xen/arch/x86/hvm/rtc.c | 12 +++++----- xen/arch/x86/hvm/svm/intr.c | 6 ++--- xen/arch/x86/hvm/svm/svm.c | 40 +++++++++++++++----------------- xen/arch/x86/hvm/vlapic.c | 43 +++++++++++++++++++--------------- xen/arch/x86/hvm/vmx/intr.c | 6 ++--- xen/arch/x86/hvm/vmx/vmx.c | 54 +++++++++++++++++++++---------------------- xen/arch/x86/hvm/vpic.c | 13 +++++------ xen/arch/x86/irq.c | 14 +++++------ xen/arch/x86/traps.c | 2 +- xen/common/domain.c | 4 ++-- xen/common/grant_table.c | 6 ++--- xen/drivers/cpufreq/utility.c | 2 +- 19 files changed, 133 insertions(+), 135 deletions(-) diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c index c8db1aa991..57ac984790 100644 --- a/xen/arch/x86/acpi/cpu_idle.c +++ b/xen/arch/x86/acpi/cpu_idle.c @@ -767,7 +767,7 @@ static void cf_check acpi_processor_idle(void) /* Get start time (ticks) */ t1 = alternative_call(cpuidle_get_tick); /* Trace cpu idle entry */ - TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred); + TRACE_TIME(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred); update_last_cx_stat(power, cx, t1); @@ -777,8 +777,8 @@ static void cf_check acpi_processor_idle(void) t2 = alternative_call(cpuidle_get_tick); trace_exit_reason(irq_traced); /* Trace cpu idle exit */ - TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, - irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); + TRACE_TIME(TRC_PM_IDLE_EXIT, cx->idx, t2, + irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); /* Update statistics */ update_idle_stats(power, cx, t1, t2); /* Re-enable interrupts */ @@ -798,7 +798,7 @@ static void cf_check acpi_processor_idle(void) /* 
Get start time (ticks) */ t1 = alternative_call(cpuidle_get_tick); /* Trace cpu idle entry */ - TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred); + TRACE_TIME(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred); update_last_cx_stat(power, cx, t1); @@ -853,8 +853,8 @@ static void cf_check acpi_processor_idle(void) cstate_restore_tsc(); trace_exit_reason(irq_traced); /* Trace cpu idle exit */ - TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, - irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); + TRACE_TIME(TRC_PM_IDLE_EXIT, cx->idx, t2, + irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); /* Update statistics */ update_idle_stats(power, cx, t1, t2); diff --git a/xen/arch/x86/compat.c b/xen/arch/x86/compat.c index a031062830..217b5b1fcc 100644 --- a/xen/arch/x86/compat.c +++ b/xen/arch/x86/compat.c @@ -37,8 +37,8 @@ long do_sched_op_compat(int cmd, unsigned long arg) return do_sched_op(cmd, guest_handle_from_ptr(NULL, void)); case SCHEDOP_shutdown: - TRACE_3D(TRC_SCHED_SHUTDOWN, - current->domain->domain_id, current->vcpu_id, arg); + TRACE_TIME(TRC_SCHED_SHUTDOWN, + current->domain->domain_id, current->vcpu_id, arg); domain_shutdown(current->domain, (u8)arg); break; diff --git a/xen/arch/x86/cpu/mwait-idle.c b/xen/arch/x86/cpu/mwait-idle.c index ff5c808bc9..ae69871171 100644 --- a/xen/arch/x86/cpu/mwait-idle.c +++ b/xen/arch/x86/cpu/mwait-idle.c @@ -962,7 +962,7 @@ static void cf_check mwait_idle(void) lapic_timer_off(); before = alternative_call(cpuidle_get_tick); - TRACE_4D(TRC_PM_IDLE_ENTRY, cx->type, before, exp, pred); + TRACE_TIME(TRC_PM_IDLE_ENTRY, cx->type, before, exp, pred); update_last_cx_stat(power, cx, before); @@ -986,8 +986,8 @@ static void cf_check mwait_idle(void) local_irq_enable(); - TRACE_6D(TRC_PM_IDLE_EXIT, cx->type, after, - irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); + TRACE_TIME(TRC_PM_IDLE_EXIT, cx->type, after, + irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); if (!(lapic_timer_reliable_states & (1 << 
cx->type))) lapic_timer_on(); diff --git a/xen/arch/x86/emul-i8254.c b/xen/arch/x86/emul-i8254.c index 0191f80ba2..144aa168a3 100644 --- a/xen/arch/x86/emul-i8254.c +++ b/xen/arch/x86/emul-i8254.c @@ -94,7 +94,7 @@ static void cf_check pit_time_fired(struct vcpu *v, void *priv) { uint64_t *count_load_time = priv; - TRACE_0D(TRC_HVM_EMUL_PIT_TIMER_CB); + TRACE_TIME(TRC_HVM_EMUL_PIT_TIMER_CB); *count_load_time = get_guest_time(v); } @@ -128,19 +128,19 @@ static void pit_load_count(PITState *pit, int channel, int val) case 2: case 3: /* Periodic timer. */ - TRACE_2D(TRC_HVM_EMUL_PIT_START_TIMER, period, period); + TRACE_TIME(TRC_HVM_EMUL_PIT_START_TIMER, period, period); create_periodic_time(v, &pit->pt0, period, period, 0, pit_time_fired, &pit->count_load_time[channel], false); break; case 1: case 4: /* One-shot timer. */ - TRACE_2D(TRC_HVM_EMUL_PIT_START_TIMER, period, 0); + TRACE_TIME(TRC_HVM_EMUL_PIT_START_TIMER, period, 0); create_periodic_time(v, &pit->pt0, period, 0, 0, pit_time_fired, &pit->count_load_time[channel], false); break; default: - TRACE_0D(TRC_HVM_EMUL_PIT_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_PIT_STOP_TIMER); destroy_periodic_time(&pit->pt0); break; } @@ -404,7 +404,7 @@ void pit_stop_channel0_irq(PITState *pit) if ( !has_vpit(current->domain) ) return; - TRACE_0D(TRC_HVM_EMUL_PIT_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_PIT_STOP_TIMER); spin_lock(&pit->lock); destroy_periodic_time(&pit->pt0); spin_unlock(&pit->lock); @@ -615,7 +615,7 @@ void pit_reset(struct domain *d) if ( is_hvm_domain(d) ) { - TRACE_0D(TRC_HVM_EMUL_PIT_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_PIT_STOP_TIMER); destroy_periodic_time(&pit->pt0); pit->pt0.source = PTSRC_isa; } @@ -660,7 +660,7 @@ void pit_deinit(struct domain *d) if ( is_hvm_domain(d) ) { - TRACE_0D(TRC_HVM_EMUL_PIT_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_PIT_STOP_TIMER); destroy_periodic_time(&pit->pt0); } } diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index ab1bc51683..d8f36206a7 100644 --- 
a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -2181,7 +2181,7 @@ static int cf_check hvmemul_read_cr( case 3: case 4: *val = current->arch.hvm.guest_cr[reg]; - HVMTRACE_LONG_2D(CR_READ, reg, TRC_PAR_LONG(*val)); + TRACE(TRC_HVM_CR_READ64, reg, *val, *val >> 32); return X86EMUL_OKAY; default: break; @@ -2197,7 +2197,7 @@ static int cf_check hvmemul_write_cr( { int rc; - HVMTRACE_LONG_2D(CR_WRITE, reg, TRC_PAR_LONG(val)); + TRACE(TRC_HVM_CR_WRITE64, reg, val, val >> 32); switch ( reg ) { case 0: @@ -2242,7 +2242,7 @@ static int cf_check hvmemul_read_xcr( int rc = x86emul_read_xcr(reg, val, ctxt); if ( rc == X86EMUL_OKAY ) - HVMTRACE_LONG_2D(XCR_READ, reg, TRC_PAR_LONG(*val)); + TRACE(TRC_HVM_XCR_READ64, reg, *val, *val >> 32); return rc; } @@ -2252,7 +2252,7 @@ static int cf_check hvmemul_write_xcr( uint64_t val, struct x86_emulate_ctxt *ctxt) { - HVMTRACE_LONG_2D(XCR_WRITE, reg, TRC_PAR_LONG(val)); + TRACE(TRC_HVM_XCR_WRITE64, reg, val, val >> 32); return x86emul_write_xcr(reg, val, ctxt); } diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c index 5f456221cb..4c5b171922 100644 --- a/xen/arch/x86/hvm/hpet.c +++ b/xen/arch/x86/hvm/hpet.c @@ -201,7 +201,7 @@ static void hpet_stop_timer(HPETState *h, unsigned int tn, { ASSERT(tn < HPET_TIMER_NUM); ASSERT(rw_is_write_locked(&h->lock)); - TRACE_1D(TRC_HVM_EMUL_HPET_STOP_TIMER, tn); + TRACE_TIME(TRC_HVM_EMUL_HPET_STOP_TIMER, tn); destroy_periodic_time(&h->pt[tn]); /* read the comparator to get it updated so a read while stopped will * return the expected value. 
*/ @@ -305,8 +305,8 @@ static void hpet_set_timer(HPETState *h, unsigned int tn, if ( !oneshot ) period_ns = hpet_tick_to_ns(h, h->hpet.period[tn]); - TRACE_2_LONG_4D(TRC_HVM_EMUL_HPET_START_TIMER, tn, irq, - TRC_PAR_LONG(diff_ns), TRC_PAR_LONG(period_ns)); + TRACE_TIME(TRC_HVM_EMUL_HPET_START_TIMER, tn, irq, + diff_ns, diff_ns >> 32, period_ns, period_ns >> 32); create_periodic_time(vhpet_vcpu(h), &h->pt[tn], diff_ns, period_ns, irq, timer_level(h, tn) ? hpet_timer_fired : NULL, diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 9594e0a5c5..2c92e751cb 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -1705,7 +1705,7 @@ void hvm_hlt(unsigned int eflags) do_sched_op(SCHEDOP_block, guest_handle_from_ptr(NULL, void)); - HVMTRACE_1D(HLT, /* pending = */ vcpu_runnable(curr)); + TRACE(TRC_HVM_HLT, /* pending = */ vcpu_runnable(curr)); } void hvm_triple_fault(void) @@ -2159,7 +2159,7 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr) unsigned long val = *decode_gpr(guest_cpu_user_regs(), gpr); int rc; - HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(val)); + TRACE(TRC_HVM_CR_WRITE64, cr, val, val >> 32); HVM_DBG_LOG(DBG_LEVEL_1, "CR%u, value = %lx", cr, val); switch ( cr ) @@ -2224,7 +2224,7 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr) } *reg = val; - HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(val)); + TRACE(TRC_HVM_CR_READ64, cr, val, val >> 32); HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%u, value = %lx", cr, val); return X86EMUL_OKAY; @@ -3514,7 +3514,7 @@ int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len) } guest_cpuid(curr, leaf, subleaf, &res); - HVMTRACE_6D(CPUID, leaf, subleaf, res.a, res.b, res.c, res.d); + TRACE(TRC_HVM_CPUID, leaf, subleaf, res.a, res.b, res.c, res.d); regs->rax = res.a; regs->rbx = res.b; @@ -3528,7 +3528,7 @@ void hvm_rdtsc_intercept(struct cpu_user_regs *regs) { msr_split(regs, hvm_get_guest_tsc(current)); - HVMTRACE_2D(RDTSC, regs->eax, regs->edx); + TRACE(TRC_HVM_RDTSC, regs->eax, 
regs->edx); } int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) @@ -3635,8 +3635,7 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) } out: - HVMTRACE_3D(MSR_READ, msr, - (uint32_t)*msr_content, (uint32_t)(*msr_content >> 32)); + TRACE(TRC_HVM_MSR_READ, msr, *msr_content, *msr_content >> 32); return ret; gp_fault: @@ -3652,8 +3651,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content, struct domain *d = v->domain; int ret; - HVMTRACE_3D(MSR_WRITE, msr, - (uint32_t)msr_content, (uint32_t)(msr_content >> 32)); + TRACE(TRC_HVM_MSR_WRITE, msr, msr_content, msr_content >> 32); if ( may_defer && unlikely(monitored_msr(v->domain, msr)) ) { diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c index 4bb1c75055..cba8532a5a 100644 --- a/xen/arch/x86/hvm/rtc.c +++ b/xen/arch/x86/hvm/rtc.c @@ -91,7 +91,7 @@ static void cf_check rtc_pf_callback(struct vcpu *v, void *opaque) && ++(s->pt_dead_ticks) >= 10 ) { /* VM is ignoring its RTC; no point in running the timer */ - TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_RTC_STOP_TIMER); destroy_periodic_time(&s->pt); s->period = 0; } @@ -154,7 +154,7 @@ static void rtc_timer_update(RTCState *s) delta = period - ((now - s->start_time) % period); if ( s->hw.cmos_data[RTC_REG_B] & RTC_PIE ) { - TRACE_2D(TRC_HVM_EMUL_RTC_START_TIMER, delta, period); + TRACE_TIME(TRC_HVM_EMUL_RTC_START_TIMER, delta, period); create_periodic_time(v, &s->pt, delta, period, RTC_IRQ, rtc_pf_callback, s, false); } @@ -165,7 +165,7 @@ static void rtc_timer_update(RTCState *s) } /* fall through */ default: - TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_RTC_STOP_TIMER); destroy_periodic_time(&s->pt); s->period = 0; break; @@ -553,7 +553,7 @@ static int rtc_ioport_write(void *opaque, uint32_t addr, uint32_t data) rtc_update_irq(s); if ( (data ^ orig) & RTC_PIE ) { - TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_RTC_STOP_TIMER); 
destroy_periodic_time(&s->pt); s->period = 0; rtc_timer_update(s); @@ -807,7 +807,7 @@ void rtc_reset(struct domain *d) if ( !has_vrtc(d) ) return; - TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_RTC_STOP_TIMER); destroy_periodic_time(&s->pt); s->period = 0; s->pt.source = PTSRC_isa; @@ -892,7 +892,7 @@ void rtc_deinit(struct domain *d) spin_barrier(&s->lock); - TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_RTC_STOP_TIMER); destroy_periodic_time(&s->pt); kill_timer(&s->update_timer); kill_timer(&s->update_timer2); diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c index 4805c55672..dc684fdc84 100644 --- a/xen/arch/x86/hvm/svm/intr.c +++ b/xen/arch/x86/hvm/svm/intr.c @@ -90,8 +90,8 @@ static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack) } } - HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source, - vmcb->event_inj.v ? vmcb->event_inj.vector : -1); + TRACE(TRC_HVM_INTR_WINDOW, intack.vector, intack.source, + vmcb->event_inj.v ? vmcb->event_inj.vector : -1); /* * Create a dummy virtual interrupt to intercept as soon as the @@ -206,7 +206,7 @@ void asmlinkage svm_intr_assist(void) } else { - HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0); + TRACE(TRC_HVM_INJ_VIRQ, intack.vector, /*fake=*/ 0); svm_inject_extint(v, intack.vector); pt_intr_post(v, intack); } diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 4719fffae5..6103c171d2 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -1019,10 +1019,8 @@ void asmlinkage svm_vmenter_helper(void) svm_asid_handle_vmrun(); - if ( unlikely(tb_init_done) ) - HVMTRACE_ND(VMENTRY, - nestedhvm_vcpu_in_guestmode(curr) ? TRC_HVM_NESTEDFLAG : 0, - 1/*cycles*/); + TRACE_TIME(TRC_HVM_VMENTRY | + (nestedhvm_vcpu_in_guestmode(curr) ? 
TRC_HVM_NESTEDFLAG : 0)); svm_sync_vmcb(curr, vmcb_needs_vmsave); @@ -1394,10 +1392,10 @@ static void cf_check svm_inject_event(const struct x86_event *event) if ( _event.vector == X86_EXC_PF && _event.type == X86_EVENTTYPE_HW_EXCEPTION ) - HVMTRACE_LONG_2D(PF_INJECT, _event.error_code, - TRC_PAR_LONG(_event.cr2)); + TRACE(TRC_HVM_PF_INJECT64, _event.error_code, + _event.cr2, _event.cr2 >> 32); else - HVMTRACE_2D(INJ_EXC, _event.vector, _event.error_code); + TRACE(TRC_HVM_INJ_EXC, _event.vector, _event.error_code); } static bool cf_check svm_event_pending(const struct vcpu *v) @@ -1704,7 +1702,7 @@ static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs) { struct vmcb_struct *vmcb = vcpu_nestedhvm(v).nv_n1vmcx; - HVMTRACE_0D(DR_WRITE); + TRACE(TRC_HVM_DR_WRITE); __restore_debug_registers(vmcb, v); } @@ -2345,7 +2343,7 @@ static void svm_invlpga_intercept( static void svm_invlpg_intercept(unsigned long linear) { - HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(linear)); + TRACE(TRC_HVM_INVLPG64, 0, linear, linear >> 32); paging_invlpg(current, linear); } @@ -2574,11 +2572,11 @@ void asmlinkage svm_vmexit_handler(void) exit_reason = vmcb->exitcode; if ( hvm_long_mode_active(v) ) - HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0, - 1/*cycles*/, exit_reason, TRC_PAR_LONG(regs->rip)); + TRACE_TIME(TRC_HVM_VMEXIT64 | (vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0), + exit_reason, regs->rip, regs->rip >> 32); else - HVMTRACE_ND(VMEXIT, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0, - 1/*cycles*/, exit_reason, regs->eip); + TRACE_TIME(TRC_HVM_VMEXIT | (vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0), + exit_reason, regs->eip); if ( vcpu_guestmode ) { @@ -2673,17 +2671,17 @@ void asmlinkage svm_vmexit_handler(void) { case VMEXIT_INTR: /* Asynchronous event, handled when we STGI'd after the VMEXIT. */ - HVMTRACE_0D(INTR); + TRACE(TRC_HVM_INTR); break; case VMEXIT_NMI: /* Asynchronous event, handled when we STGI'd after the VMEXIT. 
*/ - HVMTRACE_0D(NMI); + TRACE(TRC_HVM_NMI); break; case VMEXIT_SMI: /* Asynchronous event, handled when we STGI'd after the VMEXIT. */ - HVMTRACE_0D(SMI); + TRACE(TRC_HVM_SMI); break; case VMEXIT_ICEBP: @@ -2771,9 +2769,9 @@ void asmlinkage svm_vmexit_handler(void) if ( trace_will_trace_event(TRC_SHADOW) ) break; if ( hvm_long_mode_active(v) ) - HVMTRACE_LONG_2D(PF_XEN, regs->error_code, TRC_PAR_LONG(va)); + TRACE(TRC_HVM_PF_XEN64, regs->error_code, va, va >> 32); else - HVMTRACE_2D(PF_XEN, regs->error_code, va); + TRACE(TRC_HVM_PF_XEN, regs->error_code, va); break; } @@ -2782,7 +2780,7 @@ void asmlinkage svm_vmexit_handler(void) } case VMEXIT_EXCEPTION_AC: - HVMTRACE_1D(TRAP, X86_EXC_AC); + TRACE(TRC_HVM_TRAP, X86_EXC_AC); hvm_inject_hw_exception(X86_EXC_AC, vmcb->ei.exc.ec); break; @@ -2792,7 +2790,7 @@ void asmlinkage svm_vmexit_handler(void) /* Asynchronous event, handled when we STGI'd after the VMEXIT. */ case VMEXIT_EXCEPTION_MC: - HVMTRACE_0D(MCE); + TRACE(TRC_HVM_MCE); svm_vmexit_mce_intercept(v, regs); break; @@ -2938,7 +2936,7 @@ void asmlinkage svm_vmexit_handler(void) if ( (insn_len = svm_get_insn_len(v, INSTR_VMCALL)) == 0 ) break; BUG_ON(vcpu_guestmode); - HVMTRACE_1D(VMMCALL, regs->eax); + TRACE(TRC_HVM_VMMCALL, regs->eax); if ( hvm_hypercall(regs) == HVM_HCALL_completed ) __update_guest_eip(regs, insn_len); diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c index 05072a21bf..4f5e714aa4 100644 --- a/xen/arch/x86/hvm/vlapic.c +++ b/xen/arch/x86/hvm/vlapic.c @@ -682,7 +682,7 @@ int guest_rdmsr_x2apic(const struct vcpu *v, uint32_t msr, uint64_t *val) static void cf_check vlapic_pt_cb(struct vcpu *v, void *data) { - TRACE_0D(TRC_HVM_EMUL_LAPIC_TIMER_CB); + TRACE_TIME(TRC_HVM_EMUL_LAPIC_TIMER_CB); *(s_time_t *)data = hvm_get_guest_time(v); } @@ -729,6 +729,8 @@ static void vlapic_update_timer(struct vlapic *vlapic, uint32_t lvtt, if ( delta && (is_oneshot || is_periodic) ) { + uint64_t timer_period = 0; + if ( vlapic->hw.timer_divisor 
!= old_divisor ) { period = (uint64_t)vlapic_get_reg(vlapic, APIC_TMICT) @@ -736,12 +738,14 @@ static void vlapic_update_timer(struct vlapic *vlapic, uint32_t lvtt, delta = delta * vlapic->hw.timer_divisor / old_divisor; } - TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(delta), - TRC_PAR_LONG(is_periodic ? period : 0), - vlapic->pt.irq); + if ( is_periodic ) + timer_period = period; + + TRACE_TIME(TRC_HVM_EMUL_LAPIC_START_TIMER, delta, delta >> 32, + timer_period, timer_period >> 32, vlapic->pt.irq); create_periodic_time(current, &vlapic->pt, delta, - is_periodic ? period : 0, vlapic->pt.irq, + timer_period, vlapic->pt.irq, is_periodic ? vlapic_pt_cb : NULL, &vlapic->timer_last_update, false); @@ -758,7 +762,7 @@ static void vlapic_update_timer(struct vlapic *vlapic, uint32_t lvtt, } else { - TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_LAPIC_STOP_TIMER); destroy_periodic_time(&vlapic->pt); /* * From now, TMCCT should return 0 until TMICT is set again. @@ -1208,8 +1212,8 @@ void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value) vlapic->hw.tdt_msr = value; /* .... 
reprogram tdt timer */ - TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(delta), - TRC_PAR_LONG(0LL), vlapic->pt.irq); + TRACE_TIME(TRC_HVM_EMUL_LAPIC_START_TIMER, delta, delta >> 32, + 0, 0, vlapic->pt.irq); create_periodic_time(v, &vlapic->pt, delta, 0, vlapic->pt.irq, vlapic_tdt_pt_cb, &vlapic->timer_last_update, false); @@ -1222,8 +1226,8 @@ void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value) /* trigger a timer event if needed */ if ( value > 0 ) { - TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(0LL), - TRC_PAR_LONG(0LL), vlapic->pt.irq); + TRACE_TIME(TRC_HVM_EMUL_LAPIC_START_TIMER, 0, 0, + 0, 0, vlapic->pt.irq); create_periodic_time(v, &vlapic->pt, 0, 0, vlapic->pt.irq, vlapic_tdt_pt_cb, &vlapic->timer_last_update, false); @@ -1232,7 +1236,7 @@ void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value) else { /* .... stop tdt timer */ - TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_LAPIC_STOP_TIMER); destroy_periodic_time(&vlapic->pt); } @@ -1282,7 +1286,7 @@ int vlapic_accept_pic_intr(struct vcpu *v) if ( target ) accept = __vlapic_accept_pic_intr(v); - TRACE_2D(TRC_HVM_EMUL_LAPIC_PIC_INTR, target, accept); + TRACE_TIME(TRC_HVM_EMUL_LAPIC_PIC_INTR, target, accept); return target && accept; } @@ -1433,7 +1437,7 @@ static void vlapic_do_init(struct vlapic *vlapic) vlapic_set_reg(vlapic, APIC_SPIV, 0xff); vlapic->hw.disabled |= VLAPIC_SW_DISABLED; - TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_LAPIC_STOP_TIMER); destroy_periodic_time(&vlapic->pt); } @@ -1457,7 +1461,7 @@ void vlapic_reset(struct vlapic *vlapic) static void lapic_rearm(struct vlapic *s) { unsigned long tmict; - uint64_t period, tdt_msr; + uint64_t period, timer_period = 0, tdt_msr; bool is_periodic; s->pt.irq = vlapic_get_reg(s, APIC_LVTT) & APIC_VECTOR_MASK; @@ -1476,11 +1480,14 @@ static void lapic_rearm(struct vlapic *s) (uint32_t)tmict * s->hw.timer_divisor); is_periodic = vlapic_lvtt_period(s); - 
TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(period), - TRC_PAR_LONG(is_periodic ? period : 0LL), s->pt.irq); + if ( is_periodic ) + timer_period = period; + + TRACE_TIME(TRC_HVM_EMUL_LAPIC_START_TIMER, period, period >> 32, + timer_period, timer_period >> 32, s->pt.irq); create_periodic_time(vlapic_vcpu(s), &s->pt, period, - is_periodic ? period : 0, + timer_period, s->pt.irq, is_periodic ? vlapic_pt_cb : NULL, &s->timer_last_update, false); @@ -1669,7 +1676,7 @@ void vlapic_destroy(struct vcpu *v) return; tasklet_kill(&vlapic->init_sipi.tasklet); - TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER); + TRACE_TIME(TRC_HVM_EMUL_LAPIC_STOP_TIMER); destroy_periodic_time(&vlapic->pt); unmap_domain_page_global(vlapic->regs); free_domheap_page(vlapic->regs_page); diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c index 8beeaab151..d36498d831 100644 --- a/xen/arch/x86/hvm/vmx/intr.c +++ b/xen/arch/x86/hvm/vmx/intr.c @@ -67,8 +67,8 @@ static void vmx_enable_intr_window(struct vcpu *v, struct hvm_intack intack) unsigned long intr; __vmread(VM_ENTRY_INTR_INFO, &intr); - HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source, - (intr & INTR_INFO_VALID_MASK) ? intr & 0xff : -1); + TRACE(TRC_HVM_INTR_WINDOW, intack.vector, intack.source, + (intr & INTR_INFO_VALID_MASK) ? 
intr & 0xff : -1); } if ( (intack.source == hvm_intsrc_nmi) && cpu_has_vmx_vnmi ) @@ -390,7 +390,7 @@ void asmlinkage vmx_intr_assist(void) } else { - HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0); + TRACE(TRC_HVM_INJ_VIRQ, intack.vector, /*fake=*/ 0); vmx_inject_extint(intack.vector, intack.source); pt_intr_post(v, intack); } diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 5f67a48592..165a85a25b 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -2112,10 +2112,10 @@ static void cf_check vmx_inject_event(const struct x86_event *event) if ( (_event.vector == X86_EXC_PF) && (_event.type == X86_EVENTTYPE_HW_EXCEPTION) ) - HVMTRACE_LONG_2D(PF_INJECT, _event.error_code, - TRC_PAR_LONG(curr->arch.hvm.guest_cr[2])); + TRACE(TRC_HVM_PF_INJECT64, _event.error_code, + curr->arch.hvm.guest_cr[2], curr->arch.hvm.guest_cr[2] >> 32); else - HVMTRACE_2D(INJ_EXC, _event.vector, _event.error_code); + TRACE(TRC_HVM_INJ_EXC, _event.vector, _event.error_code); } static bool cf_check vmx_event_pending(const struct vcpu *v) @@ -3125,7 +3125,7 @@ static void vmx_dr_access(unsigned long exit_qualification, { struct vcpu *v = current; - HVMTRACE_0D(DR_WRITE); + TRACE(TRC_HVM_DR_WRITE); if ( !v->arch.hvm.flag_dr_dirty ) __restore_debug_registers(v); @@ -3137,7 +3137,7 @@ static void vmx_dr_access(unsigned long exit_qualification, static void vmx_invlpg_intercept(unsigned long linear) { - HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(linear)); + TRACE(TRC_HVM_INVLPG64, /*invlpga=*/ 0, linear, linear >> 32); paging_invlpg(current, linear); } @@ -3185,7 +3185,7 @@ static int vmx_cr_access(cr_access_qual_t qual) hvm_monitor_crX(CR0, value, old); curr->arch.hvm.guest_cr[0] = value; vmx_update_guest_cr(curr, 0, 0); - HVMTRACE_0D(CLTS); + TRACE(TRC_HVM_CLTS); break; } @@ -3198,7 +3198,7 @@ static int vmx_cr_access(cr_access_qual_t qual) value = (value & ~(X86_CR0_MP|X86_CR0_EM|X86_CR0_TS)) | (qual.lmsw_data & 
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS)); - HVMTRACE_LONG_1D(LMSW, value); + TRACE(TRC_HVM_LMSW64, value, value >> 32); if ( (rc = hvm_set_cr0(value, true)) == X86EMUL_EXCEPTION ) hvm_inject_hw_exception(X86_EXC_GP, 0); @@ -3751,7 +3751,7 @@ static void vmx_do_extint(struct cpu_user_regs *regs) BUG_ON(!(vector & INTR_INFO_VALID_MASK)); vector &= INTR_INFO_VECTOR_MASK; - HVMTRACE_1D(INTR, vector); + TRACE(TRC_HVM_INTR, vector); regs->entry_vector = vector; do_IRQ(regs); @@ -3891,7 +3891,7 @@ static void vmx_failed_vmentry(unsigned int exit_reason, case EXIT_REASON_MCE_DURING_VMENTRY: printk("MCE\n"); - HVMTRACE_0D(MCE); + TRACE(TRC_HVM_MCE); /* Already handled. */ break; @@ -3932,7 +3932,7 @@ static int vmx_handle_eoi_write(void) { update_guest_eip(); /* Safe: APIC data write */ vlapic_EOI_set(vcpu_vlapic(current)); - HVMTRACE_0D(VLAPIC); + TRACE(TRC_HVM_VLAPIC); return 1; } @@ -4106,10 +4106,9 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs) __vmread(VM_EXIT_REASON, &exit_reason); if ( hvm_long_mode_active(v) ) - HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, exit_reason, - TRC_PAR_LONG(regs->rip)); + TRACE_TIME(TRC_HVM_VMEXIT64, exit_reason, regs->rip, regs->rip >> 32); else - HVMTRACE_ND(VMEXIT, 0, 1/*cycles*/, exit_reason, regs->eip); + TRACE_TIME(TRC_HVM_VMEXIT, exit_reason, regs->eip); perfc_incra(vmexits, (uint16_t)exit_reason); @@ -4226,7 +4225,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs) default: perfc_incr(realmode_exits); v->arch.hvm.vmx.vmx_emulate = 1; - HVMTRACE_0D(REALMODE_EMULATE); + TRACE(TRC_HVM_REALMODE_EMULATE); return; } case EXIT_REASON_EXTERNAL_INTERRUPT: @@ -4283,7 +4282,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs) * Table 23-1, "Exit Qualification for Debug Exceptions"). 
*/ __vmread(EXIT_QUALIFICATION, &exit_qualification); - HVMTRACE_1D(TRAP_DEBUG, exit_qualification); + TRACE(TRC_HVM_TRAP_DEBUG, exit_qualification); __restore_debug_registers(v); write_debugreg(6, exit_qualification | DR_STATUS_RESERVED_ONE); @@ -4345,7 +4344,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs) domain_pause_for_debugger(); break; case X86_EXC_BP: - HVMTRACE_1D(TRAP, vector); + TRACE(TRC_HVM_TRAP, vector); if ( !v->domain->debugger_attached ) { unsigned long insn_len; @@ -4370,7 +4369,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs) } break; case X86_EXC_NM: - HVMTRACE_1D(TRAP, vector); + TRACE(TRC_HVM_TRAP, vector); vmx_fpu_dirty_intercept(); break; case X86_EXC_PF: @@ -4388,37 +4387,36 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs) if ( trace_will_trace_event(TRC_SHADOW) ) break; if ( hvm_long_mode_active(v) ) - HVMTRACE_LONG_2D(PF_XEN, regs->error_code, - TRC_PAR_LONG(exit_qualification) ); + TRACE(TRC_HVM_PF_XEN64, regs->error_code, + exit_qualification, exit_qualification >> 32); else - HVMTRACE_2D(PF_XEN, - regs->error_code, exit_qualification ); + TRACE(TRC_HVM_PF_XEN, regs->error_code, exit_qualification); break; } hvm_inject_page_fault(regs->error_code, exit_qualification); break; case X86_EXC_AC: - HVMTRACE_1D(TRAP, vector); + TRACE(TRC_HVM_TRAP, vector); vmx_propagate_intr(intr_info); break; case X86_EXC_NMI: if ( MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK) != X86_EVENTTYPE_NMI ) goto exit_and_crash; - HVMTRACE_0D(NMI); + TRACE(TRC_HVM_NMI); /* Already handled above. */ break; case X86_EXC_MC: - HVMTRACE_0D(MCE); + TRACE(TRC_HVM_MCE); /* Already handled above. 
*/ break; case X86_EXC_UD: - HVMTRACE_1D(TRAP, vector); + TRACE(TRC_HVM_TRAP, vector); hvm_ud_intercept(regs); break; default: - HVMTRACE_1D(TRAP, vector); + TRACE(TRC_HVM_TRAP, vector); goto exit_and_crash; } break; @@ -4504,7 +4502,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs) break; case EXIT_REASON_VMCALL: - HVMTRACE_1D(VMMCALL, regs->eax); + TRACE(TRC_HVM_VMMCALL, regs->eax); if ( hvm_hypercall(regs) == HVM_HCALL_completed ) update_guest_eip(); /* Safe: VMCALL */ @@ -4912,7 +4910,7 @@ bool asmlinkage vmx_vmenter_helper(const struct cpu_user_regs *regs) if ( unlikely(curr->arch.hvm.vmx.lbr_flags & LBR_FIXUP_MASK) ) lbr_fixup(); - HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/); + TRACE_TIME(TRC_HVM_VMENTRY); __vmwrite(GUEST_RIP, regs->rip); __vmwrite(GUEST_RSP, regs->rsp); diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c index 4e23247a46..7c3b5c7254 100644 --- a/xen/arch/x86/hvm/vpic.c +++ b/xen/arch/x86/hvm/vpic.c @@ -99,8 +99,7 @@ static void vpic_update_int_output(struct hvm_hw_vpic *vpic) ASSERT(vpic_is_locked(vpic)); irq = vpic_get_highest_priority_irq(vpic); - TRACE_3D(TRC_HVM_EMUL_PIC_INT_OUTPUT, vpic->int_output, vpic->is_master, - irq); + TRACE_TIME(TRC_HVM_EMUL_PIC_INT_OUTPUT, vpic->int_output, vpic->is_master, irq); if ( vpic->int_output == (!vpic->init_state && irq >= 0) ) return; @@ -119,7 +118,7 @@ static void vpic_update_int_output(struct hvm_hw_vpic *vpic) if ( v != NULL ) { - TRACE_1D(TRC_HVM_EMUL_PIC_KICK, irq); + TRACE_TIME(TRC_HVM_EMUL_PIC_KICK, irq); vcpu_kick(v); } } @@ -144,7 +143,7 @@ static void __vpic_intack(struct hvm_hw_vpic *vpic, int irq) ASSERT(vpic_is_locked(vpic)); - TRACE_2D(TRC_HVM_EMUL_PIC_INTACK, vpic->is_master, irq); + TRACE_TIME(TRC_HVM_EMUL_PIC_INTACK, vpic->is_master, irq); /* Edge-triggered: clear the IRR (forget the edge). 
*/ if ( !(vpic->elcr & mask) ) vpic->irr &= ~mask; @@ -526,7 +525,7 @@ void vpic_irq_positive_edge(struct domain *d, int irq) ASSERT(irq <= 15); ASSERT(vpic_is_locked(vpic)); - TRACE_1D(TRC_HVM_EMUL_PIC_POSEDGE, irq); + TRACE_TIME(TRC_HVM_EMUL_PIC_POSEDGE, irq); if ( irq == 2 ) return; @@ -544,7 +543,7 @@ void vpic_irq_negative_edge(struct domain *d, int irq) ASSERT(irq <= 15); ASSERT(vpic_is_locked(vpic)); - TRACE_1D(TRC_HVM_EMUL_PIC_NEGEDGE, irq); + TRACE_TIME(TRC_HVM_EMUL_PIC_NEGEDGE, irq); if ( irq == 2 ) return; @@ -562,7 +561,7 @@ int vpic_ack_pending_irq(struct vcpu *v) accept = vlapic_accept_pic_intr(v); - TRACE_2D(TRC_HVM_EMUL_PIC_PEND_IRQ_CALL, accept, vpic->int_output); + TRACE_TIME(TRC_HVM_EMUL_PIC_PEND_IRQ_CALL, accept, vpic->int_output); if ( !accept || !vpic->int_output ) return -1; diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c index 80ba8d9fe9..da54ca0f17 100644 --- a/xen/arch/x86/irq.c +++ b/xen/arch/x86/irq.c @@ -229,7 +229,7 @@ static void _clear_irq_vector(struct irq_desc *desc) for_each_cpu(cpu, tmp_mask) { ASSERT(per_cpu(vector_irq, cpu)[old_vector] == irq); - TRACE_3D(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu); + TRACE_TIME(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu); per_cpu(vector_irq, cpu)[old_vector] = ~irq; } @@ -796,13 +796,11 @@ void cf_check irq_move_cleanup_interrupt(void) goto unlock; } send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); - TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP_DELAY, - irq, vector, smp_processor_id()); + TRACE_TIME(TRC_HW_IRQ_MOVE_CLEANUP_DELAY, irq, vector, me); goto unlock; } - TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP, - irq, vector, smp_processor_id()); + TRACE_TIME(TRC_HW_IRQ_MOVE_CLEANUP, irq, vector, me); per_cpu(vector_irq, me)[vector] = ~irq; desc->arch.move_cleanup_count--; @@ -1943,7 +1941,7 @@ void do_IRQ(struct cpu_user_regs *regs) spin_unlock(&desc->lock); } } - TRACE_1D(TRC_HW_IRQ_UNMAPPED_VECTOR, vector); + TRACE_TIME(TRC_HW_IRQ_UNMAPPED_VECTOR, vector); } goto out_no_unlock; } @@ -1984,7 +1982,7 @@ void 
do_IRQ(struct cpu_user_regs *regs) tsc_in = tb_init_done ? get_cycles() : 0; do_IRQ_guest(desc, vector); - TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles()); + TRACE_TIME(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles()); goto out_no_end; } @@ -2008,7 +2006,7 @@ void do_IRQ(struct cpu_user_regs *regs) tsc_in = tb_init_done ? get_cycles() : 0; action->handler(irq, action->dev_id); - TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles()); + TRACE_TIME(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles()); spin_lock_irq(&desc->lock); } diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index d554c9d41e..468a036081 100644 --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -1899,7 +1899,7 @@ void asmlinkage do_device_not_available(struct cpu_user_regs *regs) curr->arch.pv.ctrlreg[0] &= ~X86_CR0_TS; } else - TRACE_0D(TRC_PV_MATH_STATE_RESTORE); + TRACE_TIME(TRC_PV_MATH_STATE_RESTORE); #else ASSERT_UNREACHABLE(); #endif diff --git a/xen/common/domain.c b/xen/common/domain.c index 6773f7fb90..67cadb7c3f 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -626,7 +626,7 @@ struct domain *domain_create(domid_t domid, hardware_domain = d; } - TRACE_1D(TRC_DOM0_DOM_ADD, d->domain_id); + TRACE_TIME(TRC_DOM0_DOM_ADD, d->domain_id); lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid); @@ -1223,7 +1223,7 @@ void domain_destroy(struct domain *d) if ( atomic_cmpxchg(&d->refcnt, 0, DOMAIN_DESTROYED) != 0 ) return; - TRACE_1D(TRC_DOM0_DOM_REM, d->domain_id); + TRACE_TIME(TRC_DOM0_DOM_REM, d->domain_id); /* Delete from task list and task hashtable. 
*/ spin_lock(&domlist_update_lock); diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c index e98e9da520..ebe2d2a3ce 100644 --- a/xen/common/grant_table.c +++ b/xen/common/grant_table.c @@ -1307,7 +1307,7 @@ map_grant_ref( goto undo_out; } - TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom); + TRACE_TIME(TRC_MEM_PAGE_GRANT_MAP, op->dom); /* * All maptrack entry users check mt->flags first before using the @@ -1434,7 +1434,7 @@ unmap_common( return; } - TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom); + TRACE_TIME(TRC_MEM_PAGE_GRANT_UNMAP, dom); rgt = rd->grant_table; @@ -2445,7 +2445,7 @@ gnttab_transfer( put_gfn(d, gop.mfn); #endif - TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id); + TRACE_TIME(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id); /* Tell the guest about its new page frame. */ grant_read_lock(e->grant_table); diff --git a/xen/drivers/cpufreq/utility.c b/xen/drivers/cpufreq/utility.c index 6d2413b32a..e690a484f1 100644 --- a/xen/drivers/cpufreq/utility.c +++ b/xen/drivers/cpufreq/utility.c @@ -365,7 +365,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, retval = alternative_call(cpufreq_driver.target, policy, target_freq, relation); if ( retval == 0 ) - TRACE_2D(TRC_PM_FREQ_CHANGE, prev_freq/1000, policy->cur/1000); + TRACE_TIME(TRC_PM_FREQ_CHANGE, prev_freq / 1000, policy->cur / 1000); } return retval; -- generated by git-patchbot for /home/xen/git/xen.git#master
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |