[Xen-devel] [PATCH 3/5] x86/time: Rework pv_soft_rdtsc() to aid further cleanup
Having pv_soft_rdtsc() emulate all parts of an rdtscp is awkward, and gets
in the way of some intended cleanup.

 * Drop the rdtscp parameter, and make the caller responsible for the ecx
   update when appropriate.
 * Switch the function from being void, and return the main timestamp in
   the return value.

The regs parameter is still needed, but only for the stats collection, once
again bringing into question their utility.  The parameter can however
switch to being const.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
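
For reviewers, a minimal before/after sketch of the new calling convention,
abbreviated from the hunks below.  "emulating_rdtscp" is a hypothetical
stand-in for the TSC_AUX/opcode check each call site already performs;
msr_split() is the existing helper which writes a 64-bit value across
edx:eax in regs.

    /* Before: pv_soft_rdtsc() wrote edx:eax itself, and ecx when asked. */
    pv_soft_rdtsc(curr, regs, 1);

    /* After: the timestamp comes back in the return value.  The caller
     * splits it across edx:eax, and performs any ecx update itself. */
    msr_split(regs, pv_soft_rdtsc(curr, regs));
    if ( emulating_rdtscp )
        regs->rcx = ((currd->arch.tsc_mode == TSC_MODE_PVRDTSCP)
                     ? currd->arch.incarnation : 0);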
---
 xen/arch/x86/pv/emul-inv-op.c  |  7 ++++++-
 xen/arch/x86/pv/emul-priv-op.c | 12 ++++++++----
 xen/arch/x86/time.c            |  8 ++------
 xen/include/asm-x86/time.h     |  2 +-
 4 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/pv/emul-inv-op.c b/xen/arch/x86/pv/emul-inv-op.c
index f894417..b1916b4 100644
--- a/xen/arch/x86/pv/emul-inv-op.c
+++ b/xen/arch/x86/pv/emul-inv-op.c
@@ -46,6 +46,7 @@ static int emulate_invalid_rdtscp(struct cpu_user_regs *regs)
     char opcode[3];
     unsigned long eip, rc;
     struct vcpu *v = current;
+    struct domain *currd = v->domain;
 
     eip = regs->rip;
     if ( (rc = copy_from_user(opcode, (char *)eip, sizeof(opcode))) != 0 )
@@ -56,7 +57,11 @@ static int emulate_invalid_rdtscp(struct cpu_user_regs *regs)
     if ( memcmp(opcode, "\xf\x1\xf9", sizeof(opcode)) )
         return 0;
     eip += sizeof(opcode);
-    pv_soft_rdtsc(v, regs, 1);
+
+    msr_split(regs, pv_soft_rdtsc(v, regs));
+    regs->rcx = ((currd->arch.tsc_mode == TSC_MODE_PVRDTSCP)
+                 ? currd->arch.incarnation : 0);
+
     pv_emul_instruction_done(regs, eip);
     return EXCRET_fault_fixed;
 }
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 17aaf97..d4d64f2 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -1374,10 +1374,14 @@ int pv_emulate_privileged_op(struct cpu_user_regs *regs)
     case X86EMUL_OKAY:
         if ( ctxt.tsc & TSC_BASE )
         {
-            if ( ctxt.tsc & TSC_AUX )
-                pv_soft_rdtsc(curr, regs, 1);
-            else if ( currd->arch.vtsc )
-                pv_soft_rdtsc(curr, regs, 0);
+            if ( currd->arch.vtsc || (ctxt.tsc & TSC_AUX) )
+            {
+                msr_split(regs, pv_soft_rdtsc(curr, regs));
+
+                if ( ctxt.tsc & TSC_AUX )
+                    regs->rcx = ((currd->arch.tsc_mode == TSC_MODE_PVRDTSCP)
+                                 ? currd->arch.incarnation : 0);
+            }
             else
                 msr_split(regs, rdtsc());
         }
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index c90524d..c4ca515 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -2024,7 +2024,7 @@ u64 gtsc_to_gtime(struct domain *d, u64 tsc)
     return time;
 }
 
-void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp)
+uint64_t pv_soft_rdtsc(struct vcpu *v, const struct cpu_user_regs *regs)
 {
     s_time_t now = get_s_time();
     struct domain *d = v->domain;
@@ -2045,11 +2045,7 @@ void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp)
 
     spin_unlock(&d->arch.vtsc_lock);
 
-    msr_split(regs, gtime_to_gtsc(d, now));
-
-    if ( rdtscp )
-        regs->rcx =
-            (d->arch.tsc_mode == TSC_MODE_PVRDTSCP) ? d->arch.incarnation : 0;
+    return gtime_to_gtsc(d, now);
 }
 
 bool clocksource_is_tsc(void)
diff --git a/xen/include/asm-x86/time.h b/xen/include/asm-x86/time.h
index 046302e..3bac74c 100644
--- a/xen/include/asm-x86/time.h
+++ b/xen/include/asm-x86/time.h
@@ -56,7 +56,7 @@ uint64_t ns_to_acpi_pm_tick(uint64_t ns);
 
 uint64_t tsc_ticks2ns(uint64_t ticks);
 
-void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp);
+uint64_t pv_soft_rdtsc(struct vcpu *v, const struct cpu_user_regs *regs);
 u64 gtime_to_gtsc(struct domain *d, u64 time);
 u64 gtsc_to_gtime(struct domain *d, u64 tsc);
 
-- 
2.1.4