[Xen-changelog] [xen master] x86/HVM: add wrapper for hvm_funcs.set_tsc_offset()
commit e25243d53c3986d2e855873fa87d1ae24e537cb2
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Jul 19 09:40:19 2018 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jul 19 09:40:19 2018 +0200

    x86/HVM: add wrapper for hvm_funcs.set_tsc_offset()

    It's used in quite a few places, and hence doing so eases subsequent
    adjustment to how these (indirect) calls are carried out.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/domain.c     | 6 +++---
 xen/arch/x86/hvm/hvm.c        | 8 ++++----
 xen/arch/x86/hvm/vmx/vvmx.c   | 4 ++--
 xen/arch/x86/time.c           | 6 +++---
 xen/include/asm-x86/hvm/hvm.h | 6 ++++++
 5 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index ce15ce0470..ae70aaf8f9 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -317,9 +317,9 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
 
     /* Sync AP's TSC with BSP's. */
     v->arch.hvm_vcpu.cache_tsc_offset =
-        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
-                             v->domain->arch.hvm_domain.sync_tsc);
+        d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+                       d->arch.hvm_domain.sync_tsc);
 
     paging_update_paging_modes(v);
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f9408e1ee4..4ed24a401d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -417,7 +417,7 @@ static void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc)
     delta_tsc = guest_tsc - tsc;
     v->arch.hvm_vcpu.cache_tsc_offset = delta_tsc;
 
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
 }
 
 #define hvm_set_guest_tsc(v, t) hvm_set_guest_tsc_fixed(v, t, 0)
@@ -435,7 +435,7 @@ static void hvm_set_guest_tsc_adjust(struct vcpu *v, u64 tsc_adjust)
 {
     v->arch.hvm_vcpu.cache_tsc_offset += tsc_adjust
                             - v->arch.hvm_vcpu.msr_tsc_adjust;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
     v->arch.hvm_vcpu.msr_tsc_adjust = tsc_adjust;
 }
 
@@ -3941,8 +3941,8 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
     /* Sync AP's TSC with BSP's. */
     v->arch.hvm_vcpu.cache_tsc_offset =
         v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
-                             d->arch.hvm_domain.sync_tsc);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+                       d->arch.hvm_domain.sync_tsc);
 
     v->arch.hvm_vcpu.msr_tsc_adjust = 0;
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index e97db330cf..918d47df93 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1082,7 +1082,7 @@ static void load_shadow_guest_state(struct vcpu *v)
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
     }
 
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
     vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmentry_fields), vmentry_fields);
 
@@ -1288,7 +1288,7 @@ static void load_vvmcs_host_state(struct vcpu *v)
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
     }
 
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
     set_vvmcs(v, VM_ENTRY_INTR_INFO, 0);
 }
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index c342d00732..536449b264 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -2198,9 +2198,9 @@ void tsc_set_info(struct domain *d,
          * will sync their TSC to BSP's sync_tsc.
          */
         d->arch.hvm_domain.sync_tsc = rdtsc();
-        hvm_funcs.set_tsc_offset(d->vcpu[0],
-                                 d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
-                                 d->arch.hvm_domain.sync_tsc);
+        hvm_set_tsc_offset(d->vcpu[0],
+                           d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
+                           d->arch.hvm_domain.sync_tsc);
     }
 }
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 667efa10bc..1ee273b075 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -347,6 +347,12 @@ static inline void hvm_cpuid_policy_changed(struct vcpu *v)
     hvm_funcs.cpuid_policy_changed(v);
 }
 
+static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
+                                      uint64_t at_tsc)
+{
+    hvm_funcs.set_tsc_offset(v, offset, at_tsc);
+}
+
 /*
  * Called to ensure than all guest-specific mappings in a tagged TLB are
  * flushed; does *not* flush Xen's TLB entries, and on processors without a
--
generated by git-patchbot for /home/xen/git/xen.git#master
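
For readers unfamiliar with the pattern, here is a minimal, self-contained C sketch (not Xen code; all names below are hypothetical) of what such a wrapper buys: call sites stop open-coding the dereference of a function-pointer table, so if the way the indirect call is issued ever needs to change, only the wrapper body has to be touched rather than every caller.

/*
 * Sketch of the "wrap the ops-table call in one static inline helper"
 * pattern.  All names are invented for illustration; only the shape of
 * the code mirrors the patch above.
 */
#include <stdint.h>
#include <stdio.h>

struct vcpu_demo {
    uint64_t tsc_offset;
};

/* Table of per-implementation hooks, analogous in spirit to hvm_funcs. */
struct demo_ops {
    void (*set_tsc_offset)(struct vcpu_demo *v, uint64_t offset,
                           uint64_t at_tsc);
};

static void demo_set_tsc_offset(struct vcpu_demo *v, uint64_t offset,
                                uint64_t at_tsc)
{
    v->tsc_offset = offset;
    printf("set offset=%llu at_tsc=%llu\n",
           (unsigned long long)offset, (unsigned long long)at_tsc);
}

static const struct demo_ops ops = {
    .set_tsc_offset = demo_set_tsc_offset,
};

/*
 * The wrapper: every caller goes through this helper.  If the indirect
 * call ever needs to be issued differently, only this body changes.
 */
static inline void wrap_set_tsc_offset(struct vcpu_demo *v, uint64_t offset,
                                       uint64_t at_tsc)
{
    ops.set_tsc_offset(v, offset, at_tsc);
}

int main(void)
{
    struct vcpu_demo v = { 0 };

    /* Callers no longer spell out the ops-table dereference themselves. */
    wrap_set_tsc_offset(&v, 42, 0);
    return 0;
}

Since the helper is a static inline, the compiler is expected to fold it away, leaving the call sites equivalent to the open-coded form they replace.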