[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen staging] x86/HVM: patch indirect calls through hvm_funcs to direct ones
commit 26c871bc2c4c186a92f5a83200ed46b39766c1f6 Author: Jan Beulich <jbeulich@xxxxxxxx> AuthorDate: Fri May 17 14:37:25 2019 +0200 Commit: Jan Beulich <jbeulich@xxxxxxxx> CommitDate: Fri May 17 14:37:25 2019 +0200 x86/HVM: patch indirect calls through hvm_funcs to direct ones This is intentionally not touching hooks used rarely (or not at all) during the lifetime of a VM, like {domain,vcpu}_initialise or cpu_up, as well as nested, VM event, and altp2m ones (they can all be done later, if so desired). Virtual Interrupt delivery ones will be dealt with in a subsequent patch. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx> Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx> Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> --- xen/arch/x86/hvm/emulate.c | 12 +++++++----- xen/arch/x86/hvm/hvm.c | 27 +++++++++++++++------------ xen/include/asm-x86/hvm/hvm.h | 29 +++++++++++++++-------------- 3 files changed, 37 insertions(+), 31 deletions(-) diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index bfa3e1ad93..78cef47359 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -2152,7 +2152,7 @@ static int hvmemul_write_msr( static int hvmemul_wbinvd( struct x86_emulate_ctxt *ctxt) { - hvm_funcs.wbinvd_intercept(); + alternative_vcall(hvm_funcs.wbinvd_intercept); return X86EMUL_OKAY; } @@ -2170,7 +2170,7 @@ static int hvmemul_get_fpu( struct vcpu *curr = current; if ( !curr->fpu_dirtied ) - hvm_funcs.fpu_dirty_intercept(); + alternative_vcall(hvm_funcs.fpu_dirty_intercept); else if ( type == X86EMUL_FPU_fpu ) { const typeof(curr->arch.xsave_area->fpu_sse) *fpu_ctxt = @@ -2287,7 +2287,7 @@ static void hvmemul_put_fpu( { curr->fpu_dirtied = false; stts(); - hvm_funcs.fpu_leave(curr); + alternative_vcall(hvm_funcs.fpu_leave, curr); } } } @@ -2449,7 +2449,8 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt, if ( hvmemul_ctxt->intr_shadow != new_intr_shadow ) { 
hvmemul_ctxt->intr_shadow = new_intr_shadow; - hvm_funcs.set_interrupt_shadow(curr, new_intr_shadow); + alternative_vcall(hvm_funcs.set_interrupt_shadow, + curr, new_intr_shadow); } if ( hvmemul_ctxt->ctxt.retire.hlt && @@ -2586,7 +2587,8 @@ void hvm_emulate_init_once( memset(hvmemul_ctxt, 0, sizeof(*hvmemul_ctxt)); - hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(curr); + hvmemul_ctxt->intr_shadow = + alternative_call(hvm_funcs.get_interrupt_shadow, curr); hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt); hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt); diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index ed1ff9c87f..8993c2aa57 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -273,12 +273,12 @@ void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable) struct vcpu *v; for_each_vcpu ( d, v ) - hvm_funcs.set_rdtsc_exiting(v, enable); + alternative_vcall(hvm_funcs.set_rdtsc_exiting, v, enable); } void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat) { - if ( !hvm_funcs.get_guest_pat(v, guest_pat) ) + if ( !alternative_call(hvm_funcs.get_guest_pat, v, guest_pat) ) *guest_pat = v->arch.hvm.pat_cr; } @@ -303,7 +303,7 @@ int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat) return 0; } - if ( !hvm_funcs.set_guest_pat(v, guest_pat) ) + if ( !alternative_call(hvm_funcs.set_guest_pat, v, guest_pat) ) v->arch.hvm.pat_cr = guest_pat; return 1; @@ -343,7 +343,7 @@ bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val) /* nothing, best effort only */; } - return hvm_funcs.set_guest_bndcfgs(v, val); + return alternative_call(hvm_funcs.set_guest_bndcfgs, v, val); } /* @@ -507,7 +507,8 @@ void hvm_migrate_pirqs(struct vcpu *v) static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info) { info->cr2 = v->arch.hvm.guest_cr[2]; - return hvm_funcs.get_pending_event(v, info); + + return alternative_call(hvm_funcs.get_pending_event, v, info); } void hvm_do_resume(struct vcpu *v) @@ -1674,7 +1675,7 @@ void hvm_inject_event(const struct 
x86_event *event) } } - hvm_funcs.inject_event(event); + alternative_vcall(hvm_funcs.inject_event, event); } int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, @@ -2262,7 +2263,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer) (!rangeset_is_empty(d->iomem_caps) || !rangeset_is_empty(d->arch.ioport_caps) || has_arch_pdevs(d)) ) - hvm_funcs.handle_cd(v, value); + alternative_vcall(hvm_funcs.handle_cd, v, value); hvm_update_cr(v, 0, value); @@ -3479,7 +3480,8 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) goto gp_fault; /* If ret == 0 then this is not an MCE MSR, see other MSRs. */ ret = ((ret == 0) - ? hvm_funcs.msr_read_intercept(msr, msr_content) + ? alternative_call(hvm_funcs.msr_read_intercept, + msr, msr_content) : X86EMUL_OKAY); break; } @@ -3612,7 +3614,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content, goto gp_fault; /* If ret == 0 then this is not an MCE MSR, see other MSRs. */ ret = ((ret == 0) - ? hvm_funcs.msr_write_intercept(msr, msr_content) + ? 
alternative_call(hvm_funcs.msr_write_intercept, + msr, msr_content) : X86EMUL_OKAY); break; } @@ -3804,7 +3807,7 @@ void hvm_hypercall_page_initialise(struct domain *d, void *hypercall_page) { hvm_latch_shinfo_size(d); - hvm_funcs.init_hypercall_page(d, hypercall_page); + alternative_vcall(hvm_funcs.init_hypercall_page, d, hypercall_page); } void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip) @@ -5053,7 +5056,7 @@ void hvm_domain_soft_reset(struct domain *d) void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg, struct segment_register *reg) { - hvm_funcs.get_segment_register(v, seg, reg); + alternative_vcall(hvm_funcs.get_segment_register, v, seg, reg); switch ( seg ) { @@ -5199,7 +5202,7 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg, return; } - hvm_funcs.set_segment_register(v, seg, reg); + alternative_vcall(hvm_funcs.set_segment_register, v, seg, reg); } /* diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index 157f0debc6..1921422c61 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -384,42 +384,42 @@ static inline int hvm_guest_x86_mode(struct vcpu *v) { ASSERT(v == current); - return hvm_funcs.guest_x86_mode(v); + return alternative_call(hvm_funcs.guest_x86_mode, v); } static inline void hvm_update_host_cr3(struct vcpu *v) { if ( hvm_funcs.update_host_cr3 ) - hvm_funcs.update_host_cr3(v); + alternative_vcall(hvm_funcs.update_host_cr3, v); } static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr) { - hvm_funcs.update_guest_cr(v, cr, 0); + alternative_vcall(hvm_funcs.update_guest_cr, v, cr, 0); } static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush) { unsigned int flags = noflush ? 
HVM_UPDATE_GUEST_CR3_NOFLUSH : 0; - hvm_funcs.update_guest_cr(v, 3, flags); + alternative_vcall(hvm_funcs.update_guest_cr, v, 3, flags); } static inline void hvm_update_guest_efer(struct vcpu *v) { - hvm_funcs.update_guest_efer(v); + alternative_vcall(hvm_funcs.update_guest_efer, v); } static inline void hvm_cpuid_policy_changed(struct vcpu *v) { - hvm_funcs.cpuid_policy_changed(v); + alternative_vcall(hvm_funcs.cpuid_policy_changed, v); } static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc) { - hvm_funcs.set_tsc_offset(v, offset, at_tsc); + alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc); } /* @@ -436,18 +436,18 @@ static inline void hvm_flush_guest_tlbs(void) static inline unsigned int hvm_get_cpl(struct vcpu *v) { - return hvm_funcs.get_cpl(v); + return alternative_call(hvm_funcs.get_cpl, v); } static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v) { - return hvm_funcs.get_shadow_gs_base(v); + return alternative_call(hvm_funcs.get_shadow_gs_base, v); } static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val) { return hvm_funcs.get_guest_bndcfgs && - hvm_funcs.get_guest_bndcfgs(v, val); + alternative_call(hvm_funcs.get_guest_bndcfgs, v, val); } #define has_hvm_params(d) \ @@ -510,12 +510,12 @@ static inline void hvm_inject_page_fault(int errcode, unsigned long cr2) static inline bool hvm_event_pending(const struct vcpu *v) { - return hvm_funcs.event_pending(v); + return alternative_call(hvm_funcs.event_pending, v); } static inline void hvm_invlpg(struct vcpu *v, unsigned long linear) { - hvm_funcs.invlpg(v, linear); + alternative_vcall(hvm_funcs.invlpg, v, linear); } /* These bits in CR4 are owned by the host. */ @@ -540,13 +540,14 @@ static inline void hvm_cpu_down(void) static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf) { - return (hvm_funcs.get_insn_bytes ? hvm_funcs.get_insn_bytes(v, buf) : 0); + return (hvm_funcs.get_insn_bytes + ? 
alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0); } static inline void hvm_set_info_guest(struct vcpu *v) { if ( hvm_funcs.set_info_guest ) - return hvm_funcs.set_info_guest(v); + alternative_vcall(hvm_funcs.set_info_guest, v); } static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs) -- generated by git-patchbot for /home/xen/git/xen.git#staging _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.