x86/nHVM: generic hook adjustments Some of the generic hooks were unused altogether - drop them. Some of the hooks were used only to handle calls from the specific vendor's code (SVM) - drop them too. Several more hooks were pointlessly implemented as out-of-line functions, when most (all?) other HVM hooks use inline ones - make them inlines. None of them are implemented by only one of SVM or VMX, so also drop the conditionals. Funnily, nhvm_vmcx_hap_enabled(), having return type bool_t, nevertheless returned -EOPNOTSUPP. nhvm_vmcx_guest_intercepts_trap() and its hook and implementations are being made return bool_t, as they should have been from the beginning (its sole caller only checks for a non-zero result). Finally, make static whatever can as a result be static. Signed-off-by: Jan Beulich --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -6431,66 +6431,6 @@ int hvm_debug_op(struct vcpu *v, int32_t return rc; } -int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs) 
-{ - if (hvm_funcs.nhvm_vmcx_guest_intercepts_trap) - return hvm_funcs.nhvm_vmcx_guest_intercepts_trap(v, trap, errcode); - return -EOPNOTSUPP; -} - -bool_t nhvm_vmcx_hap_enabled(struct vcpu *v) -{ - if (hvm_funcs.nhvm_vmcx_hap_enabled) - return hvm_funcs.nhvm_vmcx_hap_enabled(v); - return -EOPNOTSUPP; -} - -enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v) -{ - return hvm_funcs.nhvm_intr_blocked(v); -} - /* * Local variables: * mode: C --- a/xen/arch/x86/hvm/svm/nestedsvm.c +++ b/xen/arch/x86/hvm/svm/nestedsvm.c @@ -246,7 +246,7 @@ static int nsvm_vcpu_hostsave(struct vcp return 0; } -int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs) +static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs) { struct nestedvcpu *nv = &vcpu_nestedhvm(v); struct nestedsvm *svm = &vcpu_nestedsvm(v); @@ -761,7 +761,7 @@ nsvm_vcpu_vmrun(struct vcpu *v, struct c return 0; } -int +static int nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs, uint64_t exitcode) { @@ -821,21 +821,11 @@ nsvm_vcpu_vmexit_trap(struct vcpu *v, st return NESTEDHVM_VMEXIT_DONE; } -uint64_t nsvm_vcpu_guestcr3(struct vcpu *v) -{ - return vcpu_nestedsvm(v).ns_vmcb_guestcr3; -} - uint64_t nsvm_vcpu_hostcr3(struct vcpu *v) { return vcpu_nestedsvm(v).ns_vmcb_hostcr3; } -uint32_t nsvm_vcpu_asid(struct vcpu *v) -{ - return vcpu_nestedsvm(v).ns_guest_asid; -} - static int nsvm_vmcb_guest_intercepts_msr(unsigned long *msr_bitmap, uint32_t msr, bool_t write) @@ -911,7 +901,7 @@ nsvm_vmcb_guest_intercepts_ioio(paddr_t return NESTEDHVM_VMEXIT_INJECT; } -int +static bool_t nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v, struct cpu_user_regs *regs, uint64_t exitcode) { @@ -994,7 +984,7 @@ nsvm_vmcb_guest_intercepts_exitcode(stru return 1; } -int +bool_t nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr, int errcode) { return nsvm_vmcb_guest_intercepts_exitcode(v, @@ -1409,7 +1399,7 @@ nestedsvm_vmexit_n2n1(struct vcpu *v, st if (rc) ret = 
NESTEDHVM_VMEXIT_ERROR; - rc = nhvm_vcpu_hostrestore(v, regs); + rc = nsvm_vcpu_hostrestore(v, regs); if (rc) ret = NESTEDHVM_VMEXIT_FATALERROR; @@ -1461,7 +1451,7 @@ nestedsvm_vcpu_vmexit(struct vcpu *v, st /* Prepare for running the l1 guest. Make the actual * modifications to the virtual VMCB/VMCS. */ - rc = nhvm_vcpu_vmexit(v, regs, exitcode); + rc = nsvm_vcpu_vmexit_inject(v, regs, exitcode); /* If l1 guest uses shadow paging, update the paging mode. */ if (!nestedhvm_paging_mode_hap(v)) --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -2274,12 +2274,8 @@ static struct hvm_function_table __initd .nhvm_vcpu_initialise = nsvm_vcpu_initialise, .nhvm_vcpu_destroy = nsvm_vcpu_destroy, .nhvm_vcpu_reset = nsvm_vcpu_reset, - .nhvm_vcpu_hostrestore = nsvm_vcpu_hostrestore, - .nhvm_vcpu_vmexit = nsvm_vcpu_vmexit_inject, .nhvm_vcpu_vmexit_trap = nsvm_vcpu_vmexit_trap, - .nhvm_vcpu_guestcr3 = nsvm_vcpu_guestcr3, .nhvm_vcpu_p2m_base = nsvm_vcpu_hostcr3, - .nhvm_vcpu_asid = nsvm_vcpu_asid, .nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap, .nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled, .nhvm_intr_blocked = nsvm_intr_blocked, --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -1805,9 +1805,7 @@ static struct hvm_function_table __initd .nhvm_vcpu_initialise = nvmx_vcpu_initialise, .nhvm_vcpu_destroy = nvmx_vcpu_destroy, .nhvm_vcpu_reset = nvmx_vcpu_reset, - .nhvm_vcpu_guestcr3 = nvmx_vcpu_guestcr3, .nhvm_vcpu_p2m_base = nvmx_vcpu_eptp_base, - .nhvm_vcpu_asid = nvmx_vcpu_asid, .nhvm_vmcx_hap_enabled = nvmx_ept_enabled, .nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception, .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap, --- a/xen/arch/x86/hvm/vmx/vvmx.c +++ b/xen/arch/x86/hvm/vmx/vvmx.c @@ -174,13 +174,6 @@ int nvmx_vcpu_reset(struct vcpu *v) return 0; } -uint64_t nvmx_vcpu_guestcr3(struct vcpu *v) -{ - /* TODO */ - ASSERT(0); - return 0; -} - uint64_t nvmx_vcpu_eptp_base(struct vcpu *v) { uint64_t eptp_base; @@ -190,13 
+183,6 @@ uint64_t nvmx_vcpu_eptp_base(struct vcpu return eptp_base & PAGE_MASK; } -uint32_t nvmx_vcpu_asid(struct vcpu *v) -{ - /* TODO */ - ASSERT(0); - return 0; -} - bool_t nvmx_ept_enabled(struct vcpu *v) { struct nestedvmx *nvmx = &vcpu_2_nvmx(v); @@ -514,8 +500,8 @@ static void vmreturn(struct cpu_user_reg regs->eflags = eflags; } -int nvmx_intercepts_exception(struct vcpu *v, unsigned int trap, - int error_code) +bool_t nvmx_intercepts_exception(struct vcpu *v, unsigned int trap, + int error_code) { struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); u32 exception_bitmap, pfec_match=0, pfec_mask=0; --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -172,16 +172,11 @@ struct hvm_function_table { int (*nhvm_vcpu_initialise)(struct vcpu *v); void (*nhvm_vcpu_destroy)(struct vcpu *v); int (*nhvm_vcpu_reset)(struct vcpu *v); - int (*nhvm_vcpu_hostrestore)(struct vcpu *v, - struct cpu_user_regs *regs); - int (*nhvm_vcpu_vmexit)(struct vcpu *v, struct cpu_user_regs *regs, - uint64_t exitcode); int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v, struct hvm_trap *trap); - uint64_t (*nhvm_vcpu_guestcr3)(struct vcpu *v); uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v); - uint32_t (*nhvm_vcpu_asid)(struct vcpu *v); - int (*nhvm_vmcx_guest_intercepts_trap)(struct vcpu *v, - unsigned int trapnr, int errcode); + bool_t (*nhvm_vmcx_guest_intercepts_trap)(struct vcpu *v, + unsigned int trapnr, + int errcode); bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v); @@ -479,35 +474,42 @@ int hvm_x2apic_msr_write(struct vcpu *v, * Nested HVM */ -/* Restores l1 guest state */ -int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs); -/* Fill l1 guest's VMCB/VMCS with data provided by generic exit codes - * (do conversion as needed), other misc SVM/VMX specific tweaks to make - * it work */ -int nhvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs, - uint64_t exitcode); /* inject vmexit into l1 guest. 
l1 guest will see a VMEXIT due to * 'trapnr' exception. */ -int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap); +static inline int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap) +{ + return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trap); +} -/* returns l2 guest cr3 in l2 guest physical address space. */ -uint64_t nhvm_vcpu_guestcr3(struct vcpu *v); /* returns l1 guest's cr3 that points to the page table used to * translate l2 guest physical address to l1 guest physical address. */ -uint64_t nhvm_vcpu_p2m_base(struct vcpu *v); -/* returns the asid number l1 guest wants to use to run the l2 guest */ -uint32_t nhvm_vcpu_asid(struct vcpu *v); +static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v) +{ + return hvm_funcs.nhvm_vcpu_p2m_base(v); +} /* returns true, when l1 guest intercepts the specified trap */ -int nhvm_vmcx_guest_intercepts_trap(struct vcpu *v, - unsigned int trapnr, int errcode); +static inline bool_t nhvm_vmcx_guest_intercepts_trap(struct vcpu *v, + unsigned int trap, + int errcode) +{ + return hvm_funcs.nhvm_vmcx_guest_intercepts_trap(v, trap, errcode); +} /* returns true when l1 guest wants to use hap to run l2 guest */ -bool_t nhvm_vmcx_hap_enabled(struct vcpu *v); +static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v) +{ + return hvm_funcs.nhvm_vmcx_hap_enabled(v); +} + /* interrupt */ -enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v); +static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v) +{ + return hvm_funcs.nhvm_intr_blocked(v); +} + #ifndef NDEBUG /* Permit use of the Forced Emulation Prefix in HVM guests */ --- a/xen/include/asm-x86/hvm/svm/nestedsvm.h +++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h @@ -110,18 +110,11 @@ nestedsvm_check_intercepts(struct vcpu * void nsvm_vcpu_destroy(struct vcpu *v); int nsvm_vcpu_initialise(struct vcpu *v); int nsvm_vcpu_reset(struct vcpu *v); -int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs); int nsvm_vcpu_vmrun(struct vcpu *v, struct 
cpu_user_regs *regs); -int nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs, - uint64_t exitcode); int nsvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap); -uint64_t nsvm_vcpu_guestcr3(struct vcpu *v); uint64_t nsvm_vcpu_hostcr3(struct vcpu *v); -uint32_t nsvm_vcpu_asid(struct vcpu *v); -int nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v, - struct cpu_user_regs *regs, uint64_t exitcode); -int nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr, - int errcode); +bool_t nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr, + int errcode); bool_t nsvm_vmcb_hap_enabled(struct vcpu *v); enum hvm_intblk nsvm_intr_blocked(struct vcpu *v); --- a/xen/include/asm-x86/hvm/vmx/vvmx.h +++ b/xen/include/asm-x86/hvm/vmx/vvmx.h @@ -111,12 +111,10 @@ union vmx_inst_info { int nvmx_vcpu_initialise(struct vcpu *v); void nvmx_vcpu_destroy(struct vcpu *v); int nvmx_vcpu_reset(struct vcpu *v); -uint64_t nvmx_vcpu_guestcr3(struct vcpu *v); uint64_t nvmx_vcpu_eptp_base(struct vcpu *v); -uint32_t nvmx_vcpu_asid(struct vcpu *v); enum hvm_intblk nvmx_intr_blocked(struct vcpu *v); -int nvmx_intercepts_exception(struct vcpu *v, - unsigned int trap, int error_code); +bool_t nvmx_intercepts_exception(struct vcpu *v, unsigned int trap, + int error_code); void nvmx_domain_relinquish_resources(struct domain *d); bool_t nvmx_ept_enabled(struct vcpu *v);