[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] x86/svm: add EFER SVME support for VGIF/VLOAD
commit da3d180d101cf81d085f0c6560d70acd16c934bd Author: Brian Woods <brian.woods@xxxxxxx> AuthorDate: Tue Feb 20 16:27:02 2018 -0600 Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> CommitDate: Wed Feb 21 17:17:39 2018 +0000 x86/svm: add EFER SVME support for VGIF/VLOAD Only enable virtual VMLOAD/SAVE and VGIF if the guest EFER.SVME is set. Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Signed-off-by: Brian Woods <brian.woods@xxxxxxx> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> --- xen/arch/x86/hvm/svm/nestedsvm.c | 66 +++++++++++++++++++++++++++++++++ xen/arch/x86/hvm/svm/svm.c | 6 +++ xen/arch/x86/hvm/svm/vmcb.c | 17 --------- xen/include/asm-x86/hvm/svm/nestedsvm.h | 1 + 4 files changed, 73 insertions(+), 17 deletions(-) diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c index 1f5981f..6457532 100644 --- a/xen/arch/x86/hvm/svm/nestedsvm.c +++ b/xen/arch/x86/hvm/svm/nestedsvm.c @@ -1665,3 +1665,69 @@ void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v) __update_guest_eip(regs, inst_len); } + +/* + * This runs on EFER change to see if nested features need to either be + * turned off or on. + */ +void svm_nested_features_on_efer_update(struct vcpu *v) +{ + struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; + struct nestedsvm *svm = &vcpu_nestedsvm(v); + u32 general2_intercepts; + vintr_t vintr; + + /* + * Need state for transfering the nested gif status so only write on + * the hvm_vcpu EFER.SVME changing. 
+ */ + if ( v->arch.hvm_vcpu.guest_efer & EFER_SVME ) + { + if ( !vmcb->virt_ext.fields.vloadsave_enable && + paging_mode_hap(v->domain) && + cpu_has_svm_vloadsave ) + { + vmcb->virt_ext.fields.vloadsave_enable = 1; + general2_intercepts = vmcb_get_general2_intercepts(vmcb); + general2_intercepts &= ~(GENERAL2_INTERCEPT_VMLOAD | + GENERAL2_INTERCEPT_VMSAVE); + vmcb_set_general2_intercepts(vmcb, general2_intercepts); + } + + if ( !vmcb->_vintr.fields.vgif_enable && + cpu_has_svm_vgif ) + { + vintr = vmcb_get_vintr(vmcb); + vintr.fields.vgif = svm->ns_gif; + vintr.fields.vgif_enable = 1; + vmcb_set_vintr(vmcb, vintr); + general2_intercepts = vmcb_get_general2_intercepts(vmcb); + general2_intercepts &= ~(GENERAL2_INTERCEPT_STGI | + GENERAL2_INTERCEPT_CLGI); + vmcb_set_general2_intercepts(vmcb, general2_intercepts); + } + } + else + { + if ( vmcb->virt_ext.fields.vloadsave_enable ) + { + vmcb->virt_ext.fields.vloadsave_enable = 0; + general2_intercepts = vmcb_get_general2_intercepts(vmcb); + general2_intercepts |= (GENERAL2_INTERCEPT_VMLOAD | + GENERAL2_INTERCEPT_VMSAVE); + vmcb_set_general2_intercepts(vmcb, general2_intercepts); + } + + if ( vmcb->_vintr.fields.vgif_enable ) + { + vintr = vmcb_get_vintr(vmcb); + svm->ns_gif = vintr.fields.vgif; + vintr.fields.vgif_enable = 0; + vmcb_set_vintr(vmcb, vintr); + general2_intercepts = vmcb_get_general2_intercepts(vmcb); + general2_intercepts |= (GENERAL2_INTERCEPT_STGI | + GENERAL2_INTERCEPT_CLGI); + vmcb_set_general2_intercepts(vmcb, general2_intercepts); + } + } +} diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 9f58afc..64d2955 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -631,6 +631,12 @@ static void svm_update_guest_efer(struct vcpu *v) if ( lma ) new_efer |= EFER_LME; vmcb_set_efer(vmcb, new_efer); + + ASSERT(nestedhvm_enabled(v->domain) || + !(v->arch.hvm_vcpu.guest_efer & EFER_SVME)); + + if ( nestedhvm_enabled(v->domain) ) + 
svm_nested_features_on_efer_update(v); } static void svm_cpuid_policy_changed(struct vcpu *v) diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c index 0e6cba5..997e759 100644 --- a/xen/arch/x86/hvm/svm/vmcb.c +++ b/xen/arch/x86/hvm/svm/vmcb.c @@ -200,29 +200,12 @@ static int construct_vmcb(struct vcpu *v) /* PAT is under complete control of SVM when using nested paging. */ svm_disable_intercept_for_msr(v, MSR_IA32_CR_PAT); - - /* Use virtual VMLOAD/VMSAVE if available. */ - if ( cpu_has_svm_vloadsave ) - { - vmcb->virt_ext.fields.vloadsave_enable = 1; - vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMLOAD; - vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMSAVE; - } } else { vmcb->_exception_intercepts |= (1U << TRAP_page_fault); } - /* if available, enable and configure virtual gif */ - if ( cpu_has_svm_vgif ) - { - vmcb->_vintr.fields.vgif = 1; - vmcb->_vintr.fields.vgif_enable = 1; - vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_STGI; - vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_CLGI; - } - if ( cpu_has_pause_filter ) { vmcb->_pause_filter_count = SVM_PAUSEFILTER_INIT; diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h index a619b61..abcf2e7 100644 --- a/xen/include/asm-x86/hvm/svm/nestedsvm.h +++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h @@ -104,6 +104,7 @@ nestedsvm_vmexit_n2n1(struct vcpu *v, struct cpu_user_regs *regs); enum nestedhvm_vmexits nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs, uint64_t exitcode); +void svm_nested_features_on_efer_update(struct vcpu *v); /* Interface methods */ void nsvm_vcpu_destroy(struct vcpu *v); -- generated by git-patchbot for /home/xen/git/xen.git#master _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.