[Xen-changelog] [xen staging] x86/msr: Handle MSR_AMD64_DR{0-3}_ADDRESS_MASK in the new MSR infrastructure
commit 86d47adcd3c4f494456a3d6ee06e941b132e3eb2
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Dec  1 13:29:36 2017 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Mon Nov 19 16:58:35 2018 +0000

    x86/msr: Handle MSR_AMD64_DR{0-3}_ADDRESS_MASK in the new MSR infrastructure

    This is a followup to c/s 96f235c26 which fulfils the remaining TODO item.

    First of all, the pre-existing SVM code has a bug.  The value in
    msrs->dr_mask[] may be stale, as we allow direct access to these MSRs.
    Resolve this in guest_rdmsr() by reading directly from hardware in the
    affected case.

    With the reading/writing logic moved to the common guest_{rd,wr}msr()
    infrastructure, the migration logic can be simplified.  The PV migration
    logic drops all of its special casing, and SVM's entire
    {init,save,load}_msr() infrastructure becomes unnecessary.

    The resulting diffstat shows quite how expensive the PV special cases
    were in arch_do_domctl().

      add/remove: 0/3 grow/shrink: 4/6 up/down: 465/-1494 (-1029)
      Function                    old     new   delta
      guest_rdmsr                 252     484    +232
      guest_wrmsr                 653     822    +169
      msrs_to_send                  8      48     +40
      hvm_load_cpu_msrs           489     513     +24
      svm_init_msr                 21       -     -21
      hvm_save_cpu_msrs           365     343     -22
      read_msr                   1089    1001     -88
      write_msr                  1829    1689    -140
      svm_msr_read_intercept     1124     970    -154
      svm_load_msr                195       -    -195
      svm_save_msr                196       -    -196
      svm_msr_write_intercept    1461    1265    -196
      arch_do_domctl             9581    9099    -482
      Total: Before=3314610, After=3313581, chg -0.03%

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/domctl.c          | 51 +++--------------------
 xen/arch/x86/hvm/hvm.c         |  6 +++
 xen/arch/x86/hvm/svm/svm.c     | 95 ------------------------------------------
 xen/arch/x86/msr.c             | 38 +++++++++++++++++
 xen/arch/x86/pv/emul-priv-op.c | 28 -------------
 xen/include/asm-x86/msr.h      |  5 ++-
 6 files changed, 54 insertions(+), 169 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 175a0c9ae0..aa8ad19479 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1274,6 +1274,10 @@ long arch_do_domctl(
         static const uint32_t msrs_to_send[] = {
             MSR_SPEC_CTRL,
             MSR_INTEL_MISC_FEATURES_ENABLES,
+            MSR_AMD64_DR0_ADDRESS_MASK,
+            MSR_AMD64_DR1_ADDRESS_MASK,
+            MSR_AMD64_DR2_ADDRESS_MASK,
+            MSR_AMD64_DR3_ADDRESS_MASK,
         };
         uint32_t nr_msrs = ARRAY_SIZE(msrs_to_send);

@@ -1340,35 +1344,6 @@ long arch_do_domctl(
                 ++i;
             }

-            if ( boot_cpu_has(X86_FEATURE_DBEXT) )
-            {
-                if ( v->arch.msrs->dr_mask[0] )
-                {
-                    if ( i < vmsrs->msr_count && !ret )
-                    {
-                        msr.index = MSR_AMD64_DR0_ADDRESS_MASK;
-                        msr.value = v->arch.msrs->dr_mask[0];
-                        if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
-                            ret = -EFAULT;
-                    }
-                    ++i;
-                }
-
-                for ( j = 0; j < 3; ++j )
-                {
-                    if ( !v->arch.msrs->dr_mask[1 + j] )
-                        continue;
-                    if ( i < vmsrs->msr_count && !ret )
-                    {
-                        msr.index = MSR_AMD64_DR1_ADDRESS_MASK + j;
-                        msr.value = v->arch.msrs->dr_mask[1 + j];
-                        if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
-                            ret = -EFAULT;
-                    }
-                    ++i;
-                }
-            }
-
             vcpu_unpause(v);

             if ( i > vmsrs->msr_count && !ret )
@@ -1398,24 +1373,10 @@ long arch_do_domctl(
                 {
                 case MSR_SPEC_CTRL:
                 case MSR_INTEL_MISC_FEATURES_ENABLES:
-                    if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
-                        break;
-                    continue;
-
                 case MSR_AMD64_DR0_ADDRESS_MASK:
-                    if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
-                         (msr.value >> 32) )
-                        break;
-                    v->arch.msrs->dr_mask[0] = msr.value;
-                    continue;
-
-                case MSR_AMD64_DR1_ADDRESS_MASK ...
-                     MSR_AMD64_DR3_ADDRESS_MASK:
-                    if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
-                         (msr.value >> 32) )
+                case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+                    if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
                         break;
-                    msr.index -= MSR_AMD64_DR1_ADDRESS_MASK - 1;
-                    v->arch.msrs->dr_mask[msr.index] = msr.value;
                     continue;
                 }
                 break;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0bc676cd94..e2e420444f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1305,6 +1305,10 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
     static const uint32_t msrs_to_send[] = {
         MSR_SPEC_CTRL,
         MSR_INTEL_MISC_FEATURES_ENABLES,
+        MSR_AMD64_DR0_ADDRESS_MASK,
+        MSR_AMD64_DR1_ADDRESS_MASK,
+        MSR_AMD64_DR2_ADDRESS_MASK,
+        MSR_AMD64_DR3_ADDRESS_MASK,
     };
     static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);

@@ -1438,6 +1442,8 @@ static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)

         case MSR_SPEC_CTRL:
         case MSR_INTEL_MISC_FEATURES_ENABLES:
+        case MSR_AMD64_DR0_ADDRESS_MASK:
+        case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
             rc = guest_wrmsr(v, ctxt->msr[i].index, ctxt->msr[i].val);

             if ( rc != X86EMUL_OKAY )
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 396ee4a370..b9a8900ced 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -413,72 +413,6 @@ static int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
     return 0;
 }

-static unsigned int __init svm_init_msr(void)
-{
-    return boot_cpu_has(X86_FEATURE_DBEXT) ? 4 : 0;
-}
-
-static void svm_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
-{
-    if ( boot_cpu_has(X86_FEATURE_DBEXT) )
-    {
-        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[0];
-        if ( ctxt->msr[ctxt->count].val )
-            ctxt->msr[ctxt->count++].index = MSR_AMD64_DR0_ADDRESS_MASK;
-
-        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[1];
-        if ( ctxt->msr[ctxt->count].val )
-            ctxt->msr[ctxt->count++].index = MSR_AMD64_DR1_ADDRESS_MASK;
-
-        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[2];
-        if ( ctxt->msr[ctxt->count].val )
-            ctxt->msr[ctxt->count++].index = MSR_AMD64_DR2_ADDRESS_MASK;
-
-        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[3];
-        if ( ctxt->msr[ctxt->count].val )
-            ctxt->msr[ctxt->count++].index = MSR_AMD64_DR3_ADDRESS_MASK;
-    }
-}
-
-static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
-{
-    unsigned int i, idx;
-    int err = 0;
-
-    for ( i = 0; i < ctxt->count; ++i )
-    {
-        switch ( idx = ctxt->msr[i].index )
-        {
-        case MSR_AMD64_DR0_ADDRESS_MASK:
-            if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
-                err = -ENXIO;
-            else if ( ctxt->msr[i].val >> 32 )
-                err = -EDOM;
-            else
-                v->arch.msrs->dr_mask[0] = ctxt->msr[i].val;
-            break;
-
-        case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
-            if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
-                err = -ENXIO;
-            else if ( ctxt->msr[i].val >> 32 )
-                err = -EDOM;
-            else
-                v->arch.msrs->dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
-                    ctxt->msr[i].val;
-            break;
-
-        default:
-            continue;
-        }
-        if ( err )
-            break;
-        ctxt->msr[i]._rsvd = 1;
-    }
-
-    return err;
-}
-
 static void svm_fpu_enter(struct vcpu *v)
 {
     struct vmcb_struct *n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
@@ -2094,19 +2028,6 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
             goto gpf;
         break;

-    case MSR_AMD64_DR0_ADDRESS_MASK:
-        if ( !v->domain->arch.cpuid->extd.dbext )
-            goto gpf;
-        *msr_content = v->arch.msrs->dr_mask[0];
-        break;
-
-    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
-        if ( !v->domain->arch.cpuid->extd.dbext )
-            goto gpf;
-        *msr_content =
-            v->arch.msrs->dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
-        break;
-
     case MSR_AMD_OSVW_ID_LENGTH:
     case MSR_AMD_OSVW_STATUS:
         if ( !d->arch.cpuid->extd.osvw )
@@ -2292,19 +2213,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
          */
         break;

-    case MSR_AMD64_DR0_ADDRESS_MASK:
-        if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
-            goto gpf;
-        v->arch.msrs->dr_mask[0] = msr_content;
-        break;
-
-    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
-        if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
-            goto gpf;
-        v->arch.msrs->dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
-            msr_content;
-        break;
-
     case MSR_AMD_OSVW_ID_LENGTH:
     case MSR_AMD_OSVW_STATUS:
         if ( !d->arch.cpuid->extd.osvw )
@@ -2627,9 +2535,6 @@ static struct hvm_function_table __initdata svm_function_table = {
     .vcpu_destroy         = svm_vcpu_destroy,
     .save_cpu_ctxt        = svm_save_vmcb_ctxt,
     .load_cpu_ctxt        = svm_load_vmcb_ctxt,
-    .init_msr             = svm_init_msr,
-    .save_msr             = svm_save_msr,
-    .load_msr             = svm_load_msr,
     .get_interrupt_shadow = svm_get_interrupt_shadow,
     .set_interrupt_shadow = svm_set_interrupt_shadow,
     .guest_x86_mode       = svm_guest_x86_mode,
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index c9e87b1fac..76cb6efa52 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -21,7 +21,10 @@

 #include <xen/init.h>
 #include <xen/lib.h>
+#include <xen/nospec.h>
 #include <xen/sched.h>
+
+#include <asm/debugreg.h>
 #include <asm/msr.h>

 DEFINE_PER_CPU(uint32_t, tsc_aux);
@@ -159,6 +162,27 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
         ret = guest_rdmsr_xen(v, msr, val);
         break;

+    case MSR_AMD64_DR0_ADDRESS_MASK:
+    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+        if ( !cp->extd.dbext )
+            goto gp_fault;
+
+        /*
+         * In HVM context when we've allowed the guest direct access to debug
+         * registers, the value in msrs->dr_mask[] may be stale.  Re-read it
+         * out of hardware.
+         */
+#ifdef CONFIG_HVM
+        if ( v == current && is_hvm_domain(d) && v->arch.hvm.flag_dr_dirty )
+            rdmsrl(msr, *val);
+        else
+#endif
+            *val = msrs->dr_mask[
+                array_index_nospec((msr == MSR_AMD64_DR0_ADDRESS_MASK)
+                                   ? 0 : (msr - MSR_AMD64_DR1_ADDRESS_MASK + 1),
+                                   ARRAY_SIZE(msrs->dr_mask))];
+        break;
+
     default:
         return X86EMUL_UNHANDLEABLE;
     }
@@ -285,6 +309,20 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
         ret = guest_wrmsr_xen(v, msr, val);
         break;

+    case MSR_AMD64_DR0_ADDRESS_MASK:
+    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+        if ( !cp->extd.dbext || val != (uint32_t)val )
+            goto gp_fault;
+
+        msrs->dr_mask[
+            array_index_nospec((msr == MSR_AMD64_DR0_ADDRESS_MASK)
+                               ? 0 : (msr - MSR_AMD64_DR1_ADDRESS_MASK + 1),
+                               ARRAY_SIZE(msrs->dr_mask))] = val;
+
+        if ( v == curr && (curr->arch.dr7 & DR7_ACTIVE_MASK) )
+            wrmsrl(msr, val);
+        break;
+
     default:
         return X86EMUL_UNHANDLEABLE;
     }
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 83441b68b3..a84f3f1a86 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -912,18 +912,6 @@ static int read_msr(unsigned int reg, uint64_t *val,
             *val = guest_misc_enable(*val);
         return X86EMUL_OKAY;

-    case MSR_AMD64_DR0_ADDRESS_MASK:
-        if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
-            break;
-        *val = curr->arch.msrs->dr_mask[0];
-        return X86EMUL_OKAY;
-
-    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
-        if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
-            break;
-        *val = curr->arch.msrs->dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
-        return X86EMUL_OKAY;
-
     case MSR_IA32_PERF_CAPABILITIES:
         /* No extra capabilities are supported. */
         *val = 0;
@@ -1106,22 +1094,6 @@ static int write_msr(unsigned int reg, uint64_t val,
             return X86EMUL_OKAY;
         break;

-    case MSR_AMD64_DR0_ADDRESS_MASK:
-        if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
-            break;
-        curr->arch.msrs->dr_mask[0] = val;
-        if ( curr->arch.dr7 & DR7_ACTIVE_MASK )
-            wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, val);
-        return X86EMUL_OKAY;
-
-    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
-        if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
-            break;
-        curr->arch.msrs->dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
-        if ( curr->arch.dr7 & DR7_ACTIVE_MASK )
-            wrmsrl(reg, val);
-        return X86EMUL_OKAY;
-
     case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR(7):
     case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL(3):
     case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2:
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index c1cb38fab3..05d905bafd 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -290,7 +290,10 @@ struct vcpu_msrs

     /*
      * 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
-     * TODO: Not yet handled by guest_{rd,wr}msr() infrastructure.
+     *
+     * Loaded into hardware for guests which have active %dr7 settings.
+     * Furthermore, HVM guests are offered direct access, meaning that the
+     * values here may be stale in current context.
      */
     uint32_t dr_mask[4];
 };
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
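
To see the staleness bug described in the commit message in isolation, here is a minimal, self-contained C model of the pattern (a sketch, not Xen code; hw_msr, cached, direct_access and the helper names are all invented for illustration). Reads consult the hardware copy whenever the guest currently has direct access to the MSRs, mirroring the rdmsrl() fallback added to guest_rdmsr(); writes reject values with the upper 32 bits set and keep hardware in sync, mirroring guest_wrmsr().

/*
 * Standalone model (invented names) of the stale-cache problem fixed
 * above: while a guest has direct access to the DR mask MSRs, the
 * hypervisor's cached copy can lag behind the real register, so reads
 * must go to "hardware" in that case.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_DR_MASKS 4

static uint64_t hw_msr[NR_DR_MASKS]; /* stand-in for the real MSRs */
static uint64_t cached[NR_DR_MASKS]; /* stand-in for msrs->dr_mask[] */
static bool direct_access;           /* guest currently owns the MSRs */

/* Clamp an index into the array; cf. array_index_nospec() in the patch. */
static unsigned int clamp_idx(unsigned int idx)
{
    return idx < NR_DR_MASKS ? idx : 0;
}

static uint64_t read_dr_mask(unsigned int idx)
{
    idx = clamp_idx(idx);

    /*
     * The fix: bypass the possibly-stale cache while the guest has
     * direct access, as the rdmsrl() path in guest_rdmsr() does.
     */
    return direct_access ? hw_msr[idx] : cached[idx];
}

static int write_dr_mask(unsigned int idx, uint64_t val)
{
    if ( val != (uint32_t)val )
        return -1;                   /* upper 32 bits must be zero */

    idx = clamp_idx(idx);
    cached[idx] = val;
    if ( direct_access )
        hw_msr[idx] = val;           /* keep hardware in sync */

    return 0;
}

int main(void)
{
    write_dr_mask(0, 0xff);
    direct_access = true;
    hw_msr[0] = 0xabcd;  /* the guest updates the MSR behind our back */

    printf("mask0 = %#" PRIx64 "\n", read_dr_mask(0));

    return 0;
}

Compiled and run, this prints mask0 = 0xabcd; a naive read of the cached copy would have returned the stale 0xff, which is exactly the window the new rdmsrl() path closes.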
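
The new guest_{rd,wr}msr() cases also clamp the computed dr_mask[] index with array_index_nospec() before using it, so an out-of-range value cannot steer a speculative out-of-bounds load. A branch-free sketch of the underlying idea (an approximation only; the patch merely shows that Xen's helper comes from <xen/nospec.h>, whose actual implementation may differ):

#include <stddef.h>

/*
 * Sketch: clamp idx to [0, size) without a data-dependent branch, so a
 * misspeculated bounds check cannot expose out-of-bounds data.
 */
static inline size_t index_nospec(size_t idx, size_t size)
{
    /* All-ones mask when idx < size, zero otherwise. */
    size_t mask = (size_t)0 - (size_t)(idx < size);

    return idx & mask;
}

Out-of-range indices collapse to 0 rather than faulting; that is safe here because the architectural checks (the dbext test and the MSR case labels) have already happened, and the clamp only needs to bound what speculation can touch.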