[Xen-changelog] [xen staging] x86: Consolidate the storage of MSR_AMD64_DR{0-3}_ADDRESS_MASK
commit 96f235c26f8ee346f2ca7c2576ec6e314a49d833
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Oct 19 16:14:22 2018 +0100
Commit:     Wei Liu <wei.liu2@xxxxxxxxxx>
CommitDate: Thu Nov 1 10:15:10 2018 +0000

    x86: Consolidate the storage of MSR_AMD64_DR{0-3}_ADDRESS_MASK

    The PV and HVM code both have a copy of these, which gives the false
    impression in the context switch code that they are PV/HVM specific.

    Move the storage into struct vcpu_msrs, and update all users to match.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/domctl.c              | 12 ++++++------
 xen/arch/x86/hvm/svm/svm.c         | 36 ++++++++++++++++++------------------
 xen/arch/x86/pv/emul-priv-op.c     |  8 ++++----
 xen/arch/x86/traps.c               |  8 ++++----
 xen/include/asm-x86/domain.h       |  3 ---
 xen/include/asm-x86/hvm/svm/vmcb.h |  3 ---
 xen/include/asm-x86/msr.h          |  6 ++++++
 7 files changed, 38 insertions(+), 38 deletions(-)
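Before the diff itself, a rough sketch of the shape of the change: the two per-guest-type copies of the address mask array go away, and a single copy lives in the guest-type-agnostic struct vcpu_msrs. The snippet below is a simplified illustration only (unrelated members elided, declarations condensed), not the literal Xen header content:

    #include <stdint.h>

    /* Before (simplified): two private copies of the same state. */
    struct pv_vcpu  { /* ... */ uint32_t dr_mask[4]; /* ... */ };  /* PV guests    */
    struct svm_vcpu { /* ... */ uint32_t dr_mask[4]; /* ... */ };  /* HVM, AMD SVM */

    /* After (simplified): one copy in the common per-vCPU MSR block. */
    struct vcpu_msrs
    {
        /* ... other MSR state elided ... */

        /* 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK */
        uint32_t dr_mask[4];
    };

All users, PV and HVM alike, then reach the masks through v->arch.msrs->dr_mask[], which is what every hunk below converts to.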
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index cc85395e96..f79827e6e4 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1328,12 +1328,12 @@ long arch_do_domctl(
 
         if ( boot_cpu_has(X86_FEATURE_DBEXT) )
         {
-            if ( v->arch.pv.dr_mask[0] )
+            if ( v->arch.msrs->dr_mask[0] )
             {
                 if ( i < vmsrs->msr_count && !ret )
                 {
                     msr.index = MSR_AMD64_DR0_ADDRESS_MASK;
-                    msr.value = v->arch.pv.dr_mask[0];
+                    msr.value = v->arch.msrs->dr_mask[0];
                     if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
                         ret = -EFAULT;
                 }
@@ -1342,12 +1342,12 @@ long arch_do_domctl(
 
             for ( j = 0; j < 3; ++j )
             {
-                if ( !v->arch.pv.dr_mask[1 + j] )
+                if ( !v->arch.msrs->dr_mask[1 + j] )
                     continue;
                 if ( i < vmsrs->msr_count && !ret )
                 {
                     msr.index = MSR_AMD64_DR1_ADDRESS_MASK + j;
-                    msr.value = v->arch.pv.dr_mask[1 + j];
+                    msr.value = v->arch.msrs->dr_mask[1 + j];
                     if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
                         ret = -EFAULT;
                 }
@@ -1392,7 +1392,7 @@ long arch_do_domctl(
                 if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
                      (msr.value >> 32) )
                     break;
-                v->arch.pv.dr_mask[0] = msr.value;
+                v->arch.msrs->dr_mask[0] = msr.value;
                 continue;
 
             case MSR_AMD64_DR1_ADDRESS_MASK ...
@@ -1401,7 +1401,7 @@ long arch_do_domctl(
                      (msr.value >> 32) )
                     break;
                 msr.index -= MSR_AMD64_DR1_ADDRESS_MASK - 1;
-                v->arch.pv.dr_mask[msr.index] = msr.value;
+                v->arch.msrs->dr_mask[msr.index] = msr.value;
                 continue;
             }
             break;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 41427e7b9b..be48ca72c5 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -210,10 +210,10 @@ static void svm_save_dr(struct vcpu *v)
         svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_RW);
         svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_RW);
 
-        rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[0]);
-        rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[1]);
-        rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[2]);
-        rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
+        rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.msrs->dr_mask[0]);
+        rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.msrs->dr_mask[1]);
+        rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.msrs->dr_mask[2]);
+        rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.msrs->dr_mask[3]);
     }
 
     v->arch.dr[0] = read_debugreg(0);
@@ -241,10 +241,10 @@ static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
         svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
         svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
 
-        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[0]);
-        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[1]);
-        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[2]);
-        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
+        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.msrs->dr_mask[0]);
+        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.msrs->dr_mask[1]);
+        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.msrs->dr_mask[2]);
+        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.msrs->dr_mask[3]);
     }
 
     write_debugreg(0, v->arch.dr[0]);
@@ -422,19 +422,19 @@ static void svm_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
     if ( boot_cpu_has(X86_FEATURE_DBEXT) )
     {
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[0];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[0];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR0_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[1];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[1];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR1_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[2];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[2];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR2_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[3];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[3];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR3_ADDRESS_MASK;
     }
@@ -455,7 +455,7 @@ static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
             else if ( ctxt->msr[i].val >> 32 )
                 err = -EDOM;
             else
-                v->arch.hvm.svm.dr_mask[0] = ctxt->msr[i].val;
+                v->arch.msrs->dr_mask[0] = ctxt->msr[i].val;
             break;
 
         case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
@@ -464,7 +464,7 @@ static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
             else if ( ctxt->msr[i].val >> 32 )
                 err = -EDOM;
             else
-                v->arch.hvm.svm.dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+                v->arch.msrs->dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
                     ctxt->msr[i].val;
             break;
 
@@ -2079,14 +2079,14 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext )
             goto gpf;
-        *msr_content = v->arch.hvm.svm.dr_mask[0];
+        *msr_content = v->arch.msrs->dr_mask[0];
         break;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext )
             goto gpf;
         *msr_content =
-            v->arch.hvm.svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+            v->arch.msrs->dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
         break;
 
     case MSR_AMD_OSVW_ID_LENGTH:
@@ -2277,13 +2277,13 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
             goto gpf;
-        v->arch.hvm.svm.dr_mask[0] = msr_content;
+        v->arch.msrs->dr_mask[0] = msr_content;
         break;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
             goto gpf;
-        v->arch.hvm.svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+        v->arch.msrs->dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
             msr_content;
         break;
 
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index aecf517cf0..f73ea4a163 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -916,13 +916,13 @@ static int read_msr(unsigned int reg, uint64_t *val,
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
             break;
-        *val = curr->arch.pv.dr_mask[0];
+        *val = curr->arch.msrs->dr_mask[0];
        return X86EMUL_OKAY;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
             break;
-        *val = curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+        *val = curr->arch.msrs->dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
         return X86EMUL_OKAY;
 
     case MSR_IA32_PERF_CAPABILITIES:
@@ -1110,7 +1110,7 @@ static int write_msr(unsigned int reg, uint64_t val,
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
             break;
-        curr->arch.pv.dr_mask[0] = val;
+        curr->arch.msrs->dr_mask[0] = val;
         if ( curr->arch.dr7 & DR7_ACTIVE_MASK )
             wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, val);
         return X86EMUL_OKAY;
@@ -1118,7 +1118,7 @@ static int write_msr(unsigned int reg, uint64_t val,
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
             break;
-        curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
+        curr->arch.msrs->dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
         if ( curr->arch.dr7 & DR7_ACTIVE_MASK )
             wrmsrl(reg, val);
         return X86EMUL_OKAY;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 9b532199a8..c60c8f5c2a 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2071,10 +2071,10 @@ void activate_debugregs(const struct vcpu *curr)
 
     if ( boot_cpu_has(X86_FEATURE_DBEXT) )
     {
-        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.pv.dr_mask[0]);
-        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.pv.dr_mask[1]);
-        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.pv.dr_mask[2]);
-        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.pv.dr_mask[3]);
+        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.msrs->dr_mask[0]);
+        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.msrs->dr_mask[1]);
+        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.msrs->dr_mask[2]);
+        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.msrs->dr_mask[3]);
     }
 }
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 1a88cac083..7214037820 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -553,9 +553,6 @@ struct pv_vcpu
      */
     uint32_t dr7_emul;
 
-    /* data breakpoint extension MSRs */
-    uint32_t dr_mask[4];
-
     /* Deferred VA-based update state. */
     bool_t need_update_runstate_area;
     struct vcpu_time_info pending_system_time;
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index 48aed78292..70177059e7 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -538,9 +538,6 @@ struct svm_vcpu {
     /* AMD lightweight profiling MSR */
     uint64_t guest_lwp_cfg;      /* guest version */
     uint64_t cpu_lwp_cfg;        /* CPU version */
-
-    /* data breakpoint extension MSRs */
-    uint32_t dr_mask[4];
 };
 
 struct vmcb_struct *alloc_vmcb(void);
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 7a061b28e8..c1cb38fab3 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -287,6 +287,12 @@ struct vcpu_msrs
             bool cpuid_faulting:1;
         };
     } misc_features_enables;
+
+    /*
+     * 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
+     * TODO: Not yet handled by guest_{rd,wr}msr() infrastructure.
+     */
+    uint32_t dr_mask[4];
 };
 
 void init_guest_msr_policy(void);
--
generated by git-patchbot for /home/xen/git/xen.git#staging
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
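A closing note on the index arithmetic used throughout the patch above: as the new comment in msr.h records, the DR0 mask has its own MSR number (0xc0011027) while the DR1-3 masks are contiguous (0xc0011019-0xc001101b). That is why DR0 is handled as its own case writing dr_mask[0], and the DR1-3 cases compute "msr - MSR_AMD64_DR1_ADDRESS_MASK + 1" to land in slots 1-3. A minimal, self-contained illustration of that mapping follows; the helper name dr_mask_slot() is hypothetical and not part of the patch:

    #include <assert.h>

    #define MSR_AMD64_DR0_ADDRESS_MASK 0xc0011027
    #define MSR_AMD64_DR1_ADDRESS_MASK 0xc0011019
    #define MSR_AMD64_DR3_ADDRESS_MASK 0xc001101b

    /* Hypothetical helper: map an address-mask MSR number to a dr_mask[] slot. */
    static unsigned int dr_mask_slot(unsigned int msr)
    {
        if ( msr == MSR_AMD64_DR0_ADDRESS_MASK )
            return 0;                                /* DR0 mask sits on its own */

        assert(msr >= MSR_AMD64_DR1_ADDRESS_MASK &&
               msr <= MSR_AMD64_DR3_ADDRESS_MASK);

        return msr - MSR_AMD64_DR1_ADDRESS_MASK + 1; /* DR1-3 -> slots 1-3 */
    }

    int main(void)
    {
        assert(dr_mask_slot(MSR_AMD64_DR0_ADDRESS_MASK) == 0);
        assert(dr_mask_slot(0xc001101a) == 2);       /* DR2 address mask */
        assert(dr_mask_slot(MSR_AMD64_DR3_ADDRESS_MASK) == 3);
        return 0;
    }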