[Xen-devel] [PATCH v2 07/10] x86: Add Intel Processor Trace MSRs read/write emulation
Add read/write emulation for the Intel Processor Trace MSRs. A #GP is
injected into the guest if Intel Processor Trace is not supported for the
guest, if an unsupported MSR is accessed, or if reserved bits are written.

Signed-off-by: Luwei Kang <luwei.kang@xxxxxxxxx>
---
 xen/arch/x86/cpu/ipt.c     | 108 +++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/vmx/vmx.c |  18 ++++++++
 xen/include/asm-x86/ipt.h  |   3 ++
 3 files changed, 129 insertions(+)

diff --git a/xen/arch/x86/cpu/ipt.c b/xen/arch/x86/cpu/ipt.c
index 977a3d7..dcb7a8d 100644
--- a/xen/arch/x86/cpu/ipt.c
+++ b/xen/arch/x86/cpu/ipt.c
@@ -33,6 +33,14 @@
 
 #define BIT(nr) (1UL << (nr))
 
+#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTER_EN | \
+        RTIT_STATUS_CONTEXT_EN | RTIT_STATUS_TRIGGER_EN | \
+        RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
+        RTIT_STATUS_BYTECNT))
+
+#define MSR_IA32_RTIT_OUTPUT_BASE_MASK(maxphyaddr) \
+        (~((1UL << (maxphyaddr)) - 1) | 0x7f)
+
 /* ipt: Flag to enable Intel Processor Trace (default off). */
 unsigned int __read_mostly ipt_mode = IPT_MODE_OFF;
 static int parse_ipt_params(const char *str);
@@ -106,6 +114,105 @@ static int __init parse_ipt_params(const char *str)
     return 0;
 }
 
+int ipt_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+{
+    const struct ipt_desc *ipt_desc = current->arch.hvm_vmx.ipt_desc;
+    const struct cpuid_policy *p = current->domain->arch.cpuid;
+    unsigned int index;
+
+    if ( !ipt_desc )
+        return 1;
+
+    switch ( msr )
+    {
+    case MSR_IA32_RTIT_CTL:
+        *msr_content = ipt_desc->ipt_guest.ctl;
+        break;
+    case MSR_IA32_RTIT_STATUS:
+        *msr_content = ipt_desc->ipt_guest.status;
+        break;
+    case MSR_IA32_RTIT_OUTPUT_BASE:
+        if ( !ipt_cap(p->ipt.raw, IPT_CAP_single_range_output) &&
+             !ipt_cap(p->ipt.raw, IPT_CAP_topa_output) )
+            return 1;
+        *msr_content = ipt_desc->ipt_guest.output_base;
+        break;
+    case MSR_IA32_RTIT_OUTPUT_MASK:
+        if ( !ipt_cap(p->ipt.raw, IPT_CAP_single_range_output) &&
+             !ipt_cap(p->ipt.raw, IPT_CAP_topa_output) )
+            return 1;
+        *msr_content = ipt_desc->ipt_guest.output_mask |
+                       RTIT_OUTPUT_MASK_DEFAULT;
+        break;
+    case MSR_IA32_RTIT_CR3_MATCH:
+        if ( !ipt_cap(p->ipt.raw, IPT_CAP_cr3_filter) )
+            return 1;
+        *msr_content = ipt_desc->ipt_guest.cr3_match;
+        break;
+    default:
+        index = msr - MSR_IA32_RTIT_ADDR_A(0);
+        if ( index >= ipt_cap(p->ipt.raw, IPT_CAP_addr_range) * 2 )
+            return 1;
+        *msr_content = ipt_desc->ipt_guest.addr[index];
+    }
+
+    return 0;
+}
+
+int ipt_do_wrmsr(unsigned int msr, uint64_t msr_content)
+{
+    struct ipt_desc *ipt_desc = current->arch.hvm_vmx.ipt_desc;
+    const struct cpuid_policy *p = current->domain->arch.cpuid;
+    unsigned int index;
+
+    if ( !ipt_desc )
+        return 1;
+
+    switch ( msr )
+    {
+    case MSR_IA32_RTIT_CTL:
+        ipt_desc->ipt_guest.ctl = msr_content;
+        __vmwrite(GUEST_IA32_RTIT_CTL, msr_content);
+        break;
+    case MSR_IA32_RTIT_STATUS:
+        if ( (ipt_desc->ipt_guest.ctl & RTIT_CTL_TRACEEN) ||
+             (msr_content & MSR_IA32_RTIT_STATUS_MASK) )
+            return 1;
+        ipt_desc->ipt_guest.status = msr_content;
+        break;
+    case MSR_IA32_RTIT_OUTPUT_BASE:
+        if ( (ipt_desc->ipt_guest.ctl & RTIT_CTL_TRACEEN) ||
+             (msr_content &
+              MSR_IA32_RTIT_OUTPUT_BASE_MASK(p->extd.maxphysaddr)) ||
+             (!ipt_cap(p->ipt.raw, IPT_CAP_single_range_output) &&
+              !ipt_cap(p->ipt.raw, IPT_CAP_topa_output)) )
+            return 1;
+        ipt_desc->ipt_guest.output_base = msr_content;
+        break;
+    case MSR_IA32_RTIT_OUTPUT_MASK:
+        if ( (ipt_desc->ipt_guest.ctl & RTIT_CTL_TRACEEN) ||
+             (!ipt_cap(p->ipt.raw, IPT_CAP_single_range_output) &&
+              !ipt_cap(p->ipt.raw, IPT_CAP_topa_output)) )
+            return 1;
+        ipt_desc->ipt_guest.output_mask = msr_content |
+                                          RTIT_OUTPUT_MASK_DEFAULT;
+        break;
+    case MSR_IA32_RTIT_CR3_MATCH:
+        if ( (ipt_desc->ipt_guest.ctl & RTIT_CTL_TRACEEN) ||
+             !ipt_cap(p->ipt.raw, IPT_CAP_cr3_filter) )
+            return 1;
+        ipt_desc->ipt_guest.cr3_match = msr_content;
+        break;
+    default:
+        index = msr - MSR_IA32_RTIT_ADDR_A(0);
+        if ( index >= ipt_cap(p->ipt.raw, IPT_CAP_addr_range) * 2 )
+            return 1;
+        ipt_desc->ipt_guest.addr[index] = msr_content;
+    }
+
+    return 0;
+}
+
 static inline void ipt_load_msr(const struct ipt_ctx *ctx,
                        unsigned int addr_range)
 {
@@ -204,3 +311,4 @@ void ipt_destroy(struct vcpu *v)
         v->arch.hvm_vmx.ipt_desc = NULL;
     }
 }
+
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 060ab65..fa1ca0c 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2898,6 +2898,15 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         if ( vpmu_do_rdmsr(msr, msr_content) )
             goto gp_fault;
         break;
+    case MSR_IA32_RTIT_CTL:
+    case MSR_IA32_RTIT_STATUS:
+    case MSR_IA32_RTIT_OUTPUT_BASE:
+    case MSR_IA32_RTIT_OUTPUT_MASK:
+    case MSR_IA32_RTIT_CR3_MATCH:
+    case MSR_IA32_RTIT_ADDR_A(0) ... MSR_IA32_RTIT_ADDR_B(3):
+        if ( ipt_do_rdmsr(msr, msr_content) )
+            goto gp_fault;
+        break;
 
     default:
         if ( passive_domain_do_rdmsr(msr, msr_content) )
@@ -3148,6 +3157,15 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         if ( vpmu_do_wrmsr(msr, msr_content, 0) )
             goto gp_fault;
         break;
+    case MSR_IA32_RTIT_CTL:
+    case MSR_IA32_RTIT_STATUS:
+    case MSR_IA32_RTIT_OUTPUT_BASE:
+    case MSR_IA32_RTIT_OUTPUT_MASK:
+    case MSR_IA32_RTIT_CR3_MATCH:
+    case MSR_IA32_RTIT_ADDR_A(0) ... MSR_IA32_RTIT_ADDR_B(3):
+        if ( ipt_do_wrmsr(msr, msr_content) )
+            goto gp_fault;
+        break;
 
     default:
         if ( passive_domain_do_wrmsr(msr, msr_content) )
diff --git a/xen/include/asm-x86/ipt.h b/xen/include/asm-x86/ipt.h
index 422f46a..961de0b 100644
--- a/xen/include/asm-x86/ipt.h
+++ b/xen/include/asm-x86/ipt.h
@@ -64,6 +64,9 @@ struct ipt_desc {
     struct ipt_ctx ipt_guest;
 };
 
+extern int ipt_do_rdmsr(unsigned int msr, uint64_t *pdata);
+extern int ipt_do_wrmsr(unsigned int msr, uint64_t data);
+
 extern void ipt_guest_enter(struct vcpu *v);
 extern void ipt_guest_exit(struct vcpu *v);
 
-- 
1.8.3.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
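[Editor's illustration, not part of the patch or the thread.] The commit message's "reserved bits" case for MSR_IA32_RTIT_OUTPUT_BASE is implemented by the MSR_IA32_RTIT_OUTPUT_BASE_MASK() macro: a write faults if any bit at or above MAXPHYADDR or any of the low 7 bits (the output base must be 128-byte aligned) is set. A minimal user-space sketch of that check, assuming a made-up MAXPHYADDR of 36, made-up sample addresses, and a 64-bit unsigned long as in the hypervisor build:

/*
 * Sketch of the reserved-bit test applied in ipt_do_wrmsr() for
 * MSR_IA32_RTIT_OUTPUT_BASE: a non-zero result means "return 1",
 * which the vmx_msr_write_intercept() caller turns into a #GP.
 */
#include <stdio.h>
#include <inttypes.h>

#define MSR_IA32_RTIT_OUTPUT_BASE_MASK(maxphyaddr) \
        (~((1UL << (maxphyaddr)) - 1) | 0x7f)

int main(void)
{
    uint64_t mask = MSR_IA32_RTIT_OUTPUT_BASE_MASK(36);  /* example MAXPHYADDR */
    uint64_t good = 0x12345000;  /* 4K-aligned, below 2^36 -> accepted */
    uint64_t bad  = 0x12345040;  /* bit 6 set, not 128-byte aligned -> #GP */

    printf("mask = %#" PRIx64 "\n", mask);
    printf("%#" PRIx64 " -> %s\n", good, (good & mask) ? "#GP" : "accepted");
    printf("%#" PRIx64 " -> %s\n", bad,  (bad  & mask) ? "#GP" : "accepted");
    return 0;
}

On a 64-bit host this prints "accepted" for the aligned address and "#GP" for the unaligned one.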