[Xen-devel] [PATCH RESEND v1 5/7] x86: Implement Intel Processor Trace context switch



Load/store the Intel Processor Trace registers on context switch.
MSR IA32_RTIT_CTL is loaded/stored automatically from the VMCS on
VM entry/exit.  When Intel PT is exposed to the guest, the remaining
PT MSRs need to be saved/restored only while tracing is enabled in
the guest.

Signed-off-by: Luwei Kang <luwei.kang@xxxxxxxxx>
---
 xen/arch/x86/cpu/intel_pt.c        | 69 ++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/vmx/vmx.c         |  4 +++
 xen/include/asm-x86/hvm/vmx/vmcs.h |  2 ++
 xen/include/asm-x86/intel_pt.h     |  4 +++
 4 files changed, 79 insertions(+)
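
(Note for readers, not part of the commit: the hunks below rely on struct
pt_desc/struct pt_ctx from asm-x86/intel_pt.h, introduced earlier in this
series.  A minimal sketch of that layout is given here for reference; the
field names are taken from their uses in pt_vcpu_init() and
pt_load_msr()/pt_save_msr(), while the field order, exact types and the
array bound are assumptions for illustration only.)

    /* Hedged sketch -- inferred from this patch, not the authoritative definition. */
    struct pt_ctx {
        u64 ctl;            /* guest IA32_RTIT_CTL, mirrored in VMCS field GUEST_IA32_RTIT_CTL */
        u64 status;         /* IA32_RTIT_STATUS */
        u64 output_base;    /* IA32_RTIT_OUTPUT_BASE */
        u64 output_mask;    /* IA32_RTIT_OUTPUT_MASK_PTRS */
        u64 cr3_match;      /* IA32_RTIT_CR3_MATCH */
        u64 addr[8];        /* IA32_RTIT_ADDRn_A/B values, two per range (bound assumed) */
    };

    struct pt_desc {
        bool intel_pt_enabled;      /* PT exposed to this vCPU */
        unsigned int addr_num;      /* number of address ranges, CPUID.0x14 sub-leaf 1 EAX[2:0] */
        struct pt_ctx guest_pt_ctx; /* guest PT MSRs, saved on VM exit and loaded on VM entry */
    };

With a layout like that, pt_guest_enter()/pt_guest_exit() below only have to
swap the guest MSR values in and out around VM entry/exit while TraceEn is
set; IA32_RTIT_CTL itself never needs an explicit rdmsr/wrmsr because the
VMCS handles it.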

diff --git a/xen/arch/x86/cpu/intel_pt.c b/xen/arch/x86/cpu/intel_pt.c
index 520e0ca..c0e9e68 100644
--- a/xen/arch/x86/cpu/intel_pt.c
+++ b/xen/arch/x86/cpu/intel_pt.c
@@ -21,7 +21,76 @@
 #include <xen/types.h>
 #include <xen/cache.h>
 #include <xen/init.h>
+#include <asm/hvm/vmx/vmx.h>
+#include <asm/intel_pt.h>
 
 /* intel_pt: Flag to enable Intel Processor Trace (default on). */
 bool_t __read_mostly opt_intel_pt = 1;
 boolean_param("intel_pt", opt_intel_pt);
+
+static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_num)
+{
+    u32 i;
+    wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
+    wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+    wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+    wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+    for ( i = 0; i < addr_num * 2; i++ )
+        wrmsrl(MSR_IA32_RTIT_ADDR0_A + i, ctx->addr[i]);
+}
+
+static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_num)
+{
+    u32 i;
+    rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
+    rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+    rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+    rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+    for ( i = 0; i < addr_num * 2; i++ )
+        rdmsrl(MSR_IA32_RTIT_ADDR0_A + i, ctx->addr[i]);
+}
+
+void pt_guest_enter(struct vcpu *v)
+{
+    struct pt_desc *pt = &v->arch.hvm_vmx.pt_desc;
+
+    if ( pt->intel_pt_enabled &&
+         (pt->guest_pt_ctx.ctl & MSR_IA32_RTIT_CTL_TRACEEN) )
+        pt_load_msr(&pt->guest_pt_ctx, pt->addr_num);
+}
+
+void pt_guest_exit(struct vcpu *v)
+{
+    struct pt_desc *pt = &v->arch.hvm_vmx.pt_desc;
+
+    if ( pt->intel_pt_enabled &&
+         (pt->guest_pt_ctx.ctl & MSR_IA32_RTIT_CTL_TRACEEN) )
+        pt_save_msr(&pt->guest_pt_ctx, pt->addr_num);
+}
+
+void pt_vcpu_init(struct vcpu *v)
+{
+    struct pt_desc *pt = &v->arch.hvm_vmx.pt_desc;
+    unsigned int eax, ebx, ecx, edx;
+
+    memset(pt, 0, sizeof(struct pt_desc));
+    pt->intel_pt_enabled = false;
+
+    if ( !cpu_has_intel_pt || !opt_intel_pt ||
+         !(v->arch.hvm_vmx.secondary_exec_control & SECONDARY_EXEC_PT_USE_GPA) )
+        return;
+
+    /* Get the number of address ranges from CPUID leaf 0x14, sub-leaf 1. */
+    if ( cpuid_eax(0x14) < 1 )
+        return;
+
+    cpuid_count(0x14, 1, &eax, &ebx, &ecx, &edx);
+
+    pt->addr_num = eax & 0x7;
+    pt->guest_pt_ctx.output_mask = 0x7F;
+    pt->intel_pt_enabled = true;
+
+    vmx_vmcs_enter(v);
+    __vmwrite(GUEST_IA32_RTIT_CTL, 0);
+    vmx_vmcs_exit(v);
+}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e036303..f386933 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -467,6 +467,8 @@ static int vmx_vcpu_initialise(struct vcpu *v)
     if ( v->vcpu_id == 0 )
         v->arch.user_regs.rax = 1;
 
+    pt_vcpu_init(v);
+
     return 0;
 }
 
@@ -3513,6 +3515,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     __vmread(GUEST_RSP,    &regs->rsp);
     __vmread(GUEST_RFLAGS, &regs->rflags);
 
+    pt_guest_exit(v);
     hvm_invalidate_regs_fields(regs);
 
     if ( paging_mode_hap(v->domain) )
@@ -4281,6 +4284,7 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
         }
     }
 
+    pt_guest_enter(curr);
  out:
     if ( unlikely(curr->arch.hvm_vmx.lbr_fixup_enabled) )
         lbr_fixup();
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 33ec3e6..46c386f 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -421,6 +421,8 @@ enum vmcs_field {
     GUEST_PDPTE0                    = 0x0000280a,
 #define GUEST_PDPTE(n) (GUEST_PDPTE0 + (n) * 2) /* n = 0...3 */
     GUEST_BNDCFGS                   = 0x00002812,
+    GUEST_IA32_RTIT_CTL             = 0x00002814,
+    GUEST_IA32_RTIT_CTL_HIGH        = 0x00002815,
     HOST_PAT                        = 0x00002c00,
     HOST_EFER                       = 0x00002c02,
     HOST_PERF_GLOBAL_CTRL           = 0x00002c04,
diff --git a/xen/include/asm-x86/intel_pt.h b/xen/include/asm-x86/intel_pt.h
index 909e22f..9505c8f 100644
--- a/xen/include/asm-x86/intel_pt.h
+++ b/xen/include/asm-x86/intel_pt.h
@@ -40,4 +40,8 @@ struct pt_desc {
 
 extern bool_t opt_intel_pt;
 
+void pt_vcpu_init(struct vcpu *v);
+void pt_guest_enter(struct vcpu *v);
+void pt_guest_exit(struct vcpu *v);
+
 #endif /* __ASM_X86_HVM_INTEL_PT_H_ */
-- 
1.8.3.1

