
[Xen-devel] [PATCH v2 08/10] x86: Introduce a function to check the value of RTIT_CTL



Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
result in a #GP unless the same write also clears TraceEn.
Writes to IA32_RTIT_CTL that do not modify any bits will not
cause a #GP, even if TraceEn remains set. Any MSR write that
attempts to change bits marked reserved, or to use encodings
marked reserved, will also cause a #GP fault.

Introduce rtit_ctl_check() to apply these rules to guest writes
to MSR_IA32_RTIT_CTL and reject any value that would fault on
hardware.

Signed-off-by: Luwei Kang <luwei.kang@xxxxxxxxx>
---
 xen/arch/x86/cpu/ipt.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)
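
Not part of the patch itself, just an illustration for reviewers: a minimal
standalone sketch of the TraceEn rule described in the commit message, with
rejection reported as a non-zero return value. RTIT_CTL_TRACEEN below uses the
architectural bit position (bit 0); the second control bit used in main() is
purely illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define RTIT_CTL_TRACEEN (1ULL << 0)    /* architectural TraceEn bit */

    /* Return 1 if the write must #GP under the TraceEn rule, 0 otherwise. */
    static int traceen_rule_faults(uint64_t old, uint64_t new)
    {
        if ( new == old )                   /* no-op writes never fault */
            return 0;

        /*
         * While TraceEn is set, reject any write that modifies bits other
         * than TraceEn (mirroring the check in rtit_ctl_check()).
         */
        return (old & RTIT_CTL_TRACEEN) && ((old ^ new) & ~RTIT_CTL_TRACEEN);
    }

    int main(void)
    {
        uint64_t on = RTIT_CTL_TRACEEN;     /* tracing currently enabled */

        printf("%d\n", traceen_rule_faults(on, on));                /* 0: identical rewrite */
        printf("%d\n", traceen_rule_faults(on, on | (1ULL << 2)));  /* 1: modifies another bit */
        printf("%d\n", traceen_rule_faults(on, 0));                 /* 0: clears TraceEn only */
        return 0;
    }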

diff --git a/xen/arch/x86/cpu/ipt.c b/xen/arch/x86/cpu/ipt.c
index dcb7a8d..fd75a01 100644
--- a/xen/arch/x86/cpu/ipt.c
+++ b/xen/arch/x86/cpu/ipt.c
@@ -114,6 +114,114 @@ static int __init parse_ipt_params(const char *str)
     return 0;
 }
 
+static int rtit_ctl_check(uint64_t new, uint64_t old)
+{
+    const struct cpuid_policy *p = current->domain->arch.cpuid;
+    const struct ipt_desc *ipt_desc = current->arch.hvm_vmx.ipt_desc;
+    uint64_t rtit_ctl_mask;
+    unsigned int addr_range = ipt_cap(p->ipt.raw, IPT_CAP_addr_range);
+    unsigned long val, i;
+
+    if ( new == old )
+        return 0;
+
+    /* Bits with no CPUID dependency; clear them from the reserved mask. */
+    rtit_ctl_mask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
+                RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DIS_RETC);
+
+    /* If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set */
+    if ( ipt_cap(p->ipt.raw, IPT_CAP_cr3_filter) )
+        rtit_ctl_mask &= ~RTIT_CTL_CR3_FILTER;
+
+    /*
+     * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and
+     * PSBFreq can be set
+     */
+    if ( ipt_cap(p->ipt.raw, IPT_CAP_psb_cyc) )
+        rtit_ctl_mask &= ~(RTIT_CTL_CYCEN |
+                RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);
+    /*
+     * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn, BranchEn and
+     * MTCFreq can be set
+     */
+    if ( ipt_cap(p->ipt.raw, IPT_CAP_mtc) )
+        rtit_ctl_mask &= ~(RTIT_CTL_MTC_EN |
+                RTIT_CTL_BRANCH_EN | RTIT_CTL_MTC_FREQ);
+
+    /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
+    if ( ipt_cap(p->ipt.raw, IPT_CAP_ptwrite) )
+        rtit_ctl_mask &= ~(RTIT_CTL_FUP_ON_PTW |
+                RTIT_CTL_PTW_EN);
+
+    /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
+    if ( ipt_cap(p->ipt.raw, IPT_CAP_power_event) )
+        rtit_ctl_mask &= ~RTIT_CTL_PWR_EVT_EN;
+
+    /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
+    if ( ipt_cap(p->ipt.raw, IPT_CAP_topa_output) )
+        rtit_ctl_mask &= ~RTIT_CTL_TOPA;
+    /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
+    if ( ipt_cap(p->ipt.raw, IPT_CAP_output_subsys) )
+        rtit_ctl_mask &= ~RTIT_CTL_FABRIC_EN;
+    /* Unmask the ADDRn_CFG field for each supported address range. */
+    for ( i = 0; i < addr_range; i++ )
+        rtit_ctl_mask &= ~((uint64_t)0xf << (32 + i * 4));
+
+    /*
+     * Any MSR write that attempts to change bits marked reserved will
+     * cause a #GP fault.
+     */
+    if ( new & rtit_ctl_mask )
+        return 1;
+
+    /*
+     * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
+     * result in a #GP unless the same write also clears TraceEn.
+     */
+    if ( (ipt_desc->ipt_guest.ctl & RTIT_CTL_TRACEEN) &&
+        ((ipt_desc->ipt_guest.ctl ^ new) & ~RTIT_CTL_TRACEEN) )
+        return 1;
+
+    /*
+     * A WRMSR to IA32_RTIT_CTL that sets TraceEn but clears both ToPA
+     * and FabricEn would cause a #GP, if
+     * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0.
+     */
+    if ( (new & RTIT_CTL_TRACEEN) && !(new & RTIT_CTL_TOPA) &&
+        !(new & RTIT_CTL_FABRIC_EN) &&
+        !ipt_cap(p->ipt.raw, IPT_CAP_single_range_output) )
+        return 1;
+    /*
+     * MTCFreq, CycThresh and PSBFreq encoding checks: any MSR write that
+     * uses an encoding marked reserved will cause a #GP fault.
+     */
+    val = ipt_cap(p->ipt.raw, IPT_CAP_mtc_period);
+    if ( ipt_cap(p->ipt.raw, IPT_CAP_mtc) &&
+                !test_bit((new & RTIT_CTL_MTC_FREQ) >>
+                RTIT_CTL_MTC_FREQ_OFFSET, &val) )
+        return 1;
+    val = ipt_cap(p->ipt.raw, IPT_CAP_cycle_threshold);
+    if ( ipt_cap(p->ipt.raw, IPT_CAP_psb_cyc) &&
+                !test_bit((new & RTIT_CTL_CYC_THRESH) >>
+                RTIT_CTL_CYC_THRESH_OFFSET, &val) )
+        return 1;
+    val = ipt_cap(p->ipt.raw, IPT_CAP_psb_freq);
+    if ( ipt_cap(p->ipt.raw, IPT_CAP_psb_cyc) &&
+                !test_bit((new & RTIT_CTL_PSB_FREQ) >>
+                RTIT_CTL_PSB_FREQ_OFFSET, &val) )
+        return 1;
+
+    /*
+     * ADDRx_CFG encodings greater than 2 are reserved and will
+     * cause a #GP fault.
+     */
+    for ( i = 0; i < addr_range; i++ )
+        if ( ((new & RTIT_CTL_ADDR(i)) >> RTIT_CTL_ADDR_OFFSET(i)) > 2 )
+            return 1;
+
+    return 0;
+}
+
 int ipt_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
     const struct ipt_desc *ipt_desc = current->arch.hvm_vmx.ipt_desc;
@@ -171,6 +279,8 @@ int ipt_do_wrmsr(unsigned int msr, uint64_t msr_content)
     switch ( msr )
     {
     case MSR_IA32_RTIT_CTL:
+        if ( rtit_ctl_check(msr_content, ipt_desc->ipt_guest.ctl) )
+            return 1;
         ipt_desc->ipt_guest.ctl = msr_content;
         __vmwrite(GUEST_IA32_RTIT_CTL, msr_content);
         break;
-- 
1.8.3.1
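
Also not part of the patch: a simplified, standalone illustration of the
reserved-encoding check above, shown for MTCFreq (CycThresh and PSBFreq follow
the same pattern). The field position is the architectural one
(IA32_RTIT_CTL[17:14]); the capability bitmap in main() is a made-up value
standing in for the CPUID-reported set of supported MTC period encodings.

    #include <stdint.h>
    #include <stdio.h>

    /* MTCFreq occupies IA32_RTIT_CTL[17:14] (architectural position). */
    #define MTC_FREQ_SHIFT 14
    #define MTC_FREQ_MASK  (0xfULL << MTC_FREQ_SHIFT)

    /*
     * Return 1 if the MTCFreq encoding in 'ctl' is not advertised in the
     * bitmap of supported MTC period encodings, i.e. the write would use a
     * reserved encoding and must #GP.
     */
    static int mtc_freq_reserved(uint64_t ctl, uint32_t supported)
    {
        unsigned int enc = (ctl & MTC_FREQ_MASK) >> MTC_FREQ_SHIFT;

        return !(supported & (1u << enc));
    }

    int main(void)
    {
        uint32_t caps = 0x249;   /* hypothetical: encodings 0, 3, 6, 9 supported */

        printf("%d\n", mtc_freq_reserved(3ULL << MTC_FREQ_SHIFT, caps)); /* 0: supported */
        printf("%d\n", mtc_freq_reserved(5ULL << MTC_FREQ_SHIFT, caps)); /* 1: reserved */
        return 0;
    }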


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

