[Xen-devel] [PATCH] svm: last branch recording MSR emulation

Emulate the DebugCtl and last branch recording MSRs for SVM guests. The
SVM feature flags reported by CPUID leaf 0x8000000A are now accumulated
in a global variable (ANDed across all CPUs), replacing the
special-purpose nested paging detection. When a guest writes a non-zero
value to MSR_IA32_DEBUGCTLMSR and the CPU supports LBR virtualization,
the LBR enable bit in the VMCB is set and the intercepts for the five
affected MSRs are disabled, so that subsequent guest accesses go
straight to the VMCB-maintained copies. The MSR intercept helper moves
from vmcb.c into svm.h so the MSR write handler can reuse it.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
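
For reference, a guest would exercise the new path roughly as in the
illustrative ring-0 sketch below (not part of the patch; the rdmsr64/
wrmsr64 helper names are mine, and the MSR numbers are spelled out for
self-containedness):

/* Illustrative guest-side sketch (ring-0 only); not part of the patch. */
#include <stdint.h>

#define MSR_IA32_DEBUGCTLMSR       0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP  0x000001db
#define DEBUGCTLMSR_LBR            (1ULL << 0)  /* architectural LBR enable */

static inline uint64_t rdmsr64(uint32_t msr)
{
    uint32_t lo, hi;
    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
    return lo | ((uint64_t)hi << 32);
}

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
    asm volatile ( "wrmsr" : : "c" (msr), "a" ((uint32_t)val),
                   "d" ((uint32_t)(val >> 32)) );
}

void lbr_demo(void)
{
    /* This write is intercepted: Xen stores the value into
     * vmcb->debugctlmsr, sets lbr_control.fields.enable, and clears the
     * five LBR-related intercept bits in the MSR permission map. */
    wrmsr64(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);

    /* No longer intercepted: the CPU returns the value that VMRUN
     * loaded from the VMCB save area. */
    (void)rdmsr64(MSR_IA32_LASTBRANCHFROMIP);
}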

Index: 2007-08-08/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2007-08-08.orig/xen/arch/x86/hvm/svm/svm.c  2007-08-07 15:00:27.000000000 +0200
+++ 2007-08-08/xen/arch/x86/hvm/svm/svm.c       2007-08-08 11:40:11.000000000 +0200
@@ -68,6 +68,9 @@ static void *hsa[NR_CPUS] __read_mostly;
 /* vmcb used for extended host state */
 static void *root_vmcb[NR_CPUS] __read_mostly;
 
+/* SVM feature flags */
+u32 svm_feature_flags;
+
 /* hardware assisted paging bits */
 extern int opt_hap_enabled;
 
@@ -991,20 +994,6 @@ static struct hvm_function_table svm_fun
     .event_pending        = svm_event_pending
 };
 
-static void svm_npt_detect(void)
-{
-    u32 eax, ebx, ecx, edx;
-
-    /* Check CPUID for nested paging support. */
-    cpuid(0x8000000A, &eax, &ebx, &ecx, &edx);
-
-    if ( !(edx & 1) && opt_hap_enabled )
-    {
-        printk("SVM: Nested paging is not supported by this CPU.\n");
-        opt_hap_enabled = 0;
-    }
-}
-
 int start_svm(struct cpuinfo_x86 *c)
 {
     u32 eax, ecx, edx;
@@ -1027,14 +1016,25 @@ int start_svm(struct cpuinfo_x86 *c)
         return 0;
     }
 
+    edx = cpuid_edx(0x8000000A);
+    if ( cpu == 0 )
+        svm_feature_flags = edx;
+    else
+        svm_feature_flags &= edx;
+
+    /* Check for nested paging support. */
+    if ( !cpu_has_npt && opt_hap_enabled )
+    {
+        printk("SVM: Nested paging is not supported by this CPU.\n");
+        opt_hap_enabled = 0;
+    }
+
     if ( ((hsa[cpu] = alloc_host_save_area()) == NULL) ||
          ((root_vmcb[cpu] = alloc_vmcb()) == NULL) )
         return 0;
 
     write_efer(read_efer() | EFER_SVME);
 
-    svm_npt_detect();
-
     /* Initialize the HSA for this core. */
     phys_hsa = (u64) virt_to_maddr(hsa[cpu]);
     phys_hsa_lo = (u32) phys_hsa;
@@ -2164,6 +2164,14 @@ static void svm_do_msr_access(
             msr_content = 0;
             break;
 
+        case MSR_IA32_DEBUGCTLMSR:
+        case MSR_IA32_LASTBRANCHFROMIP:
+        case MSR_IA32_LASTBRANCHTOIP:
+        case MSR_IA32_LASTINTFROMIP:
+        case MSR_IA32_LASTINTTOIP:
+            msr_content = 0;
+            break;
+
         default:
             if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
                  rdmsr_safe(ecx, eax, edx) == 0 )
@@ -2206,6 +2214,24 @@ static void svm_do_msr_access(
             svm_inject_exception(v, TRAP_gp_fault, 1, 0);
             break;
 
+        case MSR_IA32_DEBUGCTLMSR:
+            if ( msr_content && cpu_has_lbrv )
+            {
+                vmcb->debugctlmsr = msr_content;
+                vmcb->lbr_control.fields.enable = 1;
+                svm_disable_intercept_for_msr(v->arch.hvm_svm.msrpm,
+                                              MSR_IA32_DEBUGCTLMSR);
+                svm_disable_intercept_for_msr(v->arch.hvm_svm.msrpm,
+                                              MSR_IA32_LASTBRANCHFROMIP);
+                svm_disable_intercept_for_msr(v->arch.hvm_svm.msrpm,
+                                              MSR_IA32_LASTBRANCHTOIP);
+                svm_disable_intercept_for_msr(v->arch.hvm_svm.msrpm,
+                                              MSR_IA32_LASTINTFROMIP);
+                svm_disable_intercept_for_msr(v->arch.hvm_svm.msrpm,
+                                              MSR_IA32_LASTINTTOIP);
+            }
+            break;
+
         default:
             switch ( long_mode_do_msr_write(regs) )
             {
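
(For review convenience: the five MSR indices involved are the
architectural ones, and all fall into the 0x0-0x1fff range, i.e. the
first 2K chunk of the MSR permission map.)

#define MSR_IA32_DEBUGCTLMSR       0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP  0x000001db
#define MSR_IA32_LASTBRANCHTOIP    0x000001dc
#define MSR_IA32_LASTINTFROMIP     0x000001dd
#define MSR_IA32_LASTINTTOIP       0x000001de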
Index: 2007-08-08/xen/arch/x86/hvm/svm/vmcb.c
===================================================================
--- 2007-08-08.orig/xen/arch/x86/hvm/svm/vmcb.c 2007-08-06 15:08:40.000000000 +0200
+++ 2007-08-08/xen/arch/x86/hvm/svm/vmcb.c      2007-08-08 11:40:11.000000000 +0200
@@ -80,30 +80,6 @@ struct host_save_area *alloc_host_save_a
     return hsa;
 }
 
-static void disable_intercept_for_msr(char *msr_bitmap, u32 msr)
-{
-    /*
-     * See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
-     */
-    if ( msr <= 0x1fff )
-    {
-        __clear_bit(msr*2, msr_bitmap + 0x000); 
-        __clear_bit(msr*2+1, msr_bitmap + 0x000); 
-    }
-    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
-    {
-        msr &= 0x1fff;
-        __clear_bit(msr*2, msr_bitmap + 0x800);
-        __clear_bit(msr*2+1, msr_bitmap + 0x800);
-    } 
-    else if ( (msr >= 0xc001000) && (msr <= 0xc0011fff) )
-    {
-        msr &= 0x1fff;
-        __clear_bit(msr*2, msr_bitmap + 0x1000);
-        __clear_bit(msr*2+1, msr_bitmap + 0x1000);
-    }
-}
-
 static int construct_vmcb(struct vcpu *v)
 {
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
@@ -138,16 +114,16 @@ static int construct_vmcb(struct vcpu *v
         return -ENOMEM;
     memset(arch_svm->msrpm, 0xff, MSRPM_SIZE);
 
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_FS_BASE);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_GS_BASE);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SHADOW_GS_BASE);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_CSTAR);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_LSTAR);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_STAR);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SYSCALL_MASK);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_CS);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_ESP);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_EIP);
+    svm_disable_intercept_for_msr(arch_svm->msrpm, MSR_FS_BASE);
+    svm_disable_intercept_for_msr(arch_svm->msrpm, MSR_GS_BASE);
+    svm_disable_intercept_for_msr(arch_svm->msrpm, MSR_SHADOW_GS_BASE);
+    svm_disable_intercept_for_msr(arch_svm->msrpm, MSR_CSTAR);
+    svm_disable_intercept_for_msr(arch_svm->msrpm, MSR_LSTAR);
+    svm_disable_intercept_for_msr(arch_svm->msrpm, MSR_STAR);
+    svm_disable_intercept_for_msr(arch_svm->msrpm, MSR_SYSCALL_MASK);
+    svm_disable_intercept_for_msr(arch_svm->msrpm, MSR_IA32_SYSENTER_CS);
+    svm_disable_intercept_for_msr(arch_svm->msrpm, MSR_IA32_SYSENTER_ESP);
+    svm_disable_intercept_for_msr(arch_svm->msrpm, MSR_IA32_SYSENTER_EIP);
 
     vmcb->msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
     vmcb->iopm_base_pa  = (u64)virt_to_maddr(hvm_io_bitmap);
Index: 2007-08-08/xen/include/asm-x86/hvm/svm/svm.h
===================================================================
--- 2007-08-08.orig/xen/include/asm-x86/hvm/svm/svm.h   2007-07-10 09:40:06.000000000 +0200
+++ 2007-08-08/xen/include/asm-x86/hvm/svm/svm.h        2007-08-08 11:43:48.000000000 +0200
@@ -47,4 +47,16 @@ extern void svm_dump_vmcb(const char *fr
 #define SVM_REG_R14 (14)
 #define SVM_REG_R15 (15)
 
+extern u32 svm_feature_flags;
+
+#define SVM_FEATURE_NPT         0
+#define SVM_FEATURE_LBRV        1
+#define SVM_FEATURE_SVML        2
+#define SVM_FEATURE_NRIPS       3
+
+#define cpu_has_npt             test_bit(SVM_FEATURE_NPT, &svm_feature_flags)
+#define cpu_has_lbrv            test_bit(SVM_FEATURE_LBRV, &svm_feature_flags)
+#define cpu_has_svml            test_bit(SVM_FEATURE_SVML, &svm_feature_flags)
+#define cpu_has_nrips           test_bit(SVM_FEATURE_NRIPS, &svm_feature_flags)
+
 #endif /* __ASM_X86_HVM_SVM_H__ */
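
The SVM_FEATURE_* bit positions above match CPUID Fn8000_000A EDX as
specified in the APM (bit 0 NP, bit 1 LbrVirt, bit 2 SVML, bit 3
NRIPSave). A minimal standalone decoder, for illustration only:

/* Standalone sketch: decode the SVM feature flags (CPUID 0x8000000A). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t eax, ebx, ecx, edx;

    asm volatile ( "cpuid"
                   : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                   : "a" (0x8000000A) );
    (void)eax; (void)ebx; (void)ecx;
    printf("NP (nested paging) : %u\n", (edx >> 0) & 1);
    printf("LbrVirt            : %u\n", (edx >> 1) & 1);
    printf("SVML (SVM lock)    : %u\n", (edx >> 2) & 1);
    printf("NRIPSave           : %u\n", (edx >> 3) & 1);
    return 0;
}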
Index: 2007-08-08/xen/include/asm-x86/hvm/svm/vmcb.h
===================================================================
--- 2007-08-08.orig/xen/include/asm-x86/hvm/svm/vmcb.h  2007-08-06 15:08:41.000000000 +0200
+++ 2007-08-08/xen/include/asm-x86/hvm/svm/vmcb.h       2007-08-08 11:40:11.000000000 +0200
@@ -355,6 +355,15 @@ typedef union
     } fields;
 } __attribute__ ((packed)) ioio_info_t;
 
+typedef union
+{
+    u64 bytes;
+    struct
+    {
+        u64 enable:1;
+    } fields;
+} __attribute__ ((packed)) lbrctrl_t;
+
 struct vmcb_struct {
     u32 cr_intercepts;          /* offset 0x00 */
     u32 dr_intercepts;          /* offset 0x04 */
@@ -383,7 +392,8 @@ struct vmcb_struct {
     u64 res08[2];
     eventinj_t  eventinj;       /* offset 0xA8 */
     u64 h_cr3;                  /* offset 0xB0 */
-    u64 res09[105];             /* offset 0xB8 pad to save area */
+    lbrctrl_t lbr_control;      /* offset 0xB8 */
+    u64 res09[104];             /* offset 0xC0 pad to save area */
 
     svm_segment_register_t es;      /* offset 1024 */
     svm_segment_register_t cs;
@@ -426,7 +436,12 @@ struct vmcb_struct {
     u64 pdpe2;
     u64 pdpe3;
     u64 g_pat;
-    u64 res16[50];
+    u64 debugctlmsr;
+    u64 lastbranchfromip;
+    u64 lastbranchtoip;
+    u64 lastintfromip;
+    u64 lastinttoip;
+    u64 res16[45];
     u64 res17[128];
     u64 res18[128];
 } __attribute__ ((packed));
@@ -437,7 +452,7 @@ struct arch_svm_struct {
     u64                 vmcb_pa;
     u64                 asid_generation; /* ASID tracking, moved here to
                                             prevent cacheline misses. */
-    u32                *msrpm;
+    char               *msrpm;
     int                 launch_core;
     bool_t              vmcb_in_sync;     /* VMCB sync'ed with VMSAVE? */
     unsigned long       cpu_shadow_cr0;   /* Guest value for CR0 */
@@ -447,6 +462,30 @@ struct arch_svm_struct {
     unsigned long       cpu_cr3;
 };
 
+static inline void svm_disable_intercept_for_msr(char *msr_bitmap, u32 msr)
+{
+    /*
+     * See AMD64 Programmer's Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
+     */
+    if ( msr <= 0x1fff )
+    {
+        __clear_bit(msr*2, msr_bitmap + 0x000);
+        __clear_bit(msr*2+1, msr_bitmap + 0x000);
+    }
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        __clear_bit(msr*2, msr_bitmap + 0x800);
+        __clear_bit(msr*2+1, msr_bitmap + 0x800);
+    }
+    else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
+    {
+        msr &= 0x1fff;
+        __clear_bit(msr*2, msr_bitmap + 0x1000);
+        __clear_bit(msr*2+1, msr_bitmap + 0x1000);
+    }
+}
+
 struct vmcb_struct *alloc_vmcb(void);
 struct host_save_area *alloc_host_save_area(void);
 void free_vmcb(struct vmcb_struct *vmcb);
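
As a sanity check of the Section 15.10 arithmetic in
svm_disable_intercept_for_msr(): each MSR owns two adjacent bits (read
intercept, then write intercept), and the three architectural MSR ranges
map onto consecutive 2K-byte chunks of the permission map. A standalone
sketch (the msrpm_bitpos helper name is mine, for illustration):

/* Standalone sketch (not part of the patch): compute the byte and bit
 * position of an MSR's read-intercept bit in the SVM MSR permission map,
 * per AMD64 APM Vol 2, Section 15.10.  The write-intercept bit follows
 * immediately after. */
#include <stdio.h>
#include <stdint.h>

static int msrpm_bitpos(uint32_t msr, uint32_t *bit)
{
    if ( msr <= 0x1fff )                                   /* chunk 1 */
        *bit = 0x0000 * 8 + msr * 2;
    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) /* chunk 2 */
        *bit = 0x0800 * 8 + (msr & 0x1fff) * 2;
    else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) ) /* chunk 3 */
        *bit = 0x1000 * 8 + (msr & 0x1fff) * 2;
    else
        return 0;                                          /* not covered */
    return 1;
}

int main(void)
{
    uint32_t bit;

    if ( msrpm_bitpos(0x000001d9, &bit) )       /* MSR_IA32_DEBUGCTLMSR */
        printf("DEBUGCTL: byte 0x%03x, bit %u\n", bit / 8, bit % 8);
    if ( msrpm_bitpos(0xc0000082, &bit) )       /* MSR_LSTAR */
        printf("LSTAR:    byte 0x%03x, bit %u\n", bit / 8, bit % 8);
    return 0;
}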
