
[Xen-changelog] [xen-unstable] hvm: Clean up RDTSCP/TSC_AUX handling.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1261002398 0
# Node ID 7c85a4aa17fe8cb3ee5ad00ed02fbc85a70f1728
# Parent  3c5b5c4c1d79d85ecad8e7761dc380169c87af84
hvm: Clean up RDTSCP/TSC_AUX handling.

Move the guest's TSC_AUX value out of the VMX-specific MSR save/restore
machinery and into common HVM code: it now lives in a per-vCPU
msr_tsc_aux field, is saved and loaded with the generic CPU context, and
is serviced by the common MSR read/write intercepts. The VMX RDTSCP exit
handler loads ECX from this field and falls through to the RDTSC path,
and the VMX_INDEX_MSR_TSC_AUX slot is removed.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c             |   16 +++++++-
 xen/arch/x86/hvm/vmx/vmcs.c        |    6 ---
 xen/arch/x86/hvm/vmx/vmx.c         |   74 ++-----------------------------------
 xen/include/asm-x86/hvm/vcpu.h     |    2 +
 xen/include/asm-x86/hvm/vmx/vmcs.h |    1 -
 xen/include/asm-x86/msr.h          |    2 -
 6 files changed, 23 insertions(+), 78 deletions(-)

diff -r 3c5b5c4c1d79 -r 7c85a4aa17fe xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Dec 16 22:26:15 2009 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Wed Dec 16 22:26:38 2009 +0000
@@ -478,6 +478,8 @@ static int hvm_save_cpu_ctxt(struct doma
         /* Architecture-specific vmcs/vmcb bits */
         hvm_funcs.save_cpu_ctxt(v, &ctxt);
 
+        ctxt.msr_tsc_aux = v->arch.hvm_vcpu.msr_tsc_aux;
+
         hvm_get_segment_register(v, x86_seg_idtr, &seg);
         ctxt.idtr_limit = seg.limit;
         ctxt.idtr_base = seg.base;
@@ -652,6 +654,8 @@ static int hvm_load_cpu_ctxt(struct doma
     /* Architecture-specific vmcs/vmcb bits */
     if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
         return -EINVAL;
+
+    v->arch.hvm_vcpu.msr_tsc_aux = ctxt.msr_tsc_aux;
 
     seg.limit = ctxt.idtr_limit;
     seg.base = ctxt.idtr_base;
@@ -1929,6 +1933,10 @@ int hvm_msr_read_intercept(struct cpu_us
         msr_content = hvm_get_guest_tsc(v);
         break;
 
+    case MSR_TSC_AUX:
+        msr_content = v->arch.hvm_vcpu.msr_tsc_aux;
+        break;
+
     case MSR_IA32_APICBASE:
         msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
         break;
@@ -2017,8 +2025,14 @@ int hvm_msr_write_intercept(struct cpu_u
 
     switch ( ecx )
     {
-     case MSR_IA32_TSC:
+    case MSR_IA32_TSC:
         hvm_set_guest_tsc(v, msr_content);
+        break;
+
+    case MSR_TSC_AUX:
+        v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content;
+        if ( cpu_has_rdtscp )
+            wrmsrl(MSR_TSC_AUX, (uint32_t)msr_content);
         break;
 
     case MSR_IA32_APICBASE:
diff -r 3c5b5c4c1d79 -r 7c85a4aa17fe xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed Dec 16 22:26:15 2009 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Wed Dec 16 22:26:38 2009 +0000
@@ -156,6 +156,7 @@ static void vmx_init_vmcs_config(void)
         opt = (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                SECONDARY_EXEC_WBINVD_EXITING |
                SECONDARY_EXEC_ENABLE_EPT |
+               SECONDARY_EXEC_ENABLE_RDTSCP |
                SECONDARY_EXEC_PAUSE_LOOP_EXITING);
         if ( opt_vpid_enabled )
             opt |= SECONDARY_EXEC_ENABLE_VPID;
@@ -593,11 +594,6 @@ static int construct_vmcs(struct vcpu *v
         __vmwrite(PLE_GAP, ple_gap);
         __vmwrite(PLE_WINDOW, ple_window);
     }
-
-#ifdef __x86_64__
-    if ( cpu_has_rdtscp )
-        v->arch.hvm_vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_RDTSCP;
-#endif
 
     if ( cpu_has_vmx_secondary_exec_control )
         __vmwrite(SECONDARY_VM_EXEC_CONTROL,
diff -r 3c5b5c4c1d79 -r 7c85a4aa17fe xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Dec 16 22:26:15 2009 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Dec 16 22:26:38 2009 +0000
@@ -148,19 +148,8 @@ static void vmx_save_host_msrs(void)
     struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
     int i;
 
-    /*
-     * If new MSR is needed to add into msr_index[] and VMX_INDEX_MSR_*** enum,
-     * please note that elements in msr_index[] and VMX_INDEX_MSR_*** enum
-     * are not the same. Currently we only save three MSRs(MSR_LSTAR, MSR_STAR,
-     * and MSR_SYSCALL_MASK into host state. 
-     */
-    BUILD_BUG_ON(MSR_INDEX_SIZE != VMX_INDEX_MSR_TSC_AUX ||
-                 VMX_INDEX_MSR_TSC_AUX != VMX_MSR_COUNT - 1);
     for ( i = 0; i < MSR_INDEX_SIZE; i++ )
         rdmsrl(msr_index[i], host_msr_state->msrs[i]);
-
-    if ( cpu_has_rdtscp )
-        rdmsrl(MSR_TSC_AUX, host_msr_state->msrs[VMX_INDEX_MSR_TSC_AUX]);
 }
 
 #define WRITE_MSR(address)                                              \
@@ -211,21 +200,6 @@ static enum handler_return long_mode_do_
         msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
         break;
 
-    case MSR_TSC_AUX:
-        if ( cpu_has_rdtscp ) 
-        {
-            msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_TSC_AUX];
-            break;
-        }
-        else
-        {
-            HVM_DBG_LOG(DBG_LEVEL_0, "Reading from nonexistence msr 0x%x\n",
-                        ecx);
-            vmx_inject_hw_exception(TRAP_gp_fault, 0);
-            return HNDL_exception_raised;
-        }
-            
-
     default:
         return HNDL_unhandled;
     }
@@ -286,20 +260,6 @@ static enum handler_return long_mode_do_
 
     case MSR_SYSCALL_MASK:
         WRITE_MSR(SYSCALL_MASK);
-
-    case MSR_TSC_AUX:
-        if ( cpu_has_rdtscp )
-        {
-            struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
-            guest_state->msrs[VMX_INDEX_MSR_TSC_AUX] = msr_content;
-            wrmsrl(MSR_TSC_AUX, (uint32_t)msr_content);
-        }
-        else
-        {
-            HVM_DBG_LOG(DBG_LEVEL_0, "Writing to nonexistence msr 0x%x\n", 
ecx);
-            vmx_inject_hw_exception(TRAP_gp_fault, 0);
-            return HNDL_exception_raised;
-        }
 
     default:
         return HNDL_unhandled;
@@ -331,22 +291,15 @@ static void vmx_restore_host_msrs(void)
         wrmsrl(msr_index[i], host_msr_state->msrs[i]);
         clear_bit(i, &host_msr_state->flags);
     }
-
-    if ( cpu_has_rdtscp )
-        wrmsrl(MSR_TSC_AUX,
-               (uint32_t)host_msr_state->msrs[VMX_INDEX_MSR_TSC_AUX]);
 }
 
 static void vmx_save_guest_msrs(struct vcpu *v)
 {
-    struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
     /*
      * We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can
      * be updated at any time via SWAPGS, which we cannot trap.
      */
     rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs);
-    if ( cpu_has_rdtscp )
-        rdmsrl(MSR_TSC_AUX, guest_msr_state->msrs[VMX_INDEX_MSR_TSC_AUX]);
 }
 
 static void vmx_restore_guest_msrs(struct vcpu *v)
@@ -384,8 +337,7 @@ static void vmx_restore_guest_msrs(struc
     }
 
     if ( cpu_has_rdtscp )
-        wrmsrl(MSR_TSC_AUX,
-               (uint32_t)guest_msr_state->msrs[VMX_INDEX_MSR_TSC_AUX]);
+        wrmsrl(MSR_TSC_AUX, v->arch.hvm_vcpu.msr_tsc_aux);
 }
 
 #else  /* __i386__ */
@@ -627,10 +579,6 @@ static void vmx_save_cpu_state(struct vc
     data->msr_lstar        = guest_state->msrs[VMX_INDEX_MSR_LSTAR];
     data->msr_star         = guest_state->msrs[VMX_INDEX_MSR_STAR];
     data->msr_syscall_mask = guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
-    if ( cpu_has_rdtscp )
-        data->msr_tsc_aux = guest_state->msrs[VMX_INDEX_MSR_TSC_AUX];
-    else
-        data->msr_tsc_aux = 0;
 #endif
 
     data->tsc = hvm_get_guest_tsc(v);
@@ -649,10 +597,6 @@ static void vmx_load_cpu_state(struct vc
 
     v->arch.hvm_vmx.cstar     = data->msr_cstar;
     v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
-    if ( cpu_has_rdtscp )
-        guest_state->msrs[VMX_INDEX_MSR_TSC_AUX] = data->msr_tsc_aux;
-    else
-        guest_state->msrs[VMX_INDEX_MSR_TSC_AUX] = 0;
 #endif
 
     hvm_set_guest_tsc(v, data->tsc);
@@ -2545,29 +2489,19 @@ asmlinkage void vmx_vmexit_handler(struc
         hvm_hlt(regs->eflags);
         break;
     case EXIT_REASON_INVLPG:
-    {
         inst_len = __get_instruction_length(); /* Safe: INVLPG */
         __update_guest_eip(inst_len);
         exit_qualification = __vmread(EXIT_QUALIFICATION);
         vmx_invlpg_intercept(exit_qualification);
         break;
-    }
+    case EXIT_REASON_RDTSCP:
+        regs->ecx = v->arch.hvm_vcpu.msr_tsc_aux;
+        /* fall through */
     case EXIT_REASON_RDTSC:
         inst_len = __get_instruction_length();
         __update_guest_eip(inst_len);
         hvm_rdtsc_intercept(regs);
         break;
-#ifdef __x86_64__
-    case EXIT_REASON_RDTSCP:
-    {
-        struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
-        inst_len = __get_instruction_length();
-        __update_guest_eip(inst_len);
-        hvm_rdtsc_intercept(regs);
-        regs->ecx = (uint32_t)(guest_state->msrs[VMX_INDEX_MSR_TSC_AUX]);
-        break;
-    }
-#endif
     case EXIT_REASON_VMCALL:
     {
         int rc;
diff -r 3c5b5c4c1d79 -r 7c85a4aa17fe xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Wed Dec 16 22:26:15 2009 +0000
+++ b/xen/include/asm-x86/hvm/vcpu.h    Wed Dec 16 22:26:38 2009 +0000
@@ -73,6 +73,8 @@ struct hvm_vcpu {
     u64                 asid_generation;
     u32                 asid;
 
+    u32                 msr_tsc_aux;
+
     union {
         struct arch_vmx_struct vmx;
         struct arch_svm_struct svm;
diff -r 3c5b5c4c1d79 -r 7c85a4aa17fe xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Dec 16 22:26:15 2009 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Dec 16 22:26:38 2009 +0000
@@ -44,7 +44,6 @@ enum {
     VMX_INDEX_MSR_LSTAR = 0,
     VMX_INDEX_MSR_STAR,
     VMX_INDEX_MSR_SYSCALL_MASK,
-    VMX_INDEX_MSR_TSC_AUX,
 
     VMX_MSR_COUNT
 };
diff -r 3c5b5c4c1d79 -r 7c85a4aa17fe xen/include/asm-x86/msr.h
--- a/xen/include/asm-x86/msr.h Wed Dec 16 22:26:15 2009 +0000
+++ b/xen/include/asm-x86/msr.h Wed Dec 16 22:26:38 2009 +0000
@@ -84,7 +84,7 @@ static inline void wrmsrl(unsigned int m
 
 #define write_tsc(val) wrmsrl(MSR_IA32_TSC, val)
 
-#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
+#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
 
 #define rdpmc(counter,low,high) \
      __asm__ __volatile__("rdpmc" \

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog