
[Xen-changelog] [xen-unstable] AMD_LWP: add interrupt support for AMD LWP



# HG changeset patch
# User Wei Huang <wei.huang2@xxxxxxx>
# Date 1333137954 -3600
# Node ID 1088c8557a46ab28e509bb9482e2a73a21590df8
# Parent  6765a2510ee7ad899dcb87eefdf206f8c8ae34ae
AMD_LWP: add interrupt support for AMD LWP

This patch adds interrupt support for AMD lightweight profiling (LWP).
It registers an interrupt handler using alloc_direct_apic_vector().
When that interrupt fires, SVM reinjects a virtual interrupt into the
guest VM through the guest's virtual local APIC.

Signed-off-by: Wei Huang <wei.huang2@xxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---
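For reference, here is the LWP_CFG bit layout this patch relies on, as
inferred from its own shifts and masks; the macro and helper names
below are illustrative sketches, not identifiers from the Xen tree:

#include <stdint.h>

/*
 * LWP_CFG (MSR 0xc0000105) fields used by this patch:
 *   bit  31    - threshold interrupt enable
 *   bits 47:40 - interrupt vector delivered on a threshold overflow
 */
#define LWP_CFG_INTR_ENABLE  (1ULL << 31)
#define LWP_CFG_VECTOR_MASK  (0xffULL << 40) /* ~mask == 0xffff00ffffffffff */

/* Extract the interrupt vector programmed into an LWP_CFG value. */
static inline uint8_t lwp_cfg_vector(uint64_t lwp_cfg)
{
    return (lwp_cfg >> 40) & 0xff;
}

/* Return lwp_cfg with its vector field replaced by vec. */
static inline uint64_t lwp_cfg_with_vector(uint64_t lwp_cfg, uint8_t vec)
{
    return (lwp_cfg & ~LWP_CFG_VECTOR_MASK) | ((uint64_t)vec << 40);
}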


diff -r 6765a2510ee7 -r 1088c8557a46 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Fri Mar 30 10:01:15 2012 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Mar 30 21:05:54 2012 +0100
@@ -745,6 +745,17 @@ static void svm_init_hypercall_page(stru
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
 
+static void svm_lwp_interrupt(struct cpu_user_regs *regs)
+{
+    struct vcpu *curr = current;
+
+    ack_APIC_irq();
+    vlapic_set_irq(
+        vcpu_vlapic(curr),
+        (curr->arch.hvm_svm.guest_lwp_cfg >> 40) & 0xff,
+        0);
+}
+
 static inline void svm_lwp_save(struct vcpu *v)
 {
     /* Don't mess with other guests. Disable LWP for the next VCPU. */
@@ -759,7 +770,7 @@ static inline void svm_lwp_load(struct v
 {
     /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor. */
    if ( v->arch.hvm_svm.guest_lwp_cfg ) 
-       wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
+       wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.cpu_lwp_cfg);
 }
 
 /* Update LWP_CFG MSR (0xc0000105). Returns -1 on error, 0 otherwise. */
@@ -767,7 +778,8 @@ static int svm_update_lwp_cfg(struct vcp
 {
     unsigned int eax, ebx, ecx, edx;
     uint32_t msr_low;
-    
+    static uint8_t lwp_intr_vector;
+
     if ( xsave_enabled(v) && cpu_has_lwp )
     {
         hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
@@ -776,11 +788,23 @@ static int svm_update_lwp_cfg(struct vcp
         /* generate #GP if guest tries to turn on unsupported features. */
         if ( msr_low & ~edx )
             return -1;
+
+        v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+
+        /* Set up the interrupt handler if needed. */
+        if ( (msr_content & 0x80000000) && ((msr_content >> 40) & 0xff) )
+        {
+            alloc_direct_apic_vector(&lwp_intr_vector, svm_lwp_interrupt);
+            v->arch.hvm_svm.cpu_lwp_cfg = (msr_content & 0xffff00ffffffffffULL)
+                | ((uint64_t)lwp_intr_vector << 40);
+        }
+        else
+        {
+            /* Otherwise disable the LWP interrupt. */
+            v->arch.hvm_svm.cpu_lwp_cfg = msr_content & 0xffff00ff7fffffffULL;
+        }
         
-        wrmsrl(MSR_AMD64_LWP_CFG, msr_content);
-        /* CPU might automatically correct reserved bits. So read it back. */
-        rdmsrl(MSR_AMD64_LWP_CFG, msr_content);
-        v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+        wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.cpu_lwp_cfg);
 
         /* track nonlazy state if LWP_CFG is non-zero. */
         v->arch.nonlazy_xstate_used = !!(msr_content);
diff -r 6765a2510ee7 -r 1088c8557a46 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Fri Mar 30 10:01:15 2012 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Fri Mar 30 21:05:54 2012 +0100
@@ -515,7 +515,8 @@ struct arch_svm_struct {
     uint64_t guest_sysenter_eip;
     
     /* AMD lightweight profiling MSR */
-    uint64_t guest_lwp_cfg;
+    uint64_t guest_lwp_cfg;      /* guest version */
+    uint64_t cpu_lwp_cfg;        /* CPU version */
 
     /* OSVW MSRs */
     struct {

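A note on the new guest_lwp_cfg/cpu_lwp_cfg split: guest_lwp_cfg keeps
the value the guest programmed, including the guest's own interrupt
vector, while cpu_lwp_cfg is what actually reaches hardware, with the
vector Xen obtained from alloc_direct_apic_vector() spliced into bits
47:40. A minimal standalone sketch of that substitution, using made-up
vector numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical: the guest enables the threshold interrupt (bit 31)
     * and picks vector 0x90; Xen's allocated vector is 0xf3. */
    uint64_t guest_lwp_cfg = (1ULL << 31) | (0x90ULL << 40) | 0x7;
    uint8_t lwp_intr_vector = 0xf3;

    /* The substitution svm_update_lwp_cfg() performs. */
    uint64_t cpu_lwp_cfg = (guest_lwp_cfg & 0xffff00ffffffffffULL)
        | ((uint64_t)lwp_intr_vector << 40);

    printf("guest view: 0x%016llx\n", (unsigned long long)guest_lwp_cfg);
    printf("cpu view:   0x%016llx\n", (unsigned long long)cpu_lwp_cfg);
    /* Prints 0x0000900080000007 and 0x0000f30080000007. */
    return 0;
}

This split is also why svm_lwp_load() now writes cpu_lwp_cfg instead of
guest_lwp_cfg: reloading the guest's value verbatim would program the
guest's vector, rather than Xen's, into the host MSR, and the
notification would never reach svm_lwp_interrupt().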