
[Xen-changelog] [xen-unstable] x86 svm: Make 32bit legacy guests boot again



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1246095213 -3600
# Node ID 49ae55a9c4429ea2ee34e7e723b99f7ad024a7b7
# Parent  43523102a8e9093acef4be9da8ec4ff4928ccea8
x86 svm: Make 32bit legacy guests boot again

Attached patch fixes a bug introduced in c/s 19648.

32bit legacy guests have the sysenter/sysexit instructions available.
Therefore, we have to disable intercepts for the sysenter MSRs;
otherwise the guest gets stuck in an infinite loop of #GPs.

For guests in 64bit mode and 32bit compat mode, the sysenter/sysexit
instructions aren't available. The sysenter MSRs have to be
intercepted to make instruction emulation work.
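
In code terms, the whole fix is to key the three sysenter MSR
intercepts off EFER.LMA. A minimal sketch of that policy, using the
names from the hunk below:

    /*
     * Sketch only (not compilable outside Xen): the policy the patch
     * adds to svm_update_guest_efer().  EFER.LMA set means the guest
     * is in long mode (64bit or 32bit compat), where AMD CPUs raise
     * #UD on sysenter/sysexit and Xen must emulate instead.
     */
    bool_t lma = !!(v->arch.hvm_vcpu.guest_efer & EFER_LMA);

    svm_intercept_msr(v, MSR_IA32_SYSENTER_CS,  lma); /* 1: trap+emulate */
    svm_intercept_msr(v, MSR_IA32_SYSENTER_ESP, lma); /* 0: pass through */
    svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma);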

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c         |   11 +++++++++-
 xen/arch/x86/hvm/svm/vmcb.c        |   40 +++++++++++++++++++++----------------
 xen/include/asm-x86/hvm/svm/vmcb.h |    4 ++-
 3 files changed, 36 insertions(+), 19 deletions(-)

diff -r 43523102a8e9 -r 49ae55a9c442 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Sat Jun 27 10:02:52 2009 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Sat Jun 27 10:33:33 2009 +0100
@@ -452,10 +452,19 @@ static void svm_update_guest_efer(struct
 static void svm_update_guest_efer(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    bool_t lma = !!(v->arch.hvm_vcpu.guest_efer & EFER_LMA);
 
     vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
-    if ( vmcb->efer & EFER_LMA )
+    if ( lma )
         vmcb->efer |= EFER_LME;
+
+    /*
+     * In legacy mode (EFER.LMA=0) we natively support SYSENTER/SYSEXIT with
+     * no need for MSR intercepts. When EFER.LMA=1 we must trap and emulate.
+     */
+    svm_intercept_msr(v, MSR_IA32_SYSENTER_CS, lma);
+    svm_intercept_msr(v, MSR_IA32_SYSENTER_ESP, lma);
+    svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma);
 }
 
 static void svm_flush_guest_tlbs(void)
diff -r 43523102a8e9 -r 49ae55a9c442 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Sat Jun 27 10:02:52 2009 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Sat Jun 27 10:33:33 2009 +0100
@@ -78,29 +78,34 @@ struct host_save_area *alloc_host_save_a
     return hsa;
 }
 
-void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr)
+void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable)
 {
     unsigned long *msr_bitmap = v->arch.hvm_svm.msrpm;
+    unsigned long *msr_bit = NULL;
 
     /*
      * See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
      */
     if ( msr <= 0x1fff )
-    {
-        __clear_bit(msr*2, msr_bitmap + 0x000/BYTES_PER_LONG); 
-        __clear_bit(msr*2+1, msr_bitmap + 0x000/BYTES_PER_LONG); 
-    }
+        msr_bit = msr_bitmap + 0x0000 / BYTES_PER_LONG;
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
-    {
-        msr &= 0x1fff;
-        __clear_bit(msr*2, msr_bitmap + 0x800/BYTES_PER_LONG);
-        __clear_bit(msr*2+1, msr_bitmap + 0x800/BYTES_PER_LONG);
-    } 
-    else if ( (msr >= 0xc001000) && (msr <= 0xc0011fff) )
-    {
-        msr &= 0x1fff;
-        __clear_bit(msr*2, msr_bitmap + 0x1000/BYTES_PER_LONG);
-        __clear_bit(msr*2+1, msr_bitmap + 0x1000/BYTES_PER_LONG);
+        msr_bit = msr_bitmap + 0x0800 / BYTES_PER_LONG;
+    else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
+        msr_bit = msr_bitmap + 0x1000 / BYTES_PER_LONG;
+
+    BUG_ON(msr_bit == NULL);
+
+    msr &= 0x1fff;
+
+    if ( enable )
+    {
+        __set_bit(msr * 2, msr_bit);
+        __set_bit(msr * 2 + 1, msr_bit);
+    }
+    else
+    {
+        __clear_bit(msr * 2, msr_bit);
+        __clear_bit(msr * 2 + 1, msr_bit);
     }
 }
 
@@ -165,8 +170,9 @@ static int construct_vmcb(struct vcpu *v
     if ( opt_softtsc )
         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
 
-    /* Guest EFER: *must* contain SVME or VMRUN will fail. */
-    vmcb->efer = EFER_SVME;
+    /* Guest EFER. */
+    v->arch.hvm_vcpu.guest_efer = 0;
+    hvm_update_guest_efer(v);
 
     /* Guest segment limits. */
     vmcb->cs.limit = ~0u;
diff -r 43523102a8e9 -r 49ae55a9c442 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Sat Jun 27 10:02:52 2009 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Sat Jun 27 10:33:33 2009 +0100
@@ -481,7 +481,9 @@ void svm_destroy_vmcb(struct vcpu *v);
 
 void setup_vmcb_dump(void);
 
-void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr);
+void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
+#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), 0)
+#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), 1)
 
 #endif /* ASM_X86_HVM_SVM_VMCS_H__ */
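
For reference, a self-contained sketch of the MSR-bitmap indexing the
reworked svm_intercept_msr() relies on (AMD64 Programmers Manual,
Vol 2, Section 15.10): each MSR owns two consecutive bits, 2n for
reads and 2n+1 for writes, in one of three 2KB regions of the bitmap.
msrpm_slot() is a hypothetical helper written purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Map an MSR number to the byte offset and bit index of its read
     * intercept bit; the write bit is the next bit up.  Returns -1 for
     * MSRs outside the three ranges the bitmap covers.
     */
    static int msrpm_slot(uint32_t msr, unsigned int *byte, unsigned int *bit)
    {
        unsigned int base;

        if ( msr <= 0x1fff )
            base = 0x0000;                  /* 0x0 - 0x1fff */
        else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
            base = 0x0800;                  /* 0xc0000000 - 0xc0001fff */
        else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
            base = 0x1000;                  /* 0xc0010000 - 0xc0011fff */
        else
            return -1;

        msr &= 0x1fff;
        *byte = base + (msr * 2) / 8;       /* byte holding the read bit */
        *bit  = (msr * 2) % 8;
        return 0;
    }

    int main(void)
    {
        unsigned int byte, bit;

        if ( msrpm_slot(0x174 /* MSR_IA32_SYSENTER_CS */, &byte, &bit) == 0 )
            printf("byte 0x%x, bits %u (read) / %u (write)\n",
                   byte, bit, bit + 1);
        return 0;
    }

For MSR_IA32_SYSENTER_CS (0x174) this reports byte 0x5d, bits 0 and 1:
exactly the pair the new BUG_ON()-guarded code sets or clears depending
on the guest's EFER.LMA.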
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

