
[Xen-changelog] [xen-3.2-testing] vmx realmode: __hvm_copy() should not hvm_get_segment_register() when



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1203344078 0
# Node ID 61635a694ba3a58cef48b3b12eb14f490eae2c35
# Parent  9f835c84d2be1bf7aca8b6db0bc9cc32e65b7817
vmx realmode: __hvm_copy() should not hvm_get_segment_register() when
we are emulating. Firstly it is bogus, since VMCS segment state is
stale in this context. Secondly, real mode and real->protected
contexts are rather unlikely to happen with SS.DPL == 3.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset:   17040:ec1fa84147ad8c72018f976c953341a5fb657eac
xen-unstable date:        Wed Feb 13 16:35:51 2008 +0000
---
 xen/arch/x86/hvm/hvm.c |   23 +++++++++++++++++------
 1 files changed, 17 insertions(+), 6 deletions(-)

diff -r 9f835c84d2be -r 61635a694ba3 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Feb 18 14:14:17 2008 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Mon Feb 18 14:14:38 2008 +0000
@@ -1272,6 +1272,7 @@ static enum hvm_copy_result __hvm_copy(
 static enum hvm_copy_result __hvm_copy(
     void *buf, paddr_t addr, int size, int dir, int virt, int fetch)
 {
+    struct vcpu *curr = current;
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     char *p;
@@ -1280,12 +1281,22 @@ static enum hvm_copy_result __hvm_copy(
 
     if ( virt )
     {
-        struct segment_register sreg;
-        hvm_get_segment_register(current, x86_seg_ss, &sreg);
-        if ( sreg.attr.fields.dpl == 3 )
-            pfec |= PFEC_user_mode;
+        /*
+         * We cannot use hvm_get_segment_register() while executing in
+         * vmx_realmode() as segment register state is cached. Furthermore,
+         * VMREADs on every data access hurts emulation performance.
+         */
+        if ( !curr->arch.hvm_vmx.vmxemul )
+        {
+            struct segment_register sreg;
+            hvm_get_segment_register(curr, x86_seg_ss, &sreg);
+            if ( sreg.attr.fields.dpl == 3 )
+                pfec |= PFEC_user_mode;
+        }
+
         if ( dir ) 
             pfec |= PFEC_write_access;
+
         if ( fetch ) 
             pfec |= PFEC_insn_fetch;
     }
@@ -1297,7 +1308,7 @@ static enum hvm_copy_result __hvm_copy(
 
         if ( virt )
         {
-            gfn = paging_gva_to_gfn(current, addr, &pfec);
+            gfn = paging_gva_to_gfn(curr, addr, &pfec);
             if ( gfn == INVALID_GFN )
             {
                 if ( virt == 2 ) /* 2 means generate a fault */
@@ -1321,7 +1332,7 @@ static enum hvm_copy_result __hvm_copy(
         if ( dir )
         {
             memcpy(p, buf, count); /* dir == TRUE:  *to* guest */
-            paging_mark_dirty(current->domain, mfn);
+            paging_mark_dirty(curr->domain, mfn);
         }
         else
             memcpy(buf, p, count); /* dir == FALSE: *from guest */
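
For readers following the change, below is a minimal standalone sketch of the guarded PFEC derivation introduced above. The PFEC_* values, the fake_vcpu structure and compute_pfec() are illustrative stand-ins rather than the actual Xen definitions, and the base pfec value is simplified to zero:

#include <stdint.h>
#include <stdio.h>

/* Illustrative page-fault error-code bits (match the x86 architectural
 * positions, but not taken from the Xen headers). */
#define PFEC_write_access (1u << 1)
#define PFEC_user_mode    (1u << 2)
#define PFEC_insn_fetch   (1u << 4)

/* Hypothetical stand-in for the fields __hvm_copy() consults. */
struct fake_vcpu {
    int vmxemul;   /* non-zero while the VMX real-mode emulator is active */
    int ss_dpl;    /* SS.DPL as it would be read from the (stale) VMCS    */
};

/* Mirrors the guarded logic in the hunk: skip the stale segment lookup
 * while emulating, otherwise honour SS.DPL == 3; then fold in the
 * write/fetch bits exactly as before. */
static uint32_t compute_pfec(const struct fake_vcpu *v, int dir, int fetch)
{
    uint32_t pfec = 0;

    if ( !v->vmxemul && (v->ss_dpl == 3) )
        pfec |= PFEC_user_mode;
    if ( dir )
        pfec |= PFEC_write_access;
    if ( fetch )
        pfec |= PFEC_insn_fetch;

    return pfec;
}

int main(void)
{
    struct fake_vcpu emulating   = { .vmxemul = 1, .ss_dpl = 3 };
    struct fake_vcpu normal_user = { .vmxemul = 0, .ss_dpl = 3 };

    /* While emulating, the stale DPL is ignored: no PFEC_user_mode. */
    printf("emulating:   pfec = %#x\n",
           (unsigned)compute_pfec(&emulating, 1, 0));
    /* Outside emulation, user mode is reflected as before. */
    printf("normal user: pfec = %#x\n",
           (unsigned)compute_pfec(&normal_user, 1, 0));
    return 0;
}

Compiled on its own, the first line prints a pfec without PFEC_user_mode set (the stale SS.DPL is ignored while the real-mode emulator runs), while the second reflects user mode exactly as the pre-patch code did.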

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
