
[Xen-changelog] [xen-unstable] [XEN] Skip shadowing of guest PTE writes when known to be safe



# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1166615697 0
# Node ID caa1987679bddcb330e87adf787936ab94162e8d
# Parent  f7a2cd8b0a8e03e94babc88c9c25fb5008f7a125
[XEN] Skip shadowing of guest PTE writes when known to be safe
That is, when the guest replaces a not-present pte with another not-present one.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/multi.c   |   49 +++++++++++++++++++++++++++++++++++----
 xen/arch/x86/mm/shadow/private.h |    4 +++
 xen/include/asm-x86/shadow.h     |    5 ++-
 3 files changed, 51 insertions(+), 7 deletions(-)
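
For readers skimming the diff below: here is a minimal stand-alone sketch (not part of the patch) of the rule the new safe_not_to_verify_write() check relies on. The PTE_PRESENT constant and can_skip_validation() helper are illustrative names only, standing in for Xen's _PAGE_PRESENT and the real function; the patch additionally requires that the write size and alignment match the pte size implied by the guest's shadow type (the SHF_32 / SHF_PAE / SHF_64 tests).

    #include <stdint.h>

    #define PTE_PRESENT 0x1ULL   /* bit 0 of an x86 pte */

    /* A guest pagetable write needs no shadow validation when both the old
     * and the new pte value have the present bit clear: neither value maps
     * anything, so no shadow entry can be affected by the change. */
    static int can_skip_validation(uint64_t old_pte, uint64_t new_pte)
    {
        return (old_pte & PTE_PRESENT) == 0 && (new_pte & PTE_PRESENT) == 0;
    }
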

diff -r f7a2cd8b0a8e -r caa1987679bd xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Dec 20 11:53:01 2006 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Dec 20 11:54:57 2006 +0000
@@ -3839,12 +3839,43 @@ static inline void * emulate_map_dest(st
     return NULL;
 }
 
+static int safe_not_to_verify_write(mfn_t gmfn, void *dst, void *src, 
+                                    int bytes)
+{
+#if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
+    struct page_info *pg = mfn_to_page(gmfn);
+    if ( !(pg->shadow_flags & SHF_32) 
+         && bytes == 8 
+         && ((unsigned long)dst & 7) == 0 )
+    {
+        /* Not shadowed 32-bit: aligned 64-bit writes that leave the
+         * present bit unset are safe to ignore. */
+        if ( (*(u64*)src & _PAGE_PRESENT) == 0 
+             && (*(u64*)dst & _PAGE_PRESENT) == 0 )
+            return 1;
+    }
+    else if ( !(pg->shadow_flags & (SHF_PAE|SHF_64)) 
+              && bytes == 4 
+              && ((unsigned long)dst & 3) == 0 )
+    {
+        /* Not shadowed PAE/64-bit: aligned 32-bit writes that leave the
+         * present bit unset are safe to ignore. */
+        if ( (*(u32*)src & _PAGE_PRESENT) == 0 
+             && (*(u32*)dst & _PAGE_PRESENT) == 0 )
+            return 1;
+    }
+#endif
+    return 0;
+}
+
+
 int
 sh_x86_emulate_write(struct vcpu *v, unsigned long vaddr, void *src,
                       u32 bytes, struct sh_emulate_ctxt *sh_ctxt)
 {
     mfn_t mfn;
     void *addr;
+    int skip;
 
     if ( vaddr & (bytes-1) )
         return X86EMUL_UNHANDLEABLE;
@@ -3855,8 +3886,9 @@ sh_x86_emulate_write(struct vcpu *v, uns
     if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
         return X86EMUL_PROPAGATE_FAULT;
 
+    skip = safe_not_to_verify_write(mfn, addr, src, bytes);
     memcpy(addr, src, bytes);
-    shadow_validate_guest_pt_write(v, mfn, addr, bytes);
+    if ( !skip ) shadow_validate_guest_pt_write(v, mfn, addr, bytes);
 
     /* If we are writing zeros to this page, might want to unshadow */
     if ( likely(bytes >= 4) && (*(u32 *)addr == 0) )
@@ -3875,7 +3907,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     mfn_t mfn;
     void *addr;
     unsigned long prev;
-    int rv = X86EMUL_CONTINUE;
+    int rv = X86EMUL_CONTINUE, skip;
 
     ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(bytes <= sizeof(unsigned long));
@@ -3885,6 +3917,8 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
 
     if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
         return X86EMUL_PROPAGATE_FAULT;
+
+    skip = safe_not_to_verify_write(mfn, &new, &old, bytes);
 
     switch ( bytes )
     {
@@ -3898,7 +3932,9 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     }
 
     if ( prev == old )
-        shadow_validate_guest_pt_write(v, mfn, addr, bytes);
+    {
+        if ( !skip ) shadow_validate_guest_pt_write(v, mfn, addr, bytes);
+    }
     else
         rv = X86EMUL_CMPXCHG_FAILED;
 
@@ -3924,7 +3960,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     mfn_t mfn;
     void *addr;
     u64 old, new, prev;
-    int rv = X86EMUL_CONTINUE;
+    int rv = X86EMUL_CONTINUE, skip;
 
     ASSERT(shadow_locked_by_me(v->domain));
 
@@ -3936,10 +3972,13 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
 
     old = (((u64) old_hi) << 32) | (u64) old_lo;
     new = (((u64) new_hi) << 32) | (u64) new_lo;
+    skip = safe_not_to_verify_write(mfn, &new, &old, 8);
     prev = cmpxchg(((u64 *)addr), old, new);
 
     if ( prev == old )
-        shadow_validate_guest_pt_write(v, mfn, addr, 8);
+    {
+        if ( !skip ) shadow_validate_guest_pt_write(v, mfn, addr, 8);
+    }
     else
         rv = X86EMUL_CMPXCHG_FAILED;
 
diff -r f7a2cd8b0a8e -r caa1987679bd xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Wed Dec 20 11:53:01 2006 +0000
+++ b/xen/arch/x86/mm/shadow/private.h  Wed Dec 20 11:54:57 2006 +0000
@@ -249,6 +249,10 @@ static inline int sh_type_is_pinnable(st
 #define SHF_L3_64   (1u << SH_type_l3_64_shadow)
 #define SHF_L4_64   (1u << SH_type_l4_64_shadow)
 
+#define SHF_32  (SHF_L1_32|SHF_FL1_32|SHF_L2_32)
+#define SHF_PAE (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE|SHF_L2H_PAE)
+#define SHF_64  (SHF_L1_64|SHF_FL1_64|SHF_L2_64|SHF_L3_64|SHF_L4_64)
+
 /* Used for hysteresis when automatically unhooking mappings on fork/exit */
 #define SHF_unhooked_mappings (1u<<31)
 
diff -r f7a2cd8b0a8e -r caa1987679bd xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Wed Dec 20 11:53:01 2006 +0000
+++ b/xen/include/asm-x86/shadow.h      Wed Dec 20 11:54:57 2006 +0000
@@ -159,8 +159,9 @@ extern int shadow_audit_enable;
 #define SHOPT_FAST_FAULT_PATH     0x04  /* Fast-path MMIO and not-present */
 #define SHOPT_PREFETCH            0x08  /* Shadow multiple entries per fault */
 #define SHOPT_LINUX_L3_TOPLEVEL   0x10  /* Pin l3es on early 64bit linux */
-
-#define SHADOW_OPTIMIZATIONS      0x1f
+#define SHOPT_SKIP_VERIFY         0x20  /* Skip PTE v'fy when safe to do so */
+
+#define SHADOW_OPTIMIZATIONS      0x3f
 
 
 /* With shadow pagetables, the different kinds of address start 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog