
[Xen-changelog] [xen-unstable] x86: ptwr adjustments



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1179220322 -3600
# Node ID 807f374e720d57210e60ef4611e8abe22f49e043
# Parent  b2adb797900ad15e884de72782efb7e0bf12d58b
x86: ptwr adjustments

Make sure the MFN read from the PTE is valid before accessing the page
info structure associated with it. Drop the guest-to-machine-physical
translation from the ptwr code.
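
The ordering matters: the MFN taken from the PTE must be validated
before it is used to index the frame table, since looking up page info
for a bogus MFN reads memory outside the frame table. A minimal
standalone sketch of that guard pattern, using simplified stand-ins for
Xen's real mfn_valid()/mfn_to_page() helpers (the fixed MAX_MFN bound
and lookup_pt_page() wrapper are illustrative assumptions, not Xen
code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for Xen's frame-table machinery. */
struct page_info { uint32_t type_info; };

#define MAX_MFN 4096UL
static struct page_info frame_table[MAX_MFN];

static bool mfn_valid(unsigned long mfn)
{
    /* Only MFNs that index a real frame_table slot are usable. */
    return mfn < MAX_MFN;
}

static struct page_info *mfn_to_page(unsigned long mfn)
{
    return &frame_table[mfn];
}

/* The pattern the patch enforces: never touch page info derived
 * from an MFN that has not passed mfn_valid(). */
static struct page_info *lookup_pt_page(unsigned long pte_mfn)
{
    if ( !mfn_valid(pte_mfn) )
        return NULL;
    return mfn_to_page(pte_mfn);
}

int main(void)
{
    /* A valid MFN yields a page_info pointer; an invalid one does not. */
    return (lookup_pt_page(1) != NULL &&
            lookup_pt_page(MAX_MFN) == NULL) ? 0 : 1;
}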

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/mm.c |   19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff -r b2adb797900a -r 807f374e720d xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue May 15 10:08:20 2007 +0100
+++ b/xen/arch/x86/mm.c Tue May 15 10:12:02 2007 +0100
@@ -3238,13 +3238,14 @@ static int ptwr_emulated_update(
 
     /* We are looking only for read-only mappings of p.t. pages. */
     ASSERT((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) == _PAGE_PRESENT);
+    ASSERT(mfn_valid(mfn));
     ASSERT((page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table);
     ASSERT((page->u.inuse.type_info & PGT_count_mask) != 0);
     ASSERT(page_get_owner(page) == d);
 
     /* Check the new PTE. */
     nl1e = l1e_from_intpte(val);
-    if ( unlikely(!get_page_from_l1e(gl1e_to_ml1e(d, nl1e), d)) )
+    if ( unlikely(!get_page_from_l1e(nl1e, d)) )
     {
         if ( (CONFIG_PAGING_LEVELS >= 3) && is_pv_32bit_domain(d) &&
              (bytes == 4) && (addr & 4) && !do_cmpxchg &&
@@ -3270,7 +3271,7 @@ static int ptwr_emulated_update(
     adjust_guest_l1e(nl1e, d);
 
     /* Checked successfully: do the update (write or cmpxchg). */
-    pl1e = map_domain_page(page_to_mfn(page));
+    pl1e = map_domain_page(mfn);
     pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));
     if ( do_cmpxchg )
     {
@@ -3285,21 +3286,21 @@ static int ptwr_emulated_update(
         if ( !okay )
         {
             unmap_domain_page(pl1e);
-            put_page_from_l1e(gl1e_to_ml1e(d, nl1e), d);
+            put_page_from_l1e(nl1e, d);
             return X86EMUL_CMPXCHG_FAILED;
         }
     }
     else
     {
         ol1e = *pl1e;
-        if ( !UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, page_to_mfn(page), v) )
+        if ( !UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, mfn, v) )
             BUG();
     }
 
     unmap_domain_page(pl1e);
 
     /* Finally, drop the old PTE. */
-    put_page_from_l1e(gl1e_to_ml1e(d, ol1e), d);
+    put_page_from_l1e(ol1e, d);
 
     return X86EMUL_OKAY;
 }
@@ -3365,17 +3366,13 @@ int ptwr_do_page_fault(struct vcpu *v, u
 
     LOCK_BIGLOCK(d);
 
-    /*
-     * Attempt to read the PTE that maps the VA being accessed. By checking for
-     * PDE validity in the L2 we avoid many expensive fixups in __get_user().
-     */
+    /* Attempt to read the PTE that maps the VA being accessed. */
     guest_get_eff_l1e(v, addr, &pte);
-    if ( !(l1e_get_flags(pte) & _PAGE_PRESENT) )
-        goto bail;
     page = l1e_get_page(pte);
 
     /* We are looking only for read-only mappings of p.t. pages. */
     if ( ((l1e_get_flags(pte) & (_PAGE_PRESENT|_PAGE_RW)) != _PAGE_PRESENT) ||
+         !mfn_valid(l1e_get_pfn(pte)) ||
          ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
          ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
          (page_get_owner(page) != d) )

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog