
[Xen-changelog] Fix writing to mmap'ed /dev/mem region mapped PROT_WRITE



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 7c2afbad0188ac64feb0f4122c7b262bc640c014
# Parent  cac138ea92841a1ea7bcfbcfb1a845f66878bdbe
Fix writing to a /dev/mem region mmap'ed with PROT_WRITE
and MAP_PRIVATE. This is in fact a generic Linux bug.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
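
For reference, a userspace sequence along these lines is enough to
exercise the broken path. This is a sketch rather than part of the
changeset: the 0xA0000 offset (the legacy VGA window) is purely
illustrative, and the invalid-pfn case only fires when the mapped
frame has no struct page behind it, e.g. an MMIO region above RAM
or, under Xen, a machine frame outside the pseudo-physical map.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        unsigned char *p;
        int fd = open("/dev/mem", O_RDWR);

        if (fd < 0) {
                perror("open /dev/mem");
                return 1;
        }

        /* Private, writable mapping of one physical page.  The
         * 0xA0000 offset is illustrative only; the invalid-pfn
         * path needs a frame with no struct page behind it. */
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE,
                 fd, 0xA0000);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* The first store forces copy-on-write in do_wp_page(). */
        p[0] = 0xff;

        munmap(p, 4096);
        close(fd);
        return 0;
}

Without the fix, that first write lands in the pfn_valid() failure
path removed by the diff below and is bounced back as VM_FAULT_OOM
instead of being satisfied by copy-on-write.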

diff -r cac138ea9284 -r 7c2afbad0188 linux-2.6-xen-sparse/mm/memory.c
--- a/linux-2.6-xen-sparse/mm/memory.c  Fri Sep  2 14:20:12 2005
+++ b/linux-2.6-xen-sparse/mm/memory.c  Fri Sep  2 16:51:55 2005
@@ -1367,20 +1367,15 @@
        struct page *old_page, *new_page;
        unsigned long pfn = pte_pfn(pte);
        pte_t entry;
+       struct page invalid_page;
 
        if (unlikely(!pfn_valid(pfn))) {
-               /*
-                * This should really halt the system so it can be debugged or
-                * at least the kernel stops what it's doing before it corrupts
-                * data, but for the moment just pretend this is OOM.
-                */
-               pte_unmap(page_table);
-               printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
-                               address);
-               spin_unlock(&mm->page_table_lock);
-               return VM_FAULT_OOM;
-       }
-       old_page = pfn_to_page(pfn);
+               /* This can happen with /dev/mem (PROT_WRITE, MAP_PRIVATE). */
+               invalid_page.flags = (1<<PG_reserved) | (1<<PG_locked);
+               old_page = &invalid_page;
+       } else {
+               old_page = pfn_to_page(pfn);
+       }
 
        if (!TestSetPageLocked(old_page)) {
                int reuse = can_share_swap_page(old_page);
@@ -1416,7 +1411,13 @@
                new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
                if (!new_page)
                        goto no_new_page;
-               copy_user_highpage(new_page, old_page, address);
+               if (old_page == &invalid_page) {
+                       char *vto = kmap_atomic(new_page, KM_USER1);
+                       copy_page(vto, (void *)(address & PAGE_MASK));
+                       kunmap_atomic(vto, KM_USER1);
+               } else {
+                       copy_user_highpage(new_page, old_page, address);
+               }
        }
        /*
         * Re-check the pte - we dropped the lock
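
Two details of the approach are worth calling out (commentary on the
hunks above, not text from the changeset). First, the on-stack
invalid_page is marked PG_locked so that TestSetPageLocked() reports
it as already locked and the swap-page reuse check is skipped
entirely, steering the fault straight into the copy path; PG_reserved
keeps the later release logic from refcounting or freeing the fake
page. Second, with no struct page for the source frame there is
nothing to kmap on the source side, so the new page is filled from
the faulting user virtual address, which the read-only pte still
maps. Condensed, the copy step amounts to:

        if (old_page == &invalid_page) {
                /* No struct page for the source frame: copy through
                 * the user virtual address, still mapped read-only. */
                char *vto = kmap_atomic(new_page, KM_USER1);
                copy_page(vto, (void *)(address & PAGE_MASK));
                kunmap_atomic(vto, KM_USER1);
        } else {
                /* Normal case: copy via the source struct page. */
                copy_user_highpage(new_page, old_page, address);
        }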
