[Xen-devel] [PATCH 3/7] Update sparse tree with "read_zero_pagealigned() locking fix" patch from 2.6.18.8 tree



Update the sparse tree with the "read_zero_pagealigned() locking fix" patch
from the 2.6.18.8 stable tree [1].

[1] 
http://git.kernel.org/?p=linux/kernel/git/stable/linux-2.6.18.y.git;a=commit;h=dbee2bf2f312a9d18fa3f305adc14e2ee58f65df
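
With this fix, zeromap_page_range() and its helpers report failure instead
of assuming success: a PTE that a racing fault has already populated (the
case the old BUG_ON(!pte_none(*pte)) would trip over) is returned as
-EEXIST, page-table allocation failures become -EAGAIN, and both callers in
drivers/char/mem.c now check the result.

As a rough illustration, here is a hypothetical userspace program (not part
of the patch, and it does not reproduce the race) that drives the two
affected /dev/zero paths: a page-aligned read() into a private anonymous
mapping, served by read_zero_pagealigned(), and a private mmap() of
/dev/zero, set up by mmap_zero().

    /* Hypothetical exerciser for the /dev/zero paths touched by this
     * patch; it drives the code paths but does not trigger the race. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 16 * sysconf(_SC_PAGESIZE);
        int fd = open("/dev/zero", O_RDWR);
        if (fd < 0) {
            perror("open /dev/zero");
            return 1;
        }

        /* Page-aligned private anonymous buffer: a large read() from
         * /dev/zero into it is served by read_zero_pagealigned(). */
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
            perror("mmap anonymous");
            return 1;
        }
        memset(buf, 0xff, len);         /* fault the pages in */
        ssize_t n = read(fd, buf, len); /* kernel re-zero-maps them */
        printf("read %zd bytes, buf[0] = %d\n", n, buf[0]);

        /* A private mapping of /dev/zero itself goes through mmap_zero();
         * with the patch a zeromap_page_range() failure is reported as a
         * failed mmap() instead of being ignored. */
        char *z = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE,
                       fd, 0);
        if (z == MAP_FAILED) {
            perror("mmap /dev/zero");
            return 1;
        }
        printf("z[0] = %d\n", z[0]);

        munmap(z, len);
        munmap(buf, len);
        close(fd);
        return 0;
    }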

Signed-off-by: S.Çağlar Onur <caglar@xxxxxxxxxxxxx>

--- a/linux-2.6-xen-sparse/drivers/char/mem.c
+++ b/linux-2.6-xen-sparse/drivers/char/mem.c
@@ -618,7 +618,8 @@ static inline size_t read_zero_pagealign
                        count = size;
 
                zap_page_range(vma, addr, count, NULL);
-               zeromap_page_range(vma, addr, count, PAGE_COPY);
+               if (zeromap_page_range(vma, addr, count, PAGE_COPY))
+                       break;
 
                size -= count;
                buf += count;
@@ -685,11 +686,14 @@ out:
 
 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
 {
+       int err;
+
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
-       if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
-               return -EAGAIN;
-       return 0;
+       err = zeromap_page_range(vma, vma->vm_start,
+                       vma->vm_end - vma->vm_start, vma->vm_page_prot);
+       BUG_ON(err == -EEXIST);
+       return err;
 }
 #else /* CONFIG_MMU */
 static ssize_t read_zero(struct file * file, char * buf, 
--- a/linux-2.6-xen-sparse/mm/memory.c
+++ b/linux-2.6-xen-sparse/mm/memory.c
@@ -1131,21 +1131,27 @@ static int zeromap_pte_range(struct mm_s
 {
        pte_t *pte;
        spinlock_t *ptl;
+       int err = 0;
 
        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
-               return -ENOMEM;
+               return -EAGAIN;
        do {
                struct page *page = ZERO_PAGE(addr);
                pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
+
+               if (unlikely(!pte_none(*pte))) {
+                       err = -EEXIST;
+                       pte++;
+                       break;
+               }
                page_cache_get(page);
                page_add_file_rmap(page);
                inc_mm_counter(mm, file_rss);
-               BUG_ON(!pte_none(*pte));
                set_pte_at(mm, addr, pte, zero_pte);
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl);
-       return 0;
+       return err;
 }
 
 static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1153,16 +1159,18 @@ static inline int zeromap_pmd_range(stru
 {
        pmd_t *pmd;
        unsigned long next;
+       int err;
 
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
-               return -ENOMEM;
+               return -EAGAIN;
        do {
                next = pmd_addr_end(addr, end);
-               if (zeromap_pte_range(mm, pmd, addr, next, prot))
-                       return -ENOMEM;
+               err = zeromap_pte_range(mm, pmd, addr, next, prot);
+               if (err)
+                       break;
        } while (pmd++, addr = next, addr != end);
-       return 0;
+       return err;
 }
 
 static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
@@ -1170,16 +1178,18 @@ static inline int zeromap_pud_range(stru
 {
        pud_t *pud;
        unsigned long next;
+       int err;
 
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
-               return -ENOMEM;
+               return -EAGAIN;
        do {
                next = pud_addr_end(addr, end);
-               if (zeromap_pmd_range(mm, pud, addr, next, prot))
-                       return -ENOMEM;
+               err = zeromap_pmd_range(mm, pud, addr, next, prot);
+               if (err)
+                       break;
        } while (pud++, addr = next, addr != end);
-       return 0;
+       return err;
 }
 
 int zeromap_page_range(struct vm_area_struct *vma,
@@ -1674,7 +1684,14 @@ gotten:
                entry = mk_pte(new_page, vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                lazy_mmu_prot_update(entry);
-               ptep_establish(vma, address, page_table, entry);
+               /*
+                * Clear the pte entry and flush it first, before updating the
+                * pte with the new entry. This will avoid a race condition
+                * seen in the presence of one thread doing SMC and another
+                * thread doing COW.
+                */
+               ptep_clear_flush(vma, address, page_table);
+               set_pte_at(mm, address, page_table, entry);
                update_mmu_cache(vma, address, entry);
                lru_cache_add_active(new_page);
                page_add_new_anon_rmap(new_page, vma, address);
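
A note on the error semantics above: the zeromap helpers now return -EAGAIN
when page-table allocation fails and -EEXIST when a racing fault has already
populated a PTE. mmap_zero() BUG_ON()s -EEXIST, since a freshly created VMA
cannot have populated PTEs, and hands any other error back to the caller, so
a private mmap() of /dev/zero may now fail with EAGAIN under memory
pressure. A hypothetical caller-side sketch (the helper name and retry
policy are illustrative only, not part of the patch):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Hypothetical helper: retry a private /dev/zero mapping a few times
     * when the kernel reports the transient EAGAIN case. */
    static void *map_zero_retry(int fd, size_t len, int tries)
    {
        while (tries-- > 0) {
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE, fd, 0);
            if (p != MAP_FAILED)
                return p;
            if (errno != EAGAIN)    /* only retry the transient case */
                break;
            usleep(1000);           /* brief back-off before retrying */
        }
        return MAP_FAILED;
    }

    int main(void)
    {
        int fd = open("/dev/zero", O_RDWR);
        if (fd < 0) {
            perror("open /dev/zero");
            return 1;
        }
        size_t len = 4 * sysconf(_SC_PAGESIZE);
        void *p = map_zero_retry(fd, len, 3);
        if (p == MAP_FAILED) {
            perror("mmap /dev/zero");
            return 1;
        }
        printf("mapped %zu zero-filled bytes\n", len);
        munmap(p, len);
        close(fd);
        return 0;
    }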

-- 
S.Çağlar Onur <caglar@xxxxxxxxxxxxx>
http://cekirdek.pardus.org.tr/~caglar/

Linux is like living in a teepee. No Windows, no Gates and an Apache in house!
