
[Xen-changelog] [linux-2.6.18-xen] fix VM_FOREIGN users after c/s 878:eba6fe6d8d53


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-linux-2.6.18-xen <patchbot@xxxxxxx>
  • Date: Mon, 14 May 2012 16:29:46 +0000
  • Delivery-date: Mon, 14 May 2012 16:29:51 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1336749197 -7200
# Node ID 456451e35eca858a0ec0ae6d47febcb7038e0493
# Parent  7ba94caa95963bacd3557d276ea3362e566fb308
fix VM_FOREIGN users after c/s 878:eba6fe6d8d53

The blktap2 changes increased the level of indirection behind VM_FOREIGN
mappings, yet the existing users were not all updated accordingly. While
c/s 901:9242c5b965c1 updated one specific case in blktap, that fix was
incomplete and left out gntdev altogether.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
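
For context, the following sketch is not part of the patch; the layout of
struct vm_foreign_map is inferred from the accesses visible in the hunks
below (foreign_map->map = ..., info->foreign_maps->map[offset]). It
illustrates the extra level of indirection that c/s 878 introduced for
VM_FOREIGN VMAs, and which the changes below make blktap and gntdev follow
consistently:

        /* Illustrative sketch only -- not taken from the patch. */
        struct vm_foreign_map {
                struct page **map;      /* one struct page * per VMA page */
        };

        /*
         * Old convention (pre-878): the page array hung directly off the
         * VMA:
         *         vma->vm_private_data = pages;         (a struct page **)
         *
         * New convention (post-878):
         *         foreign_map->map     = pages;
         *         vma->vm_private_data = foreign_map;   (a struct vm_foreign_map *)
         */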


diff -r 7ba94caa9596 -r 456451e35eca drivers/xen/blktap/blktap.c
--- a/drivers/xen/blktap/blktap.c       Tue May 08 13:28:08 2012 +0200
+++ b/drivers/xen/blktap/blktap.c       Fri May 11 17:13:17 2012 +0200
@@ -120,7 +120,7 @@ typedef struct tap_blkif {
                                        [req id, idx] tuple                  */
        blkif_t *blkif;               /*Associate blkif with tapdev          */
        struct domid_translate_ext trans; /*Translation from domid to bus.   */
-       struct vm_foreign_map foreign_map;    /*Mapping page */
+       struct vm_foreign_map *foreign_maps; /*Mapping pages                 */
 } tap_blkif_t;
 
 static struct tap_blkif *tapfds[MAX_TAP_DEV];
@@ -329,7 +329,7 @@ static pte_t blktap_clear_pte(struct vm_
 
        pg = idx_to_page(mmap_idx, pending_idx, seg);
        ClearPageReserved(pg);
-       info->foreign_map.map[offset + RING_PAGES] = NULL;
+       info->foreign_maps->map[offset + RING_PAGES] = NULL;
 
        khandle = &pending_handle(mmap_idx, pending_idx, seg);
 
@@ -377,12 +377,17 @@ static pte_t blktap_clear_pte(struct vm_
 static void blktap_vma_open(struct vm_area_struct *vma)
 {
        tap_blkif_t *info;
+       unsigned long idx;
+       struct vm_foreign_map *foreign_map;
+
        if (vma->vm_file == NULL)
                return;
 
        info = vma->vm_file->private_data;
-       vma->vm_private_data =
-               &info->foreign_map.map[(vma->vm_start - info->rings_vstart) >> PAGE_SHIFT];
+       idx = (vma->vm_start - info->rings_vstart) >> PAGE_SHIFT;
+       foreign_map = &info->foreign_maps[idx];
+       foreign_map->map = &info->foreign_maps->map[idx];
+       vma->vm_private_data = foreign_map;
 }
 
 /* tricky part
@@ -392,7 +397,6 @@ static void blktap_vma_open(struct vm_ar
  */
 static void blktap_vma_close(struct vm_area_struct *vma)
 {
-       tap_blkif_t *info;
        struct vm_area_struct *next = vma->vm_next;
 
        if (next == NULL ||
@@ -402,9 +406,7 @@ static void blktap_vma_close(struct vm_a
            vma->vm_file != next->vm_file)
                return;
 
-       info = vma->vm_file->private_data;
-       next->vm_private_data =
-               &info->foreign_map.map[(next->vm_start - info->rings_vstart) >> PAGE_SHIFT];
+       blktap_vma_open(next);
 }
 
 static struct vm_operations_struct blktap_vm_ops = {
@@ -640,8 +642,9 @@ static int blktap_release(struct inode *
        mm = xchg(&info->mm, NULL);
        if (mm)
                mmput(mm);
-       kfree(info->foreign_map.map);
-       info->foreign_map.map = NULL;
+       kfree(info->foreign_maps->map);
+       kfree(info->foreign_maps);
+       info->foreign_maps = NULL;
 
        /* Free the ring page. */
        ClearPageReserved(virt_to_page(info->ufe_ring.sring));
@@ -730,14 +733,19 @@ static int blktap_mmap(struct file *filp
        }
 
        /* Mark this VM as containing foreign pages, and set up mappings. */
-       info->foreign_map.map = kzalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) *
-                           sizeof(*info->foreign_map.map), GFP_KERNEL);
-       if (info->foreign_map.map == NULL) {
+       info->foreign_maps = kcalloc(size, sizeof(*info->foreign_maps),
+                                    GFP_KERNEL);
+       if (info->foreign_maps)
+               info->foreign_maps->map =
+                       kcalloc(size, sizeof(*info->foreign_maps->map),
+                               GFP_KERNEL);
+       if (!info->foreign_maps || !info->foreign_maps->map) {
+               kfree(info->foreign_maps);
                WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
                goto fail;
        }
 
-       vma->vm_private_data = &info->foreign_map;
+       vma->vm_private_data = info->foreign_maps;
        vma->vm_flags |= VM_FOREIGN;
        vma->vm_flags |= VM_DONTCOPY;
 
@@ -1242,7 +1250,7 @@ static int blktap_read_ufe_ring(tap_blki
                        pg = idx_to_page(mmap_idx, pending_idx, j);
                        ClearPageReserved(pg);
                        offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
-                       info->foreign_map.map[offset] = NULL;
+                       info->foreign_maps->map[offset] = NULL;
                }
                fast_flush_area(pending_req, pending_idx, usr_idx, info);
                make_response(blkif, pending_req->id, res.operation,
@@ -1530,7 +1538,7 @@ static void dispatch_rw_block_io(blkif_t
                                            FOREIGN_FRAME(map[i].dev_bus_addr
                                                          >> PAGE_SHIFT));
                        offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
-                       info->foreign_map.map[offset] = pg;
+                       info->foreign_maps->map[offset] = pg;
                }
        } else {
                for (i = 0; i < nseg; i++) {
@@ -1556,7 +1564,7 @@ static void dispatch_rw_block_io(blkif_t
 
                        offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
                        pg = idx_to_page(mmap_idx, pending_idx, i);
-                       info->foreign_map.map[offset] = pg;
+                       info->foreign_maps->map[offset] = pg;
                }
        }
 
diff -r 7ba94caa9596 -r 456451e35eca drivers/xen/gntdev/gntdev.c
--- a/drivers/xen/gntdev/gntdev.c       Tue May 08 13:28:08 2012 +0200
+++ b/drivers/xen/gntdev/gntdev.c       Fri May 11 17:13:17 2012 +0200
@@ -459,6 +459,7 @@ static int gntdev_mmap (struct file *fli
        int i;
        struct page *page;
        gntdev_file_private_data_t *private_data = flip->private_data;
+       struct vm_foreign_map *foreign_map;
 
        if (unlikely(!private_data)) {
                printk(KERN_ERR "File's private data is NULL.\n");
@@ -487,6 +488,14 @@ static int gntdev_mmap (struct file *fli
                return -EINVAL;
        }
 
+       foreign_map = kmalloc(sizeof(*foreign_map), GFP_KERNEL);
+       if (!foreign_map) {
+               printk(KERN_ERR "Couldn't allocate mapping structure for VM "
+                      "area.\n");
+               return -ENOMEM;
+       }
+       foreign_map->map = &private_data->foreign_pages[slot_index];
+
        /* Slots must be in the NOT_YET_MAPPED state. */
        down_write(&private_data->grants_sem);
        for (i = 0; i < size; ++i) {
@@ -496,6 +505,7 @@ static int gntdev_mmap (struct file *fli
                               "state (%d).\n", slot_index + i, 
                               private_data->grants[slot_index + i].state);
                        up_write(&private_data->grants_sem);
+                       kfree(foreign_map);
                        return -EINVAL;
                }
        }
@@ -504,14 +514,8 @@ static int gntdev_mmap (struct file *fli
        vma->vm_ops = &gntdev_vmops;
     
        /* The VM area contains pages from another VM. */
+       vma->vm_private_data = foreign_map;
        vma->vm_flags |= VM_FOREIGN;
-       vma->vm_private_data = kzalloc(size * sizeof(struct page *),
-                                      GFP_KERNEL);
-       if (vma->vm_private_data == NULL) {
-               printk(KERN_ERR "Couldn't allocate mapping structure for VM "
-                      "area.\n");
-               return -ENOMEM;
-       }
 
        /* This flag prevents Bad PTE errors when the memory is unmapped. */
        vma->vm_flags |= VM_RESERVED;
@@ -567,11 +571,6 @@ static int gntdev_mmap (struct file *fli
                        goto undo_map_out;
                }
 
-               /* Store a reference to the page that will be mapped into user
-                * space.
-                */
-               ((struct page **) vma->vm_private_data)[i] = page;
-
                /* Mark mapped page as reserved. */
                SetPageReserved(page);
 
@@ -676,7 +675,8 @@ undo_map_out:
         * by do_mmap_pgoff(), which will eventually call gntdev_clear_pte().
         * All we need to do here is free the vma_private_data.
         */
-       kfree(vma->vm_private_data);
+       vma->vm_flags &= ~VM_FOREIGN;
+       kfree(foreign_map);
 
        /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
         * to NULL on failure. However, we need this in gntdev_clear_pte() to
@@ -780,9 +780,8 @@ static pte_t gntdev_clear_pte(struct vm_
 /* "Destructor" for a VM area.
  */
 static void gntdev_vma_close(struct vm_area_struct *vma) {
-       if (vma->vm_private_data) {
+       if (vma->vm_flags & VM_FOREIGN)
                kfree(vma->vm_private_data);
-       }
 }
 
 /* Called when an ioctl is made on the device.
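
As a rough illustration of why the extra dereference matters (again not
part of the patch, and the helper below is hypothetical rather than code
from this tree), a VM_FOREIGN consumer such as the foreign-page path in
get_user_pages() now has to go through struct vm_foreign_map instead of
casting vm_private_data to a bare page array:

        /* Hypothetical helper, for illustration only. */
        static struct page *foreign_page_at(struct vm_area_struct *vma,
                                            unsigned long addr)
        {
                struct vm_foreign_map *foreign_map = vma->vm_private_data;
                unsigned long idx = (addr - vma->vm_start) >> PAGE_SHIFT;

                BUG_ON(!(vma->vm_flags & VM_FOREIGN));
                /* With the un-fixed providers this dereference would have
                 * gone through a bare struct page ** and read garbage. */
                return foreign_map->map[idx];
        }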
