
[PATCH 3/6] drm/i915: use vmap in shmem_pin_map



shmem_pin_map somewhat awkwardly reimplements vmap using
alloc_vm_area and manual PTE setup.  The only practical difference
is that alloc_vm_area prefaults the vmalloc area PTEs, which doesn't
seem to be required here (and could be added to vmap using a flag
if actually required).

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
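For reference, a minimal sketch of the mapping pattern the new code uses;
the helper names below are illustrative and not part of the driver:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Illustrative only: map an array of already-pinned pages into one
 * contiguous kernel virtual address range, as shmem_pin_map() now does.
 */
static void *map_pinned_pages(struct page **pages, unsigned int n_pages)
{
        /* vmap() reserves the vmalloc area and installs the PTEs itself. */
        return vmap(pages, n_pages, 0, PAGE_KERNEL);
}

static void unmap_pinned_pages(void *vaddr)
{
        vunmap(vaddr);  /* tears down the mapping; the pages stay pinned */
}

The old path built the same mapping by hand: alloc_vm_area() reserved the
area and returned one PTE slot per page, which the loop then filled with
mk_pte(page, PAGE_KERNEL).
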
 drivers/gpu/drm/i915/gt/shmem_utils.c | 90 +++++++++++----------------
 1 file changed, 38 insertions(+), 52 deletions(-)
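
The double put_page() per page in the unpin path is unchanged by this
patch; a sketch of the reference accounting, with a hypothetical helper
name and added comments:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>

/*
 * Illustrative only: release one page pinned by shmem_pin_map().  The pin
 * path kept the reference returned by its shmem lookup, so releasing needs
 * two put_page() calls: one for the fresh lookup below and one for the
 * reference held since pin time.
 */
static void unpin_one_page(struct address_space *mapping, pgoff_t idx)
{
        struct page *page;

        page = shmem_read_mapping_page_gfp(mapping, idx, GFP_KERNEL);
        if (!WARN_ON(IS_ERR(page))) {
                put_page(page); /* reference taken by this lookup */
                put_page(page); /* reference held since shmem_pin_map() */
        }
}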

diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 43c7acbdc79dea..77410091597f19 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -49,80 +49,66 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
        return file;
 }
 
-static size_t shmem_npte(struct file *file)
+static size_t shmem_npages(struct file *file)
 {
        return file->f_mapping->host->i_size >> PAGE_SHIFT;
 }
 
-static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
-{
-       unsigned long pfn;
-
-       vunmap(ptr);
-
-       for (pfn = 0; pfn < n_pte; pfn++) {
-               struct page *page;
-
-               page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
-                                                  GFP_KERNEL);
-               if (!WARN_ON(IS_ERR(page))) {
-                       put_page(page);
-                       put_page(page);
-               }
-       }
-}
-
 void *shmem_pin_map(struct file *file)
 {
-       const size_t n_pte = shmem_npte(file);
-       pte_t *stack[32], **ptes, **mem;
-       struct vm_struct *area;
-       unsigned long pfn;
-
-       mem = stack;
-       if (n_pte > ARRAY_SIZE(stack)) {
-               mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
-               if (!mem)
+       const size_t n_pages = shmem_npages(file);
+       struct page **pages, *stack[32];
+       void *vaddr;
+       long i;
+
+       pages = stack;
+       if (n_pages > ARRAY_SIZE(stack)) {
+               pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+               if (!pages)
                        return NULL;
        }
 
-       area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
-       if (!area) {
-               if (mem != stack)
-                       kvfree(mem);
-               return NULL;
-       }
-
-       ptes = mem;
-       for (pfn = 0; pfn < n_pte; pfn++) {
-               struct page *page;
-
-               page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
-                                                  GFP_KERNEL);
-               if (IS_ERR(page))
+       for (i = 0; i < n_pages; i++) {
+               pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
+                                                      GFP_KERNEL);
+               if (IS_ERR(pages[i]))
                        goto err_page;
-
-               **ptes++ = mk_pte(page,  PAGE_KERNEL);
        }
 
-       if (mem != stack)
-               kvfree(mem);
+       vaddr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+       if (!vaddr)
+               goto err_page;
 
+       if (pages != stack)
+               kvfree(pages);
        mapping_set_unevictable(file->f_mapping);
-       return area->addr;
+       return vaddr;
 
 err_page:
-       if (mem != stack)
-               kvfree(mem);
-
-       __shmem_unpin_map(file, area->addr, pfn);
+       while (--i >= 0)
+               put_page(pages[i]);
+       if (pages != stack)
+               kvfree(pages);
        return NULL;
 }
 
 void shmem_unpin_map(struct file *file, void *ptr)
 {
+       long i;
+
        mapping_clear_unevictable(file->f_mapping);
-       __shmem_unpin_map(file, ptr, shmem_npte(file));
+       vunmap(ptr);
+
+       for (i = 0; i < shmem_npages(file); i++) {
+               struct page *page;
+
+               page = shmem_read_mapping_page_gfp(file->f_mapping, i,
+                                                  GFP_KERNEL);
+               if (!WARN_ON(IS_ERR(page))) {
+                       put_page(page);
+                       put_page(page);
+               }
+       }
 }
 
 static int __shmem_rw(struct file *file, loff_t off,
-- 
2.28.0