[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] Re: ATI radeon fails with "iommu=soft swiotlb=force" (seen on RV730/RV740 and RS780/RS800)



On 10/01/09 12:07, Jeremy Fitzhardinge wrote:
> Could modify drm_vmalloc_dma to do the vmalloc "manually":
>
>    1. call __get_vm_area to reserve a chunk of vmalloc address space
>    2. allocate a bunch of individual pages with dma_alloc_coherent
>    3. insert them into the vmalloc mapping with map_vm_area
>
> That will guarantee a normal-looking vmalloc area with device-friendly
> pages that subsequent pci_map_page operations will use as-is.
>   

Like this (untested):

diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index c7823c8..73bfa63 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -32,16 +32,60 @@
  */
 
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include "drmP.h"
 
 #define DEBUG_SCATTER 0
 
-static inline void *drm_vmalloc_dma(unsigned long size)
+static inline void *drm_vmalloc_dma(struct drm_device *drmdev, unsigned long size)
 {
 #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
 #else
+       /*
+        * Build a vmalloc-style area out of individually
+        * dma_alloc_coherent()ed pages, so the pages backing the
+        * scatter-gather area are device-friendly and later
+        * pci_map_page() calls can use them as-is (see thread above).
+        */
+       struct device *dev = &drmdev->pdev->dev;
+       struct vm_struct *vma;
+       struct page **pages;
+       struct page **tmp;
+       const int npages = PFN_UP(size);
+       int i;
+
+       /* kcalloc() checks npages * sizeof() for overflow, unlike kmalloc(). */
+       pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+       if (!pages)
+               return NULL;
+
+       vma = __get_vm_area(size, VM_ALLOC, VMALLOC_START, VMALLOC_END);
+       if (!vma)
+               goto out_free_pagearr; /* vma is NULL, must not vunmap() it */
+
+       for (i = 0; i < npages; i++) {
+               dma_addr_t phys;
+               void *addr;
+
+               addr = dma_alloc_coherent(dev, PAGE_SIZE, &phys, GFP_KERNEL);
+               if (addr == NULL)
+                       goto out_free_pages;
+
+               pages[i] = virt_to_page(addr);
+       }
+
+       /*
+        * map_vm_area() advances the page-array pointer it is given;
+        * pass a copy so "pages" still points at the start of the
+        * allocation when we kfree() it below.
+        */
+       tmp = pages;
+       if (map_vm_area(vma, PAGE_KERNEL, &tmp))
+               goto out_free_pages;
+
+       kfree(pages);
+
+       return vma->addr;
+
+out_free_pages:
+       /* Exactly i pages were allocated before the failure. */
+       while (i > 0) {
+               void *addr = page_address(pages[--i]);
+               /*
+                * NOTE(review): virt_to_bus(addr) only matches the handle
+                * dma_alloc_coherent() returned on direct-mapped setups;
+                * saving the per-page dma_addr_t would be more robust --
+                * confirm for the Xen/swiotlb case this thread is about.
+                */
+               dma_free_coherent(dev, PAGE_SIZE, addr, virt_to_bus(addr));
+       }
+
+       /* Releases the vm_struct and tears down any partial mapping. */
+       vunmap(vma->addr);
+
+out_free_pagearr:
+       kfree(pages);
+
+       return NULL;
 #endif
 }
 
@@ -107,7 +151,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
        }
        memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
 
-       entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+       entry->virtual = drm_vmalloc_dma(dev, pages << PAGE_SHIFT);
        if (!entry->virtual) {
                kfree(entry->busaddr);
                kfree(entry->pagelist);



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.