[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] Cleanups, and fix allocating DMA memory via HYPERVISOR_memory_op()



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID c2705e74efbaba2bf1867a7391e6b76225dd10f9
# Parent  12ff9c954aceb9c84c9e730886d3cd538a6ec56a
Cleanups, and fix allocating DMA memory via
HYPERVISOR_memory_op().
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r 12ff9c954ace -r c2705e74efba xen/common/memory.c
--- a/xen/common/memory.c       Thu Sep  8 01:07:15 2005
+++ b/xen/common/memory.c       Thu Sep  8 15:22:01 2005
@@ -31,8 +31,8 @@
     struct pfn_info *page;
     unsigned long    i;
 
-    if ( (extent_list != NULL)
-         && !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
+    if ( (extent_list != NULL) &&
+         !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
         return 0;
 
     if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
@@ -52,13 +52,14 @@
         if ( unlikely((page = alloc_domheap_pages(
             d, extent_order, flags)) == NULL) )
         {
-            DPRINTK("Could not allocate a frame id=%d %d flags=%x\n", d->domain_id, extent_order, flags);
+            DPRINTK("Could not allocate order=%d extent: id=%d flags=%x\n",
+                    extent_order, d->domain_id, flags);
             return i;
         }
 
         /* Inform the domain of the new page's machine address. */ 
-        if ( (extent_list != NULL)
-             && (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
+        if ( (extent_list != NULL) &&
+             (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
             return i;
     }
 
@@ -152,8 +153,8 @@
             reservation.extent_start += start_extent;
         reservation.nr_extents -= start_extent;
 
-        if ( unlikely(reservation.address_bits != 0)
-             && (reservation.address_bits > (get_order(max_page)+PAGE_SHIFT)) )
+        if ( (reservation.address_bits != 0) &&
+             (reservation.address_bits < (get_order(max_page) + PAGE_SHIFT)) )
         {
             if ( reservation.address_bits < 31 )
                 return -ENOMEM;
diff -r 12ff9c954ace -r c2705e74efba xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Thu Sep  8 01:07:15 2005
+++ b/xen/common/page_alloc.c   Thu Sep  8 15:22:01 2005
@@ -216,7 +216,7 @@
 #define NR_ZONES    3
 
 
-#define MAX_DMADOM_PFN 0x7FFFF /* 31 addressable bits */
+#define MAX_DMADOM_PFN 0x7FFFFUL /* 31 addressable bits */
 #define pfn_dom_zone_type(_pfn)                                 \
     (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
 
@@ -485,43 +485,40 @@
 
 void init_domheap_pages(physaddr_t ps, physaddr_t pe)
 {
+    unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;
+
     ASSERT(!in_irq());
 
-    ps = round_pgup(ps) >> PAGE_SHIFT;
-    pe = round_pgdown(pe) >> PAGE_SHIFT;
-    if ( pe <= ps )
-        return;
-
-    if ( (ps < MAX_DMADOM_PFN) && (pe > MAX_DMADOM_PFN) )
-    {
-        init_heap_pages(
-            MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
-        init_heap_pages(
-            MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN), pe - MAX_DMADOM_PFN);
-    }
-    else
-    {
-        init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps);
-    }
+    s_tot = round_pgup(ps) >> PAGE_SHIFT;
+    e_tot = round_pgdown(pe) >> PAGE_SHIFT;
+
+    s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
+    e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
+    if ( s_dma < e_dma )
+        init_heap_pages(MEMZONE_DMADOM, pfn_to_page(s_dma), e_dma - s_dma);
+
+    s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
+    e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
+    if ( s_nrm < e_nrm )
+        init_heap_pages(MEMZONE_DOM, pfn_to_page(s_nrm), e_nrm - s_nrm);
 }
 
 
 struct pfn_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int flags)
 {
-    struct pfn_info *pg;
+    struct pfn_info *pg = NULL;
     cpumask_t mask;
     int i;
 
     ASSERT(!in_irq());
 
-    pg = NULL;
-    if (! (flags & ALLOC_DOM_DMA))
+    if ( !(flags & ALLOC_DOM_DMA) )
         pg = alloc_heap_pages(MEMZONE_DOM, order);
-    if (pg == NULL) {
-        if ( unlikely((pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL) )
+
+    if ( pg == NULL )
+        if ( (pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL )
             return NULL;
-    }
 
     mask = pg->u.free.cpumask;
     tlbflush_filter(mask, pg->tlbflush_timestamp);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.