diff -rN -u -p old-xen-64-dma-pure/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c new-xen-64-dma-pure/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- old-xen-64-dma-pure/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c 2005-07-19 18:54:14.000000000 +0000
+++ new-xen-64-dma-pure/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c 2005-07-29 02:32:08.000000000 +0000
@@ -296,7 +296,7 @@ void xen_contig_memory(unsigned long vst

     /* 2. Get a new contiguous memory extent. */
     BUG_ON(HYPERVISOR_dom_mem_op(
-        MEMOP_increase_reservation, &mfn, 1, order) != 1);
+        MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);

     /* 3. Map the new extent in place of old pages. */
     for (i = 0; i < (1<<order); i++) {
diff -rN -u -p old-xen-64-dma-pure/xen/arch/x86/x86_32/mm.c new-xen-64-dma-pure/xen/arch/x86/x86_32/mm.c
@@ ... @@ void __init paging_init(void)
     for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
     {
-        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER)) == NULL )
+        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
             panic("Not enough memory to bootstrap Xen.\n");
         idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i] =
             l2e_from_page(pg, PAGE_HYPERVISOR | _PAGE_PSE);
diff -rN -u -p old-xen-64-dma-pure/xen/arch/x86/x86_64/mm.c new-xen-64-dma-pure/xen/arch/x86/x86_64/mm.c
--- old-xen-64-dma-pure/xen/arch/x86/x86_64/mm.c 2005-07-19 18:51:39.000000000 +0000
+++ new-xen-64-dma-pure/xen/arch/x86/x86_64/mm.c 2005-07-29 01:17:07.000000000 +0000
@@ -100,7 +100,7 @@ void __init paging_init(void)
      */
     for ( i = 0; i < max_page; i += ((1UL << L2_PAGETABLE_SHIFT) / 8) )
     {
-        pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER);
+        pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0);
         if ( pg == NULL )
             panic("Not enough memory for m2p table\n");
         map_pages_to_xen(
diff -rN -u -p old-xen-64-dma-pure/xen/common/dom_mem_ops.c new-xen-64-dma-pure/xen/common/dom_mem_ops.c
--- old-xen-64-dma-pure/xen/common/dom_mem_ops.c 2005-07-19 18:51:39.000000000 +0000
+++ new-xen-64-dma-pure/xen/common/dom_mem_ops.c 2005-07-29 01:17:07.000000000 +0000
@@ -37,7 +37,8 @@ alloc_dom_mem(struct domain *d,
               unsigned long *extent_list,
               unsigned long  start_extent,
               unsigned int   nr_extents,
-              unsigned int   extent_order)
+              unsigned int   extent_order,
+              unsigned int   flags)
 {
     struct pfn_info *page;
     unsigned long    i;
@@ -56,7 +57,8 @@ alloc_dom_mem(struct domain *d,
     {
         PREEMPT_CHECK(MEMOP_increase_reservation);

-        if ( unlikely((page = alloc_domheap_pages(d, extent_order)) == NULL) )
+        if ( unlikely((page = alloc_domheap_pages(d, extent_order,
+                                                  flags)) == NULL) )
         {
             DPRINTK("Could not allocate a frame\n");
             return i;
@@ -131,11 +133,16 @@ do_dom_mem_op(unsigned long op,
 {
     struct domain *d;
     unsigned long  rc, start_extent;
+    unsigned int   address_bits_order;

     /* Extract @start_extent from @op. */
     start_extent  = op >> START_EXTENT_SHIFT;
     op           &= (1 << START_EXTENT_SHIFT) - 1;

+    /* Separate extent_order and address_bits_order. */
+    address_bits_order = (extent_order >> 8) & 0xff;
+    extent_order &= 0xff;
+
     if ( unlikely(start_extent > nr_extents) )
         return -EINVAL;

@@ -150,7 +157,8 @@ do_dom_mem_op(unsigned long op,
     {
     case MEMOP_increase_reservation:
         rc = alloc_dom_mem(
-            d, extent_list, start_extent, nr_extents, extent_order);
+            d, extent_list, start_extent, nr_extents, extent_order,
+            (address_bits_order <= 32) ? ALLOC_DOM_DMA : 0);
         break;
     case MEMOP_decrease_reservation:
         rc = free_dom_mem(
diff -rN -u -p old-xen-64-dma-pure/xen/common/page_alloc.c new-xen-64-dma-pure/xen/common/page_alloc.c
--- old-xen-64-dma-pure/xen/common/page_alloc.c 2005-07-19 18:54:15.000000000 +0000
+++ new-xen-64-dma-pure/xen/common/page_alloc.c 2005-07-29 02:17:25.000000000 +0000
@@ -207,7 +207,13 @@ unsigned long alloc_boot_pages(unsigned

 #define MEMZONE_XEN 0
 #define MEMZONE_DOM 1
-#define NR_ZONES    2
+#define MEMZONE_DMADOM 2
+#define NR_ZONES    3
+
+
+#define MAX_DMADOM_PFN 0xFFFFF
+#define pfn_dom_zone_type(_pfn)                                 \
+    (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)

 /* Up to 2^20 pages can be allocated at once. */
 #define MAX_ORDER 20
@@ -236,7 +242,7 @@ void end_boot_allocator(void)
         if ( next_free )
             map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
         if ( curr_free )
-            free_heap_pages(MEMZONE_DOM, pfn_to_page(i), 0);
+            free_heap_pages(pfn_dom_zone_type(i), pfn_to_page(i), 0);
     }
 }

@@ -474,14 +480,21 @@ void init_domheap_pages(physaddr_t ps, p
 {
     ASSERT(!in_irq());

-    ps = round_pgup(ps);
-    pe = round_pgdown(pe);
+    ps = round_pgup(ps) >> PAGE_SHIFT;
+    pe = round_pgdown(pe) >> PAGE_SHIFT;

-    init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
+    if (ps < MAX_DMADOM_PFN && pe > MAX_DMADOM_PFN) {
+        init_heap_pages(MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
+        init_heap_pages(MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN),
+                        pe - MAX_DMADOM_PFN);
+    }
+    else
+        init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps);
 }


-struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
+struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order,
+                                     unsigned int flags)
 {
     struct pfn_info *pg;
     cpumask_t mask;
@@ -489,8 +502,13 @@ struct pfn_info *alloc_domheap_pages(str

     ASSERT(!in_irq());

-    if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
-        return NULL;
+    pg = NULL;
+    if (! (flags & ALLOC_DOM_DMA))
+        pg = alloc_heap_pages(MEMZONE_DOM, order);
+    if (pg == NULL) {
+        if ( unlikely((pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL) )
+            return NULL;
+    }

     mask = pg->u.free.cpumask;
     tlbflush_filter(mask, pg->tlbflush_timestamp);
@@ -531,7 +549,7 @@ struct pfn_info *alloc_domheap_pages(str
         DPRINTK("...or the domain is dying (%d)\n",
                 !!test_bit(_DOMF_dying, &d->domain_flags));
         spin_unlock(&d->page_alloc_lock);
-        free_heap_pages(MEMZONE_DOM, pg, order);
+        free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
         return NULL;
     }

@@ -596,7 +614,7 @@ void free_domheap_pages(struct pfn_info

         if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
         {
-            free_heap_pages(MEMZONE_DOM, pg, order);
+            free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
         }
         else
         {
@@ -616,7 +634,7 @@ void free_domheap_pages(struct pfn_info
     else
     {
         /* Freeing an anonymous domain-heap page. */
-        free_heap_pages(MEMZONE_DOM, pg, order);
+        free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
         drop_dom_ref = 0;
     }

@@ -627,7 +645,7 @@ void free_domheap_pages(struct pfn_info


 unsigned long avail_domheap_pages(void)
 {
-    return avail[MEMZONE_DOM];
+    return avail[MEMZONE_DOM] + avail[MEMZONE_DMADOM];
 }

@@ -676,7 +694,7 @@ static void page_scrub_softirq(void)
             p = map_domain_page(page_to_pfn(pg));
             clear_page(p);
             unmap_domain_page(p);
-            free_heap_pages(MEMZONE_DOM, pg, 0);
+            free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, 0);
         }
     } while ( (NOW() - start) < MILLISECS(1) );
 }
diff -rN -u -p old-xen-64-dma-pure/xen/include/xen/mm.h new-xen-64-dma-pure/xen/include/xen/mm.h
--- old-xen-64-dma-pure/xen/include/xen/mm.h 2005-07-19 18:51:39.000000000 +0000
+++ new-xen-64-dma-pure/xen/include/xen/mm.h 2005-07-29 01:17:07.000000000 +0000
@@ -33,12 +33,15 @@ void free_xenheap_pages(void *v, unsigne

 /* Domain suballocator. These functions are *not* interrupt-safe.*/
 void init_domheap_pages(physaddr_t ps, physaddr_t pe);
-struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
+struct pfn_info *alloc_domheap_pages(
+    struct domain *d, unsigned int order, unsigned int flags);
 void free_domheap_pages(struct pfn_info *pg, unsigned int order);
 unsigned long avail_domheap_pages(void);
-#define alloc_domheap_page(d) (alloc_domheap_pages(d,0))
+#define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
 #define free_domheap_page(p) (free_domheap_pages(p,0))

+#define ALLOC_DOM_DMA 1
+
 /* Automatic page scrubbing for dead domains. */
 extern struct list_head page_scrub_list;
 #define page_scrub_schedule_work()              \
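
For reference: MEMOP_increase_reservation now packs two values into its order argument, and the decode in do_dom_mem_op() must mirror the encode in xen_contig_memory(). Below is a minimal standalone C sketch of that convention (not part of the patch; the helper name encode_mem_op_order is invented for illustration).

#include <assert.h>
#include <stdio.h>

/* Pack an extent order and an address-width hint the way the patched
 * callers do: bits 0-7 carry extent_order, bits 8-15 carry the number
 * of address bits the caller can cope with (32 => below 4GB). */
static unsigned int encode_mem_op_order(unsigned int extent_order,
                                        unsigned int address_bits_order)
{
    return (extent_order & 0xff) | ((address_bits_order & 0xff) << 8);
}

int main(void)
{
    /* xen_contig_memory() passes order | (32<<8): one 2^order extent
     * that must be machine-addressable with 32 bits. */
    unsigned int arg = encode_mem_op_order(4, 32);

    /* do_dom_mem_op() reverses the packing before allocating. */
    unsigned int address_bits_order = (arg >> 8) & 0xff;
    unsigned int extent_order       = arg & 0xff;

    assert(extent_order == 4 && address_bits_order == 32);

    /* The hypervisor then chooses the allocator flag as in the patch. */
    printf("order=%u, address bits=%u, flags=%s\n",
           extent_order, address_bits_order,
           (address_bits_order <= 32) ? "ALLOC_DOM_DMA" : "0");
    return 0;
}

On the allocation side, ALLOC_DOM_DMA makes alloc_domheap_pages() draw directly from MEMZONE_DMADOM (pfns up to MAX_DMADOM_PFN, i.e. below 4GB with 4KB pages), while unconstrained requests try MEMZONE_DOM first and fall back to the DMA zone only when the general zone is exhausted.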