diff -r 5655c27da930 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c	Wed Mar 26 16:14:39 2008 +0800
+++ b/xen/arch/x86/domain.c	Fri Mar 28 16:18:29 2008 +0800
@@ -477,7 +477,7 @@ int arch_domain_create(struct domain *d,
 
 #else /* __x86_64__ */
 
-    if ( (pg = alloc_domheap_page(NULL)) == NULL )
+    if ( (pg = alloc_domheap_page_on_node(NULL, d->vcpu[0])) == NULL )
         goto fail;
     d->arch.mm_perdomain_l2 = page_to_virt(pg);
     clear_page(d->arch.mm_perdomain_l2);
@@ -486,7 +486,7 @@ int arch_domain_create(struct domain *d,
             l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
                           __PAGE_HYPERVISOR);
 
-    if ( (pg = alloc_domheap_page(NULL)) == NULL )
+    if ( (pg = alloc_domheap_page_on_node(NULL, d->vcpu[0])) == NULL )
         goto fail;
     d->arch.mm_perdomain_l3 = page_to_virt(pg);
     clear_page(d->arch.mm_perdomain_l3);
diff -r 5655c27da930 xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c	Wed Mar 26 16:14:39 2008 +0800
+++ b/xen/arch/x86/hvm/stdvga.c	Fri Mar 28 16:19:03 2008 +0800
@@ -513,7 +513,7 @@ void stdvga_init(struct domain *d)
 
     for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
     {
-        if ( (pg = alloc_domheap_page(NULL)) == NULL )
+        if ( (pg = alloc_domheap_page_on_node(NULL, d->vcpu[0])) == NULL )
            break;
         s->vram_page[i] = pg;
         p = map_domain_page(page_to_mfn(pg));
diff -r 5655c27da930 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c	Wed Mar 26 16:14:39 2008 +0800
+++ b/xen/arch/x86/hvm/vlapic.c	Thu Mar 27 17:06:53 2008 +0800
@@ -928,7 +928,7 @@ int vlapic_init(struct vcpu *v)
         memflags = MEMF_bits(32);
 #endif
 
-    vlapic->regs_page = alloc_domheap_pages(NULL, 0, memflags);
+    vlapic->regs_page = alloc_domheap_pages_on_node(NULL, 0, memflags, v);
     if ( vlapic->regs_page == NULL )
     {
         dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
diff -r 5655c27da930 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c	Wed Mar 26 16:14:39 2008 +0800
+++ b/xen/arch/x86/mm/hap/hap.c	Fri Mar 28 16:19:22 2008 +0800
@@ -135,7 +135,7 @@ static struct page_info *hap_alloc_p2m_p
          && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
     {
         free_domheap_page(pg);
-        pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
+        pg = alloc_domheap_pages_on_node(NULL, 0, MEMF_bits(32), d->vcpu[0]);
         if ( likely(pg != NULL) )
         {
             void *p = hap_map_domain_page(page_to_mfn(pg));
@@ -199,7 +199,7 @@ hap_set_allocation(struct domain *d, uns
         if ( d->arch.paging.hap.total_pages < pages )
         {
             /* Need to allocate more memory from domheap */
-            pg = alloc_domheap_page(NULL);
+            pg = alloc_domheap_page_on_node(NULL, d->vcpu[0]);
             if ( pg == NULL )
             {
                 HAP_PRINTK("failed to allocate hap pages.\n");
diff -r 5655c27da930 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c	Wed Mar 26 16:14:39 2008 +0800
+++ b/xen/arch/x86/mm/paging.c	Fri Mar 28 16:19:39 2008 +0800
@@ -99,7 +99,7 @@ static mfn_t paging_new_log_dirty_page(s
 static mfn_t paging_new_log_dirty_page(struct domain *d, void **mapping_p)
 {
     mfn_t mfn;
-    struct page_info *page = alloc_domheap_page(NULL);
+    struct page_info *page = alloc_domheap_page_on_node(NULL, d->vcpu[0]);
 
     if ( unlikely(page == NULL) )
     {
diff -r 5655c27da930 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c	Wed Mar 26 16:14:39 2008 +0800
+++ b/xen/arch/x86/mm/shadow/common.c	Fri Mar 28 16:25:56 2008 +0800
@@ -1249,7 +1249,7 @@ static unsigned int sh_set_allocation(st
         {
             /* Need to allocate more memory from domheap */
             sp = (struct shadow_page_info *)
-                alloc_domheap_pages(NULL, order, 0);
+                alloc_domheap_pages_on_node(NULL, order, 0, d->vcpu[0]);
             if ( sp == NULL )
             {
                 SHADOW_PRINTK("failed to allocate shadow pages.\n");
diff -r 5655c27da930 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c	Wed Mar 26 16:14:39 2008 +0800
+++ b/xen/common/page_alloc.c	Thu Mar 27 17:00:47 2008 +0800
@@ -829,6 +829,13 @@ struct page_info *alloc_domheap_pages(
     return __alloc_domheap_pages(d, smp_processor_id(), order, flags);
 }
 
+struct page_info *alloc_domheap_pages_on_node(
+    struct domain *d, unsigned int order, unsigned int flags, struct vcpu *v)
+{
+    unsigned int cpu = v ? v->processor : smp_processor_id();
+    return __alloc_domheap_pages(d, cpu, order, flags);
+}
+
 void free_domheap_pages(struct page_info *pg, unsigned int order)
 {
     int i, drop_dom_ref;
diff -r 5655c27da930 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c	Wed Mar 26 16:14:39 2008 +0800
+++ b/xen/drivers/passthrough/vtd/iommu.c	Fri Mar 28 16:25:18 2008 +0800
@@ -269,7 +269,7 @@ static struct page_info *addr_to_dma_pag
 
         if ( dma_pte_addr(*pte) == 0 )
         {
-            pg = alloc_domheap_page(NULL);
+            pg = alloc_domheap_page_on_node(NULL, domain->vcpu[0]);
             vaddr = map_domain_page(page_to_mfn(pg));
             if ( !vaddr )
             {
diff -r 5655c27da930 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h	Wed Mar 26 16:14:39 2008 +0800
+++ b/xen/include/xen/mm.h	Thu Mar 27 17:10:33 2008 +0800
@@ -54,6 +54,8 @@ void init_domheap_pages(paddr_t ps, padd
 void init_domheap_pages(paddr_t ps, paddr_t pe);
 struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int memflags);
+struct page_info *alloc_domheap_pages_on_node(
+    struct domain *d, unsigned int order, unsigned int memflags, struct vcpu *v);
 struct page_info *__alloc_domheap_pages(
     struct domain *d, unsigned int cpu, unsigned int order,
     unsigned int memflags);
@@ -62,6 +64,7 @@ unsigned long avail_domheap_pages_region
     unsigned int node, unsigned int min_width, unsigned int max_width);
 unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
+#define alloc_domheap_page_on_node(d, v) (alloc_domheap_pages_on_node(d,0,0,v))
 #define free_domheap_page(p) (free_domheap_pages(p,0))
 void scrub_heap_pages(void);