[Xen-changelog] [xen-unstable] [POWERPC][XEN] Implement guest_physmap_{add,remove}_page().
# HG changeset patch
# User Hollis Blanchard <hollisb@xxxxxxxxxx>
# Date 1172876879 21600
# Node ID b75609e1fa81b5aba1f06f5b9c6bff6bf897a6de
# Parent  f56981f78d731b60fdb9ce2ee1b78727e004f848
[POWERPC][XEN] Implement guest_physmap_{add,remove}_page().
- Use p2m array in pfn2mfn() and DOMCTL_getmemlist.
- Remove domain extent list.
- Create and use an m2p array for mfn_to_gmfn().
Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
---
 xen/arch/powerpc/domain.c           |    3 
 xen/arch/powerpc/domain_build.c     |   28 ++++-
 xen/arch/powerpc/domctl.c           |   22 ++--
 xen/arch/powerpc/iommu.c            |    3 
 xen/arch/powerpc/memory.c           |    1 
 xen/arch/powerpc/mm.c               |  189 ++++++++++++++++++------------------
 xen/arch/powerpc/ofd_fixup_memory.c |   37 +++++--
 xen/include/asm-powerpc/domain.h    |    3 
 xen/include/asm-powerpc/mm.h        |   26 ----
 9 files changed, 168 insertions(+), 144 deletions(-)

diff -r f56981f78d73 -r b75609e1fa81 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c	Fri Mar 02 17:07:01 2007 -0600
+++ b/xen/arch/powerpc/domain.c	Fri Mar 02 17:07:59 2007 -0600
@@ -88,8 +88,6 @@ int arch_domain_create(struct domain *d)
     d->arch.large_page_sizes = cpu_large_page_orders(
         d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));

-    INIT_LIST_HEAD(&d->arch.extent_list);
-
     d->arch.foreign_mfn_count = 1024;
     d->arch.foreign_mfns = xmalloc_array(uint, d->arch.foreign_mfn_count);
     BUG_ON(d->arch.foreign_mfns == NULL);
@@ -311,7 +309,6 @@ void domain_relinquish_resources(struct 
 {
     relinquish_memory(d, &d->xenpage_list);
     relinquish_memory(d, &d->page_list);
-    free_extents(d);
     xfree(d->arch.foreign_mfns);
     xfree(d->arch.p2m);
     return;

diff -r f56981f78d73 -r b75609e1fa81 xen/arch/powerpc/domain_build.c
--- a/xen/arch/powerpc/domain_build.c	Fri Mar 02 17:07:01 2007 -0600
+++ b/xen/arch/powerpc/domain_build.c	Fri Mar 02 17:07:59 2007 -0600
@@ -16,6 +16,8 @@
  * Copyright IBM Corp. 2005, 2007
  *
  * Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ *          Ryan Harper <ryanh@xxxxxxxxxx>
+ *          Hollis Blanchard <hollisb@xxxxxxxxxx>
  */

 #include <xen/config.h>
@@ -27,7 +29,9 @@
 #include <xen/shadow.h>
 #include <xen/domain.h>
 #include <xen/version.h>
+#include <xen/shadow.h>
 #include <asm/processor.h>
+#include <asm/platform.h>
 #include <asm/papr.h>
 #include <public/arch-powerpc.h>
 #include <public/libelf.h>
@@ -73,6 +77,7 @@ int construct_dom0(struct domain *d,
     ulong mod_start = 0;
     ulong mod_len = 0;
     ulong shared_info_addr;
+    uint extent_size = 1 << cpu_extent_order();

     /* Sanity! */
     BUG_ON(d->domain_id != 0);
@@ -110,12 +115,31 @@ int construct_dom0(struct domain *d,
         dom0_nrpages = CONFIG_MIN_DOM0_PAGES;
     }

-    /* DOM0 has to be at least RMA size. */
+    /* Dom0 has to be at least RMA size. */
     if (dom0_nrpages < rma_nrpages) {
         dom0_nrpages = rma_nrpages;
-        printk("Forcing DOM0 memory size to %u MiB\n",
+        printk("Increasing DOM0 memory size to %u MiB for RMA.\n",
                ((rma_nrpages << PAGE_SHIFT) >> 20));
     }
+
+    /* Ensure Dom0 is cpu_extent_order aligned. Round up if
+       not and let user know we did so. */
+    if (dom0_nrpages != ALIGN_UP(dom0_nrpages, extent_size)) {
+        dom0_nrpages = ALIGN_UP(dom0_nrpages, extent_size);
+        printk("Increasing DOM0 memory size to %u MiB for large pages.\n",
+               ((dom0_nrpages << PAGE_SHIFT) >> 20));
+    }
+
+    /* XXX Dom0 currently can't extend past the IO hole. */
+    if (dom0_nrpages > (platform_iohole_base() >> PAGE_SHIFT)) {
+        dom0_nrpages = (platform_iohole_base() >> PAGE_SHIFT);
+        printk("Limiting DOM0 memory size to %u MiB to avoid IO hole.\n",
+               ((dom0_nrpages << PAGE_SHIFT) >> 20));
+    }
+
+    /* Set Dom0 max mem, triggering p2m table creation. */
+    if ((guest_physmap_max_mem_pages(d, dom0_nrpages)) != 0)
+        panic("Failed to set DOM0 max mem pages value\n");

     d->max_pages = dom0_nrpages;
     if (0 > allocate_rma(d, cpu_default_rma_order_pages()))
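The dom0 sizing above is plain power-of-two rounding to the extent size. A minimal
stand-alone sketch of that arithmetic (ALIGN_UP is not defined in this patch; the
conventional round-up-to-multiple macro is assumed here, and the extent size and page
count below are invented values for illustration):

    /* Sketch of the extent-size rounding applied to dom0_nrpages above.
     * ALIGN_UP(x, a) rounds x up to the next multiple of the power of two a. */
    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long extent_size = 1UL << 12; /* stand-in for 1 << cpu_extent_order() */
        unsigned long dom0_nrpages = 5000;     /* requested pages, not extent-aligned */

        if (dom0_nrpages != ALIGN_UP(dom0_nrpages, extent_size)) {
            dom0_nrpages = ALIGN_UP(dom0_nrpages, extent_size);
            printf("Rounded up to %lu pages\n", dom0_nrpages); /* prints 8192 */
        }
        return 0;
    }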
diff -r f56981f78d73 -r b75609e1fa81 xen/arch/powerpc/domctl.c
--- a/xen/arch/powerpc/domctl.c	Fri Mar 02 17:07:01 2007 -0600
+++ b/xen/arch/powerpc/domctl.c	Fri Mar 02 17:07:59 2007 -0600
@@ -13,9 +13,10 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
- * Copyright (C) IBM Corp. 2005
+ * Copyright IBM Corp. 2005, 2007
  *
  * Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ *          Ryan Harper <ryanh@xxxxxxxxxx>
  */

 #include <xen/config.h>
@@ -50,7 +51,6 @@ long arch_do_domctl(struct xen_domctl *d
         struct domain *d = get_domain_by_id(domctl->domain);
         unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
         uint64_t mfn;
-        struct list_head *list_ent;

         ret = -EINVAL;
         if ( d != NULL )
@@ -58,18 +58,20 @@ long arch_do_domctl(struct xen_domctl *d
             ret = 0;

             spin_lock(&d->page_alloc_lock);
-            list_ent = d->page_list.next;
-            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
-            {
-                mfn = page_to_mfn(list_entry(
-                    list_ent, struct page_info, list));
-                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
-                                          i, &mfn, 1) )
+            for (i = 0; i < max_pfns; i++) {
+                /* bail if index is beyond p2m size */
+                if (i >= d->arch.p2m_entries)
+                    break;
+
+                /* translate */
+                mfn = d->arch.p2m[i];
+
+                if (copy_to_guest_offset(domctl->u.getmemlist.buffer,
+                                         i, &mfn, 1)) {
                     ret = -EFAULT;
                     break;
                 }
-                list_ent = mfn_to_page(mfn)->list.next;
             }
             spin_unlock(&d->page_alloc_lock);
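The new getmemlist loop indexes the p2m array directly instead of walking d->page_list,
which also means the returned MFNs are now ordered by guest PFN rather than by
allocation order. A minimal stand-alone sketch of the walk, with a plain array standing
in for d->arch.p2m and d->arch.p2m_entries (contents are made up):

    /* Sketch of the p2m-indexed getmemlist walk above. */
    #include <stdio.h>

    #define INVALID_MFN (~0UL)

    static unsigned long p2m[] = { 0x1000, 0x1001, INVALID_MFN, 0x2000 };
    static unsigned long p2m_entries = sizeof(p2m) / sizeof(p2m[0]);

    int main(void)
    {
        unsigned long max_pfns = 8; /* caller-supplied bound */
        unsigned long i;

        for (i = 0; i < max_pfns; i++) {
            if (i >= p2m_entries) /* bail if index is beyond p2m size */
                break;
            printf("pfn 0x%lx -> mfn 0x%lx\n", i, p2m[i]);
        }
        return 0;
    }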
diff -r f56981f78d73 -r b75609e1fa81 xen/arch/powerpc/iommu.c
--- a/xen/arch/powerpc/iommu.c	Fri Mar 02 17:07:01 2007 -0600
+++ b/xen/arch/powerpc/iommu.c	Fri Mar 02 17:07:59 2007 -0600
@@ -13,7 +13,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
- * Copyright (C) IBM Corp. 2005
+ * Copyright IBM Corp. 2005, 2007
  *
  * Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
  */
@@ -62,7 +62,6 @@ int iommu_put(u32 buid, ulong ioba, unio
     mfn = pfn2mfn(d, gmfn, &mtype);
     if (mfn != INVALID_MFN) {
         switch (mtype) {
-        case PFN_TYPE_RMA:
         case PFN_TYPE_LOGICAL:
             break;
         case PFN_TYPE_FOREIGN:

diff -r f56981f78d73 -r b75609e1fa81 xen/arch/powerpc/memory.c
--- a/xen/arch/powerpc/memory.c	Fri Mar 02 17:07:01 2007 -0600
+++ b/xen/arch/powerpc/memory.c	Fri Mar 02 17:07:59 2007 -0600
@@ -176,6 +176,7 @@ void memory_init(module_t *mod, int mcou
     DBG("total_pages: 0x%016lx\n", total_pages);

     init_frametable();
+    init_machine_to_phys_table();

     numa_initmem_init(0, max_page);

diff -r f56981f78d73 -r b75609e1fa81 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c	Fri Mar 02 17:07:01 2007 -0600
+++ b/xen/arch/powerpc/mm.c	Fri Mar 02 17:07:59 2007 -0600
@@ -17,6 +17,7 @@
  *
  * Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
  *          Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ *          Ryan Harper <ryanh@xxxxxxxxxx>
  */

 #include <xen/config.h>
@@ -29,6 +30,7 @@
 #include <asm/page.h>
 #include <asm/platform.h>
 #include <asm/string.h>
+#include <asm/platform.h>
 #include <public/arch-powerpc.h>

 #ifdef VERBOSE
@@ -44,6 +46,9 @@ unsigned long max_page;
 unsigned long max_page;
 unsigned long total_pages;

+/* machine to phys mapping used by all domains */
+unsigned long *machine_phys_mapping;
+
 void __init init_frametable(void)
 {
     unsigned long p;
@@ -57,6 +62,24 @@ void __init init_frametable(void)
         panic("Not enough memory for frame table\n");

     frame_table = (struct page_info *)(p << PAGE_SHIFT);
+    for (i = 0; i < nr_pages; i += 1)
+        clear_page((void *)((p + i) << PAGE_SHIFT));
+}
+
+/* Array of PFNs, indexed by MFN. */
+void __init init_machine_to_phys_table(void)
+{
+    unsigned long p;
+    unsigned long nr_pages;
+    int i;
+
+    nr_pages = PFN_UP(max_page * sizeof(unsigned long));
+
+    p = alloc_boot_pages(nr_pages, 1);
+    if (p == 0)
+        panic("Not enough memory for machine phys mapping table\n");
+
+    machine_phys_mapping = (unsigned long *)(p << PAGE_SHIFT);
     for (i = 0; i < nr_pages; i += 1)
         clear_page((void *)((p + i) << PAGE_SHIFT));
 }
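init_machine_to_phys_table() sizes the m2p table at one unsigned long per machine
frame, rounded up to whole pages. A small sketch of that arithmetic (PAGE_SHIFT and the
frame count are illustrative values, and PFN_UP is assumed to be the usual
round-up-to-pages helper):

    /* Sketch of the m2p table sizing used above. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long max_page = 0x40000; /* e.g. 1 GiB of 4 KiB frames */
        unsigned long nr_pages = PFN_UP(max_page * sizeof(unsigned long));

        /* 0x40000 frames * 8 bytes = 2 MiB of table = 512 pages */
        printf("m2p table: %lu pages\n", nr_pages);
        return 0;
    }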
@@ -290,46 +313,16 @@ extern void copy_page(void *dp, void *sp
     }
 }

-/* XXX should probably replace with faster data structure */
-static uint add_extent(struct domain *d, struct page_info *pg, uint order)
-{
-    struct page_extents *pe;
-
-    pe = xmalloc(struct page_extents);
-    if (pe == NULL)
-        return -ENOMEM;
-
-    pe->pg = pg;
-    pe->order = order;
-
-    list_add_tail(&pe->pe_list, &d->arch.extent_list);
-
-    return 0;
-}
-
-void free_extents(struct domain *d)
-{
-    /* we just need to free the memory behind list */
-    struct list_head *list;
-    struct list_head *ent;
-    struct list_head *next;
-
-    list = &d->arch.extent_list;
-    ent = list->next;
-
-    while (ent != list) {
-        next = ent->next;
-        xfree(ent);
-        ent = next;
-    }
-}
-
+/* Allocate (nrpages - rma_nrpages) more memory for domain in proper size. */
 uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
 {
+    struct page_info *pg;
+    ulong mfn;
+    ulong gpfn = rma_nrpages; /* starting PFN at end of RMA */
     uint ext_order;
     uint ext_nrpages;
     uint total_nrpages;
-    struct page_info *pg;
+    int i;

     ext_order = cpu_extent_order();
     ext_nrpages = 1 << ext_order;
@@ -337,16 +330,20 @@ uint allocate_extents(struct domain *d, 
     total_nrpages = rma_nrpages;

     /* We only allocate in nr_extsz chunks so if you are not divisible
-     * you get more than you asked for */
+     * you get more than you asked for. */
     while (total_nrpages < nrpages) {
         pg = alloc_domheap_pages(d, ext_order, 0);
         if (pg == NULL)
             return total_nrpages;

-        if (add_extent(d, pg, ext_order) < 0) {
-            free_domheap_pages(pg, ext_order);
-            return total_nrpages;
-        }
+        /* Build p2m mapping for newly allocated extent. */
+        mfn = page_to_mfn(pg);
+        for (i = 0; i < (1 << ext_order); i++)
+            guest_physmap_add_page(d, gpfn + i, mfn + i);
+
+        /* Bump starting PFN by extent size pages. */
+        gpfn += ext_nrpages;
+
         total_nrpages += ext_nrpages;
     }

@@ -358,6 +355,7 @@ int allocate_rma(struct domain *d, unsig
     struct vcpu *v;
     ulong rma_base;
     ulong rma_sz;
+    ulong mfn;
     int i;

     if (d->arch.rma_page)
@@ -379,10 +377,14 @@ int allocate_rma(struct domain *d, unsig
     printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
            d->domain_id, rma_base, rma_sz);

+    mfn = page_to_mfn(d->arch.rma_page);
+
     for (i = 0; i < (1 << d->arch.rma_order); i++ ) {
-        /* Add in any extra CPUs that need flushing because of this page. */
         d->arch.rma_page[i].count_info |= PGC_page_RMA;
         clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));
+
+        /* Set up p2m mapping for RMA. */
+        guest_physmap_add_page(d, i, mfn+i);
     }

     /* shared_info uses last page of RMA */
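allocate_extents() now records each freshly allocated extent in the p2m as it goes, so
later lookups become a single array index. A simplified stand-alone sketch of that
population step (the p2m array and add_page() below are stand-ins for the domain state
and guest_physmap_add_page(), with made-up sizes):

    /* Sketch of the per-extent p2m population performed above. */
    #include <stdio.h>

    #define P2M_ENTRIES 64
    #define INVALID_MFN (~0UL)

    static unsigned long p2m[P2M_ENTRIES];

    static void add_page(unsigned long gpfn, unsigned long mfn)
    {
        if (gpfn < P2M_ENTRIES)
            p2m[gpfn] = mfn;
    }

    int main(void)
    {
        unsigned long gpfn = 16;     /* first PFN past the RMA */
        unsigned long ext_order = 2; /* 4-page extents */
        unsigned long mfn = 0x8000;  /* base MFN of an allocated extent */
        unsigned long i;

        for (i = 0; i < P2M_ENTRIES; i++)
            p2m[i] = INVALID_MFN;

        for (i = 0; i < (1UL << ext_order); i++)
            add_page(gpfn + i, mfn + i);
        gpfn += 1UL << ext_order; /* bump starting PFN by extent size */

        printf("p2m[16] = 0x%lx, next gpfn = %lu\n", p2m[16], gpfn);
        return 0;
    }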
@@ -406,9 +408,6 @@ void free_rma_check(struct page_info *pa

 ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
 {
-    ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
-    ulong rma_size_mfn = 1UL << d->arch.rma_order;
-    struct page_extents *pe;
     ulong mfn = INVALID_MFN;
     int t = PFN_TYPE_NONE;
     ulong foreign_map_pfn = 1UL << cpu_foreign_map_order();
@@ -431,23 +430,9 @@ ulong pfn2mfn(struct domain *d, ulong pf
         t = PFN_TYPE_IO;
         mfn = pfn;
     } else {
-        if (pfn < rma_size_mfn) {
-            t = PFN_TYPE_RMA;
-            mfn = pfn + rma_base_mfn;
-        } else {
-            ulong cur_pfn = rma_size_mfn;
-
-            list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
-                uint pe_pages = 1UL << pe->order;
-                uint end_pfn = cur_pfn + pe_pages;
-
-                if (pfn >= cur_pfn && pfn < end_pfn) {
-                    t = PFN_TYPE_LOGICAL;
-                    mfn = page_to_mfn(pe->pg) + (pfn - cur_pfn);
-                    break;
-                }
-                cur_pfn += pe_pages;
-            }
+        if (pfn < d->arch.p2m_entries) {
+            t = PFN_TYPE_LOGICAL;
+            mfn = d->arch.p2m[pfn];
         }
 #ifdef DEBUG
         if (t != PFN_TYPE_NONE &&
@@ -496,10 +481,12 @@ ulong pfn2mfn(struct domain *d, ulong pf

 unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn)
 {
-    struct page_extents *pe;
-    ulong cur_pfn;
+    struct page_info *pg = mfn_to_page(mfn);
     ulong gnttab_mfn;
-    ulong rma_mfn;
+
+    /* is this our mfn? */
+    if (page_get_owner(pg) != d)
+        return INVALID_M2P_ENTRY;

     /* XXX access d->grant_table->nr_grant_frames without lock.
      * Currently on powerpc dynamic expanding grant table is
      *
@@ -516,24 +503,8 @@ unsigned long mfn_to_gmfn(struct domain 
     if (d->is_privileged && platform_io_mfn(mfn))
         return mfn;

-    rma_mfn = page_to_mfn(d->arch.rma_page);
-    if (mfn >= rma_mfn &&
-        mfn < (rma_mfn + (1 << d->arch.rma_order)))
-        return mfn - rma_mfn;
-
-    /* Extent? */
-    cur_pfn = 1UL << d->arch.rma_order;
-    list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
-        uint pe_pages = 1UL << pe->order;
-        uint b_mfn = page_to_mfn(pe->pg);
-        uint e_mfn = b_mfn + pe_pages;
-
-        if (mfn >= b_mfn && mfn < e_mfn) {
-            return cur_pfn + (mfn - b_mfn);
-        }
-        cur_pfn += pe_pages;
-    }
-    return INVALID_M2P_ENTRY;
+    /* check m2p table */
+    return get_gpfn_from_mfn(mfn);
 }

 /* NB: caller holds d->page_alloc lock, sets d->max_pages = new_max */
@@ -580,21 +551,53 @@ void guest_physmap_add_page(
 void guest_physmap_add_page(
     struct domain *d, unsigned long gpfn, unsigned long mfn)
 {
-    printk("%s(%d, 0x%lx, 0x%lx)\n", __func__, d->domain_id, gpfn, mfn);
-}
+    if (page_get_owner(mfn_to_page(mfn)) != d) {
+        printk("Won't map foreign MFN 0x%lx for DOM%d\n", mfn, d->domain_id);
+        return;
+    }
+
+    /* Check that pfn is within guest table. */
+    if (gpfn >= d->arch.p2m_entries) {
+        printk("Won't map invalid PFN 0x%lx for DOM%d\n", gpfn, d->domain_id);
+        return;
+    }
+
+    /* Warn if there is an existing mapping. */
+    /* XXX: probably shouldn't let this happen, but
+       current interface doesn't throw errors. =( */
+    if (d->arch.p2m[gpfn] != INVALID_MFN)
+        printk("Ack! PFN aliased. PFN%lx, old MFN=%lx, new MFN=%lx\n",
+               gpfn, d->arch.p2m[gpfn], mfn);
+
+    /* PFN and MFN ok, map in p2m table. */
+    d->arch.p2m[gpfn] = mfn;
+
+    /* Map in m2p table. */
+    set_gpfn_from_mfn(mfn, gpfn);
+}

 void guest_physmap_remove_page(
     struct domain *d, unsigned long gpfn, unsigned long mfn)
 {
-    panic("%s\n", __func__);
-}
+    if (page_get_owner(mfn_to_page(mfn)) != d) {
+        printk("Won't unmap foreign MFN 0x%lx for DOM%d\n", mfn, d->domain_id);
+        return;
+    }
+
+    /* check that pfn is within guest table */
+    if (gpfn >= d->arch.p2m_entries) {
+        printk("Won't unmap invalid PFN 0x%lx for DOM%d\n", gpfn, d->domain_id);
+        return;
+    }
+
+    /* PFN and MFN ok, unmap from p2m table. */
+    d->arch.p2m[gpfn] = INVALID_MFN;
+
+    /* Unmap from m2p table. */
+    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+}

 void shadow_drop_references(
     struct domain *d, struct page_info *page)
 {
 }
-
-int arch_domain_add_extent(struct domain *d, struct page_info *page, int order)
-{
-    if (add_extent(d, page, order) < 0)
-        return -ENOMEM;
-    return 0;
-}
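The heart of the change is that guest_physmap_{add,remove}_page() keep two arrays in
sync: the per-domain p2m (guest PFN to MFN) and the global m2p (MFN to guest PFN), so
pfn2mfn() and mfn_to_gmfn() agree with each other. A stand-alone sketch of that paired
bookkeeping, with illustrative table sizes in place of the real domain and machine
state:

    /* Sketch of the p2m/m2p pairing maintained by the functions above. */
    #include <stdio.h>

    #define INVALID_MFN       (~0UL)
    #define INVALID_M2P_ENTRY (~0UL)
    #define NR_GPFNS 32
    #define NR_MFNS  64

    static unsigned long p2m[NR_GPFNS]; /* per-domain, indexed by guest PFN */
    static unsigned long m2p[NR_MFNS];  /* global, indexed by MFN */

    static void physmap_add(unsigned long gpfn, unsigned long mfn)
    {
        if (gpfn >= NR_GPFNS)
            return;                   /* PFN outside guest table */
        if (p2m[gpfn] != INVALID_MFN)
            printf("warning: PFN 0x%lx aliased\n", gpfn);
        p2m[gpfn] = mfn;              /* map in p2m */
        m2p[mfn] = gpfn;              /* map in m2p */
    }

    static void physmap_remove(unsigned long gpfn, unsigned long mfn)
    {
        if (gpfn >= NR_GPFNS)
            return;
        p2m[gpfn] = INVALID_MFN;      /* unmap from p2m */
        m2p[mfn] = INVALID_M2P_ENTRY; /* unmap from m2p */
    }

    int main(void)
    {
        unsigned long i;
        for (i = 0; i < NR_GPFNS; i++) p2m[i] = INVALID_MFN;
        for (i = 0; i < NR_MFNS; i++)  m2p[i] = INVALID_M2P_ENTRY;

        physmap_add(3, 40);
        printf("p2m[3]=0x%lx m2p[40]=0x%lx\n", p2m[3], m2p[40]);
        physmap_remove(3, 40);
        printf("p2m[3]=0x%lx m2p[40]=0x%lx\n", p2m[3], m2p[40]);
        return 0;
    }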
diff -r f56981f78d73 -r b75609e1fa81 xen/arch/powerpc/ofd_fixup_memory.c
--- a/xen/arch/powerpc/ofd_fixup_memory.c	Fri Mar 02 17:07:01 2007 -0600
+++ b/xen/arch/powerpc/ofd_fixup_memory.c	Fri Mar 02 17:07:59 2007 -0600
@@ -13,14 +13,16 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2007
  *
  * Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ *          Ryan Harper <ryanh@xxxxxxxxxx>
  */

 #include <xen/config.h>
 #include <xen/lib.h>
 #include <xen/sched.h>
+#include <asm/platform.h>
 #include <public/xen.h>
 #include "of-devtree.h"
 #include "oftree.h"
@@ -87,19 +89,34 @@ static void ofd_memory_extent_nodes(void
     ulong start;
     ulong size;
     ofdn_t n;
-    struct page_extents *pe;
     ulong cur_pfn = 1UL << d->arch.rma_order;

-    start = cur_pfn << PAGE_SHIFT;
-    size = 0;
-    list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
+    /* if dom0 > 2G, shift ram past IO hole */
+    if ((d->tot_pages << PAGE_SHIFT) > platform_iohole_base()) {
+        /* memory@RMA up to IO hole */
+        start = cur_pfn << PAGE_SHIFT;
+        size = platform_iohole_base() - (cur_pfn << PAGE_SHIFT);
+        n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
+                                   start, size);

-        size += 1UL << (pe->order + PAGE_SHIFT);
-        if (pe->order != cpu_extent_order())
-            panic("we don't handle this yet\n");
+        BUG_ON(n <= 0);
+
+        /* XXX Our p2m translation currently doesn't allow dom0 memory above
+         * the IO hole. */
+#if 0
+        /* remaining memory shifted up to memory@IOHOLE_END */
+        start = platform_iohole_base()+platform_iohole_size();
+        size = (d->tot_pages << PAGE_SHIFT) - platform_iohole_base();
+        n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
+                                   start, size);
+#endif
+    } else {
+        /* we fit beneath the IO hole as one chunk */
+        start = cur_pfn << PAGE_SHIFT;
+        size = (d->tot_pages - cur_pfn) << PAGE_SHIFT;
+        n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
+                                   start, size);
     }
-    n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
-                               start, size);

     BUG_ON(n <= 0);
 }
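The device-tree fixup above clamps dom0's first memory node at the IO hole base when
the domain would otherwise reach into the hole. A small sketch of that node arithmetic
(the hole base and page counts are invented values, not platform constants):

    /* Sketch of the memory-node range computation above. */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long iohole_base = 0x80000000UL; /* assumed 2 GiB hole base */
        unsigned long rma_pages   = 0x10000;      /* 256 MiB RMA */
        unsigned long tot_pages   = 0x90000;      /* 2.25 GiB total */
        unsigned long start = rma_pages << PAGE_SHIFT;
        unsigned long size;

        if ((tot_pages << PAGE_SHIFT) > iohole_base)
            size = iohole_base - start;           /* memory@RMA up to hole */
        else
            size = (tot_pages - rma_pages) << PAGE_SHIFT;

        printf("node: start=0x%lx size=0x%lx\n", start, size);
        return 0;
    }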
diff -r f56981f78d73 -r b75609e1fa81 xen/include/asm-powerpc/domain.h
--- a/xen/include/asm-powerpc/domain.h	Fri Mar 02 17:07:01 2007 -0600
+++ b/xen/include/asm-powerpc/domain.h	Fri Mar 02 17:07:59 2007 -0600
@@ -37,9 +37,6 @@ struct arch_domain {
      * processor is in real mode */
     struct page_info *rma_page;
     uint rma_order;
-
-    /* list of extents beyond RMA */
-    struct list_head extent_list;

     uint foreign_mfn_count;
     uint *foreign_mfns;

diff -r f56981f78d73 -r b75609e1fa81 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h	Fri Mar 02 17:07:01 2007 -0600
+++ b/xen/include/asm-powerpc/mm.h	Fri Mar 02 17:07:59 2007 -0600
@@ -79,15 +79,6 @@ struct page_info
     } u;

-};
-
-struct page_extents {
-    /* Each frame can be threaded onto a doubly-linked list. */
-    struct list_head pe_list;
-
-    /* page extent */
-    struct page_info *pg;
-    uint order;
 };

 /* The following page types are MUTUALLY EXCLUSIVE. */
@@ -145,6 +136,7 @@ extern unsigned long max_page;
 extern unsigned long max_page;
 extern unsigned long total_pages;
 void init_frametable(void);
+void init_machine_to_phys_table(void);

 void free_rma_check(struct page_info *page);

 static inline void put_page(struct page_info *page)
@@ -226,14 +218,12 @@ typedef struct {
 } vm_assist_info_t;
 extern vm_assist_info_t vm_assist_info[];

-/* hope that accesses to this will fail spectacularly */
-#undef machine_to_phys_mapping
+extern unsigned long *machine_phys_mapping;
+#define machine_to_phys_mapping (machine_phys_mapping)

 #define INVALID_M2P_ENTRY (~0UL)
-/* do nothing, its all calculated */
-#define set_gpfn_from_mfn(mfn, pfn) do { } while (0)
-#define get_gpfn_from_mfn(mfn) (mfn)
+#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
+#define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)])

 extern unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn);

@@ -243,7 +233,6 @@ extern unsigned long paddr_to_maddr(unsi
 #define INVALID_MFN (~0U)

 #define PFN_TYPE_NONE 0
-#define PFN_TYPE_RMA 1
 #define PFN_TYPE_LOGICAL 2
 #define PFN_TYPE_IO 3
 #define PFN_TYPE_FOREIGN 4
@@ -258,7 +247,6 @@ static inline unsigned long gmfn_to_mfn(
     mfn = pfn2mfn(d, gmfn, &mtype);
     if (mfn != INVALID_MFN) {
         switch (mtype) {
-        case PFN_TYPE_RMA:
         case PFN_TYPE_LOGICAL:
             break;
         default:
@@ -280,10 +268,6 @@ long arch_memory_op(int op, XEN_GUEST_HA
 extern int allocate_rma(struct domain *d, unsigned int order_pages);
 extern uint allocate_extents(struct domain *d, uint nrpages,
                              uint rma_nrpages);
-extern void free_extents(struct domain *d);
-
-extern int arch_domain_add_extent(struct domain *d, struct page_info *page,
-                                  int order);

 extern int steal_page(struct domain *d, struct page_info *page,
                       unsigned int memflags);
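With mm.h now backing the m2p macros by a real array, set_gpfn_from_mfn() stores and
get_gpfn_from_mfn() loads, replacing the old no-op/identity pair. A stand-alone sketch
of the round trip (the table size is illustrative):

    /* Sketch of the array-backed m2p macros introduced above. */
    #include <stdio.h>

    #define INVALID_M2P_ENTRY (~0UL)

    static unsigned long machine_phys_mapping[128];
    #define machine_to_phys_mapping (machine_phys_mapping)

    #define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
    #define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])

    int main(void)
    {
        unsigned long i;
        for (i = 0; i < 128; i++)
            machine_phys_mapping[i] = INVALID_M2P_ENTRY;

        set_gpfn_from_mfn(42, 7); /* MFN 42 -> guest PFN 7 */
        printf("gpfn(42) = %lu\n", get_gpfn_from_mfn(42));
        return 0;
    }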