Re: [Xen-devel] 32bit-pv-VM 128G memory limited
This should all already happen by default.

 -- Keir

On 02/11/2009 07:49, "James Song" <jsong@xxxxxxxxxx> wrote:

> Hi,
>
> 32-bit pv-domUs require memory below the 128G boundary (IIRC), but the
> tools don't enforce this. So we need a "memory pool" for 32-bit pv-domUs:
> when starting a 32-bit domU, allocate memory from this pool. When starting
> a 64-bit domU or a 32-bit HVM domain (which don't suffer the 128G boundary
> limitation), allocate memory from above the boundary first, only allocating
> from the lower pool when needed.
>
> Thanks,
> James (Song Wei)
>
> Signed-off-by: James Song Wei <jsong@xxxxxxxxxx>
> diff -r 059c01d69a08 tools/libxc/xc_dom_x86.c
> --- a/tools/libxc/xc_dom_x86.c  Thu Oct 29 14:48:28 2009 +0000
> +++ b/tools/libxc/xc_dom_x86.c  Mon Nov 02 15:37:42 2009 +0800
> @@ -639,7 +639,7 @@
>      xc_dom_register_arch_hooks(&xc_dom_64);
>  }
>
> -static int x86_compat(int xc, domid_t domid, char *guest_type)
> +static int x86_compat(int xc, domid_t domid, char *guest_type, int *guest_size)
>  {
>      static const struct {
>          char *guest;
> @@ -660,6 +660,7 @@
>      if ( domctl.u.address_size.size == 0 )
>          /* nothing to do */
>          return 0;
> +    *guest_size = domctl.u.address_size.size;
>
>      xc_dom_printf("%s: guest %s, address size %" PRId32 "\n", __FUNCTION__,
>                    guest_type, domctl.u.address_size.size);
> @@ -696,10 +697,11 @@
>
>  int arch_setup_meminit(struct xc_dom_image *dom)
>  {
> -    int rc;
> +    int rc, host_64bit = 0, mem_128_limit = 0, guest_size = 0;
>      xen_pfn_t pfn, allocsz, i, j, mfn;
> +    xc_physinfo_t put_info;
>
> -    rc = x86_compat(dom->guest_xc, dom->guest_domid, dom->guest_type);
> +    rc = x86_compat(dom->guest_xc, dom->guest_domid, dom->guest_type, &guest_size);
>      if ( rc )
>          return rc;
>      if ( xc_dom_feature_translated(dom) )
> @@ -740,6 +742,12 @@
>          for ( pfn = 0; pfn < dom->total_pages; pfn++ )
>              dom->p2m_host[pfn] = pfn;
>
> +        xc_physinfo(dom->guest_xc, &put_info);
> +        if ( (put_info.total_pages * (XC_PAGE_SIZE / 1024)) / (1024 * 1024) > 128 )
> +            mem_128_limit = 1;
> +
> +        if ( strstr(dom->xen_caps, "x86_64") != NULL )
> +            host_64bit = 1;
>          /* allocate guest memory */
>          for ( i = rc = allocsz = 0;
>                (i < dom->total_pages) && !rc;
> @@ -751,6 +759,22 @@
>              rc = xc_domain_memory_populate_physmap(
>                  dom->guest_xc, dom->guest_domid, allocsz,
>                  0, 0, &dom->p2m_host[i]);
> +            if ( (host_64bit == 1) && (mem_128_limit) )
> +            {
> +                if ( guest_size == 32 ) /* 32bit guest */
> +                    rc = xc_domain_memory_populate_physmap(
> +                        dom->guest_xc, dom->guest_domid, allocsz, 0,
> +                        XENMEMF_address_bits(37), &dom->p2m_host[i]);
> +                else if ( guest_size == 64 ) /* 64bit guest */
> +                    rc = xc_domain_memory_populate_physmap(
> +                        dom->guest_xc, dom->guest_domid, allocsz, 0,
> +                        XENMEMF_address_bits(37) | XENMEMF_above_bits, &dom->p2m_host[i]);
> +                if ( rc != 0 )
> +                    rc = xc_domain_memory_populate_physmap(
> +                        dom->guest_xc, dom->guest_domid, allocsz, 0,
> +                        0, &dom->p2m_host[i]);
> +            }
> +            else
> +                rc = xc_domain_memory_populate_physmap(
> +                    dom->guest_xc, dom->guest_domid, allocsz, 0,
> +                    0, &dom->p2m_host[i]);
> +
>          }
>      }
>
> diff -r 059c01d69a08 xen/common/page_alloc.c
> --- a/xen/common/page_alloc.c   Thu Oct 29 14:48:28 2009 +0000
> +++ b/xen/common/page_alloc.c   Mon Nov 02 15:37:42 2009 +0800
> @@ -1085,9 +1085,9 @@
>      struct page_info *pg = NULL;
>      unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
>      unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1), dma_zone;
> +    unsigned int is_limit_lo = memflags & _MEMF_above_bit;
>
>      ASSERT(!in_irq());
> -
>      if ( (node == NUMA_NO_NODE) && (d != NULL) )
>          node = domain_to_node(d);
>
> @@ -1099,8 +1099,9 @@
>          pg = alloc_heap_pages(dma_zone + 1, zone_hi, node, order, memflags);
>
>      if ( (pg == NULL) &&
> -         ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
> -                                 node, order, memflags)) == NULL) )
> +         ((pg = alloc_heap_pages(is_limit_lo ? bits : (MEMZONE_XEN + 1),
> +                                 is_limit_lo ? min_t(unsigned int, bits_to_zone(BITS_PER_LONG + PAGE_SHIFT), NR_ZONES - 1) : zone_hi,
> +                                 node, order, memflags)) == NULL) )
>          return NULL;
>
>      if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
> diff -r 059c01d69a08 xen/include/public/memory.h
> --- a/xen/include/public/memory.h       Thu Oct 29 14:48:28 2009 +0000
> +++ b/xen/include/public/memory.h       Mon Nov 02 15:37:42 2009 +0800
> @@ -52,6 +52,8 @@
>  #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
>  /* Flag to populate physmap with populate-on-demand entries */
>  #define XENMEMF_populate_on_demand (1<<16)
> +/* Allocate the pages above the bits */
> +#define XENMEMF_above_bits (1<<7)
>  #endif
>
>  struct xen_memory_reservation {
> diff -r 059c01d69a08 xen/include/xen/mm.h
> --- a/xen/include/xen/mm.h      Thu Oct 29 14:48:28 2009 +0000
> +++ b/xen/include/xen/mm.h      Mon Nov 02 15:37:42 2009 +0800
> @@ -81,6 +81,7 @@
>  #define _MEMF_node        8
>  #define MEMF_node(n)      ((((n)+1)&0xff)<<_MEMF_node)
>  #define _MEMF_bits        24
>  +#define _MEMF_above_bit  (1<<7)
>  #define MEMF_bits(n)      ((n)<<_MEMF_bits)
>
>  #ifdef CONFIG_PAGEALLOC_MAX_ORDER

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
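
For readers following the thread, below is a condensed sketch of the allocation preference the patch describes on the libxc side: 32-bit PV guests stay below the 128G boundary, everything else prefers memory above it and only falls back to the low pool when needed. It assumes the xc_domain_memory_populate_physmap() call as used in the patch and the proposed XENMEMF_above_bits flag; populate_chunk() and its parameter names are invented for illustration and are not part of the patch.

#include <xenctrl.h>

/*
 * Illustrative helper, not part of the patch.  guest_is_32bit_pv would be
 * derived from x86_compat() and host_above_128g from xc_physinfo(), as in
 * the patch; XENMEMF_above_bits is the flag the patch proposes above.
 */
static int populate_chunk(int xc, uint32_t domid, unsigned long allocsz,
                          xen_pfn_t *extents, int guest_is_32bit_pv,
                          int host_above_128g)
{
    int rc;

    if ( !host_above_128g )
        /* Host memory fits below 128G anyway: plain allocation is fine. */
        return xc_domain_memory_populate_physmap(xc, domid, allocsz,
                                                 0, 0, extents);

    if ( guest_is_32bit_pv )
        /* Hard constraint: 32-bit PV memory must sit below 2^37 bytes. */
        return xc_domain_memory_populate_physmap(xc, domid, allocsz, 0,
                                                 XENMEMF_address_bits(37),
                                                 extents);

    /* 64-bit PV and HVM guests: prefer memory above 128G so the scarce
     * low pool is preserved for 32-bit PV guests... */
    rc = xc_domain_memory_populate_physmap(xc, domid, allocsz, 0,
                                           XENMEMF_address_bits(37) |
                                           XENMEMF_above_bits, extents);

    /* ...and only fall back to the low pool when the high range is
     * exhausted. */
    if ( rc != 0 )
        rc = xc_domain_memory_populate_physmap(xc, domid, allocsz,
                                               0, 0, extents);
    return rc;
}

The 37 in XENMEMF_address_bits(37) is simply 2^37 bytes = 128GiB, the boundary discussed above.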