[Xen-devel] [PATCH 03/19] libxc: allocate memory with vNUMA information for PV guest
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Cc: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
---
 tools/libxc/include/xc_dom.h |    5 +++
 tools/libxc/xc_dom_x86.c     |   72 +++++++++++++++++++++++++++++++++++-------
 tools/libxc/xc_private.h     |    2 ++
 3 files changed, 68 insertions(+), 11 deletions(-)

diff --git a/tools/libxc/include/xc_dom.h b/tools/libxc/include/xc_dom.h
index 6ae6a9f..eb8e2ce 100644
--- a/tools/libxc/include/xc_dom.h
+++ b/tools/libxc/include/xc_dom.h
@@ -162,6 +162,11 @@ struct xc_dom_image {
     struct xc_dom_loader *kernel_loader;
     void *private_loader;
 
+    /* vNUMA information */
+    unsigned int *vnode_to_pnode;
+    uint64_t *vnode_size;
+    unsigned int nr_vnodes;
+
     /* kernel loader */
     struct xc_dom_arch *arch_hooks;
     /* allocate up to virt_alloc_end */
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index bf06fe4..3286232 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -759,7 +759,8 @@ static int x86_shadow(xc_interface *xch, domid_t domid)
 int arch_setup_meminit(struct xc_dom_image *dom)
 {
     int rc;
-    xen_pfn_t pfn, allocsz, i, j, mfn;
+    xen_pfn_t pfn, allocsz, mfn, total, pfn_base;
+    int i, j;
 
     rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
     if ( rc )
@@ -811,18 +812,67 @@ int arch_setup_meminit(struct xc_dom_image *dom)
         /* setup initial p2m */
         for ( pfn = 0; pfn < dom->total_pages; pfn++ )
             dom->p2m_host[pfn] = pfn;
-
+
+        /* setup dummy vNUMA information if it's not provided */
+        if ( dom->nr_vnodes == 0 )
+        {
+            dom->nr_vnodes = 1;
+            dom->vnode_to_pnode = xc_dom_malloc(dom,
+                                                sizeof(*dom->vnode_to_pnode));
+            dom->vnode_to_pnode[0] = XC_VNUMA_NO_NODE;
+            dom->vnode_size = xc_dom_malloc(dom, sizeof(*dom->vnode_size));
+            dom->vnode_size[0] = ((dom->total_pages << PAGE_SHIFT) >> 20);
+        }
+
+        total = 0;
+        for ( i = 0; i < dom->nr_vnodes; i++ )
+            total += ((dom->vnode_size[i] << 20) >> PAGE_SHIFT);
+        if ( total != dom->total_pages )
+        {
+            xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                         "%s: number of pages requested by vNUMA (0x%"PRIpfn") mismatches number of pages configured for domain (0x%"PRIpfn")\n",
+                         __FUNCTION__, total, dom->total_pages);
+            return -EINVAL;
+        }
+
         /* allocate guest memory */
-        for ( i = rc = allocsz = 0;
-              (i < dom->total_pages) && !rc;
-              i += allocsz )
+        pfn_base = 0;
+        for ( i = 0; i < dom->nr_vnodes; i++ )
         {
-            allocsz = dom->total_pages - i;
-            if ( allocsz > 1024*1024 )
-                allocsz = 1024*1024;
-            rc = xc_domain_populate_physmap_exact(
-                dom->xch, dom->guest_domid, allocsz,
-                0, 0, &dom->p2m_host[i]);
+            unsigned int memflags;
+            uint64_t pages;
+
+            memflags = 0;
+            if ( dom->vnode_to_pnode[i] != XC_VNUMA_NO_NODE )
+            {
+                memflags |= XENMEMF_exact_node(dom->vnode_to_pnode[i]);
+                memflags |= XENMEMF_exact_node_request;
+            }
+
+            pages = (dom->vnode_size[i] << 20) >> PAGE_SHIFT;
+
+            for ( j = 0; j < pages; j += allocsz )
+            {
+                allocsz = pages - j;
+                if ( allocsz > 1024*1024 )
+                    allocsz = 1024*1024;
+
+                rc = xc_domain_populate_physmap_exact(dom->xch,
+                         dom->guest_domid, allocsz, 0, memflags,
+                         &dom->p2m_host[pfn_base+j]);
+
+                if ( rc )
+                {
+                    if ( dom->vnode_to_pnode[i] != XC_VNUMA_NO_NODE )
+                        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                                     "%s: fail to allocate 0x%"PRIx64" pages for vnode %d on pnode %d out of 0x%"PRIpfn"\n",
+                                     __FUNCTION__, pages, i,
+                                     dom->vnode_to_pnode[i], dom->total_pages);
+                    return rc;
+                }
+            }
+
+            pfn_base += pages;
         }
 
         /* Ensure no unclaimed pages are left unused.
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index 45b8644..1809674 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -35,6 +35,8 @@
 
 #include <xen/sys/privcmd.h>
 
+#define XC_VNUMA_NO_NODE (~0U)
+
 #if defined(HAVE_VALGRIND_MEMCHECK_H) && !defined(NDEBUG) && !defined(__MINIOS__)
 /* Compile in Valgrind client requests? */
 #include <valgrind/memcheck.h>
-- 
1.7.10.4
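
For reference, vnode_size[] is specified in MB and must sum to the
domain's total memory: arch_setup_meminit() converts each entry with
(vnode_size[i] << 20) >> PAGE_SHIFT, i.e. 256 pages per MB with 4K
pages, and returns -EINVAL if the sum disagrees with dom->total_pages.
Below is a minimal caller-side sketch of how these fields might be
populated before the domain builder runs. It is illustrative only and
not part of this patch: the helper name and the 512MB split are made
up, and XC_VNUMA_NO_NODE lives in the libxc-internal xc_private.h, so
this would only compile inside the libxc tree.

    #include <stdint.h>
    #include "xc_dom.h"      /* struct xc_dom_image, xc_dom_malloc() */
    #include "xc_private.h"  /* XC_VNUMA_NO_NODE */

    /* Hypothetical helper: describe a 1024MB guest as two vnodes. */
    static int setup_vnuma_example(struct xc_dom_image *dom)
    {
        dom->nr_vnodes = 2;

        /* xc_dom_malloc()'d memory is freed with the xc_dom_image. */
        dom->vnode_size = xc_dom_malloc(dom,
            dom->nr_vnodes * sizeof(*dom->vnode_size));
        dom->vnode_to_pnode = xc_dom_malloc(dom,
            dom->nr_vnodes * sizeof(*dom->vnode_to_pnode));
        if ( dom->vnode_size == NULL || dom->vnode_to_pnode == NULL )
            return -1;

        /* Sizes are in MB and must sum to the domain's memory:
         * 512 + 512 == 1024MB == dom->total_pages * 4K here. */
        dom->vnode_size[0] = 512;
        dom->vnode_size[1] = 512;

        /* vnode 0 is pinned to pnode 0 (allocated with
         * XENMEMF_exact_node); vnode 1 is left to the hypervisor's
         * default placement. */
        dom->vnode_to_pnode[0] = 0;
        dom->vnode_to_pnode[1] = XC_VNUMA_NO_NODE;

        return 0;
    }

If nr_vnodes is left at 0, the patch synthesizes a single dummy vnode
covering all of memory with no placement preference, so existing
callers keep working unchanged.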