[Xen-changelog] [xen master] libxc: unify handling of vNUMA layout
commit e1e566ca08b274cb1822f9faafdc9cabfba1ffe1
Author:     Wei Liu <wei.liu2@xxxxxxxxxx>
AuthorDate: Thu Jun 4 11:23:01 2015 +0100
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Wed Jun 17 11:29:56 2015 +0100

    libxc: unify handling of vNUMA layout

    This patch does the following:

    1. Use local variables for dummy vNUMA layout in PV case.
    2. Avoid leaking dummy layout back to caller in PV case.
    3. Use local variables to reference vNUMA layout (whether it is
       dummy or provided by caller) for both PV and HVM.

    Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
    Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 tools/libxc/xc_dom_x86.c       | 49 +++++++++++++++++++++++--------------
 tools/libxc/xc_hvm_build_x86.c | 52 ++++++++++++++++++++-------------------
 2 files changed, 56 insertions(+), 45 deletions(-)
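The core of the change is the same in both files: instead of writing a fabricated single-node layout into the caller-visible structure (and having to undo it on exit, as the removed use_dummy cleanup in the HVM file shows), the builders point local variables at either a stack-allocated dummy layout or the caller's own. Below is a minimal standalone sketch of that pattern; the names here (struct build_args, setup_memory, NUMA_NO_NODE) are simplified stand-ins for the real libxc types and functions, not actual libxc API:

/*
 * Minimal standalone sketch of the pattern the patch introduces: point
 * local variables at either a stack-allocated dummy vNUMA layout or the
 * caller-provided one, so the dummy never leaks back through the
 * caller-visible structure.  The types below are simplified stand-ins
 * for xen_vmemrange_t and struct xc_dom_image, not the real libxc ones.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define NUMA_NO_NODE (~0U)

struct vmemrange {
    uint64_t start, end;
    unsigned int flags, nid;
};

struct build_args {                 /* stand-in for the caller's struct */
    uint64_t total_pages;
    unsigned int nr_vmemranges, nr_vnodes;
    struct vmemrange *vmemranges;   /* NULL unless the caller set vNUMA */
    unsigned int *vnode_to_pnode;
};

static int setup_memory(struct build_args *args)
{
    /* Locals shadow the layout; args is never written to. */
    struct vmemrange dummy_vmemrange[1];
    unsigned int dummy_vnode_to_pnode[1];
    struct vmemrange *vmemranges;
    unsigned int *vnode_to_pnode;
    unsigned int nr_vmemranges, nr_vnodes, i;

    if ( args->nr_vmemranges == 0 )
    {
        /* No vNUMA requested: fabricate one flat range locally. */
        dummy_vmemrange[0].start = 0;
        dummy_vmemrange[0].end   = args->total_pages << PAGE_SHIFT;
        dummy_vmemrange[0].flags = 0;
        dummy_vmemrange[0].nid   = 0;
        dummy_vnode_to_pnode[0]  = NUMA_NO_NODE;

        nr_vmemranges  = 1;
        nr_vnodes      = 1;
        vmemranges     = dummy_vmemrange;
        vnode_to_pnode = dummy_vnode_to_pnode;
    }
    else
    {
        /* Caller supplied a layout: just reference it. */
        nr_vmemranges  = args->nr_vmemranges;
        nr_vnodes      = args->nr_vnodes;
        vmemranges     = args->vmemranges;
        vnode_to_pnode = args->vnode_to_pnode;
    }

    /* From here on, only the locals are used. */
    for ( i = 0; i < nr_vmemranges; i++ )
        printf("range %u: pfns [%llu, %llu) on vnode %u\n", i,
               (unsigned long long)(vmemranges[i].start >> PAGE_SHIFT),
               (unsigned long long)(vmemranges[i].end >> PAGE_SHIFT),
               vmemranges[i].nid);
    (void)nr_vnodes;
    return 0;
}

int main(void)
{
    struct build_args args = { .total_pages = 1024 };
    setup_memory(&args);
    /* args.vmemranges is still NULL: the dummy layout did not leak out. */
    return args.vmemranges != NULL;
}

Because the dummy arrays live on the builder's stack and are only ever reached through the locals, the caller can neither observe nor accidentally free them, which is the leak the patch closes in the PV path (the old code xc_dom_malloc'ed the dummy layout into dom->vmemranges).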
+ dom->p2m_size : (vmemranges[i].end >> PAGE_SHIFT); } if ( total != dom->total_pages ) { @@ -864,19 +875,19 @@ int arch_setup_meminit(struct xc_dom_image *dom) dom->p2m_host[pfn] = INVALID_P2M_ENTRY; /* allocate guest memory */ - for ( i = 0; i < dom->nr_vmemranges; i++ ) + for ( i = 0; i < nr_vmemranges; i++ ) { unsigned int memflags; uint64_t pages; - unsigned int pnode = dom->vnode_to_pnode[dom->vmemranges[i].nid]; + unsigned int pnode = vnode_to_pnode[vmemranges[i].nid]; memflags = 0; if ( pnode != XC_NUMA_NO_NODE ) memflags |= XENMEMF_exact_node(pnode); - pages = (dom->vmemranges[i].end - dom->vmemranges[i].start) + pages = (vmemranges[i].end - vmemranges[i].start) >> PAGE_SHIFT; - pfn_base = dom->vmemranges[i].start >> PAGE_SHIFT; + pfn_base = vmemranges[i].start >> PAGE_SHIFT; for ( pfn = pfn_base; pfn < pfn_base+pages; pfn++ ) dom->p2m_host[pfn] = pfn; diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c index 0e98c84..003ea06 100644 --- a/tools/libxc/xc_hvm_build_x86.c +++ b/tools/libxc/xc_hvm_build_x86.c @@ -257,7 +257,9 @@ static int setup_guest(xc_interface *xch, uint64_t total_pages; xen_vmemrange_t dummy_vmemrange[2]; unsigned int dummy_vnode_to_pnode[1]; - bool use_dummy = false; + xen_vmemrange_t *vmemranges; + unsigned int *vnode_to_pnode; + unsigned int nr_vmemranges, nr_vnodes; memset(&elf, 0, sizeof(elf)); if ( elf_init(&elf, image, image_size) != 0 ) @@ -290,7 +292,7 @@ static int setup_guest(xc_interface *xch, dummy_vmemrange[0].end = args->lowmem_end; dummy_vmemrange[0].flags = 0; dummy_vmemrange[0].nid = 0; - args->nr_vmemranges = 1; + nr_vmemranges = 1; if ( args->highmem_end > (1ULL << 32) ) { @@ -299,14 +301,13 @@ static int setup_guest(xc_interface *xch, dummy_vmemrange[1].flags = 0; dummy_vmemrange[1].nid = 0; - args->nr_vmemranges++; + nr_vmemranges++; } dummy_vnode_to_pnode[0] = XC_NUMA_NO_NODE; - args->nr_vnodes = 1; - args->vmemranges = dummy_vmemrange; - args->vnode_to_pnode = dummy_vnode_to_pnode; - use_dummy = true; + nr_vnodes = 1; + vmemranges = dummy_vmemrange; + vnode_to_pnode = dummy_vnode_to_pnode; } else { @@ -315,16 +316,21 @@ static int setup_guest(xc_interface *xch, PERROR("Cannot enable vNUMA and PoD at the same time"); goto error_out; } + + nr_vmemranges = args->nr_vmemranges; + nr_vnodes = args->nr_vnodes; + vmemranges = args->vmemranges; + vnode_to_pnode = args->vnode_to_pnode; } total_pages = 0; p2m_size = 0; - for ( i = 0; i < args->nr_vmemranges; i++ ) + for ( i = 0; i < nr_vmemranges; i++ ) { - total_pages += ((args->vmemranges[i].end - args->vmemranges[i].start) + total_pages += ((vmemranges[i].end - vmemranges[i].start) >> PAGE_SHIFT); - p2m_size = p2m_size > (args->vmemranges[i].end >> PAGE_SHIFT) ? - p2m_size : (args->vmemranges[i].end >> PAGE_SHIFT); + p2m_size = p2m_size > (vmemranges[i].end >> PAGE_SHIFT) ? 
diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
index 0e98c84..003ea06 100644
--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -257,7 +257,9 @@ static int setup_guest(xc_interface *xch,
     uint64_t total_pages;
     xen_vmemrange_t dummy_vmemrange[2];
     unsigned int dummy_vnode_to_pnode[1];
-    bool use_dummy = false;
+    xen_vmemrange_t *vmemranges;
+    unsigned int *vnode_to_pnode;
+    unsigned int nr_vmemranges, nr_vnodes;
 
     memset(&elf, 0, sizeof(elf));
     if ( elf_init(&elf, image, image_size) != 0 )
@@ -290,7 +292,7 @@ static int setup_guest(xc_interface *xch,
         dummy_vmemrange[0].end = args->lowmem_end;
         dummy_vmemrange[0].flags = 0;
         dummy_vmemrange[0].nid = 0;
-        args->nr_vmemranges = 1;
+        nr_vmemranges = 1;
 
         if ( args->highmem_end > (1ULL << 32) )
         {
@@ -299,14 +301,13 @@ static int setup_guest(xc_interface *xch,
             dummy_vmemrange[1].flags = 0;
             dummy_vmemrange[1].nid = 0;
 
-            args->nr_vmemranges++;
+            nr_vmemranges++;
         }
 
         dummy_vnode_to_pnode[0] = XC_NUMA_NO_NODE;
-        args->nr_vnodes = 1;
-        args->vmemranges = dummy_vmemrange;
-        args->vnode_to_pnode = dummy_vnode_to_pnode;
-        use_dummy = true;
+        nr_vnodes = 1;
+        vmemranges = dummy_vmemrange;
+        vnode_to_pnode = dummy_vnode_to_pnode;
     }
     else
     {
@@ -315,16 +316,21 @@ static int setup_guest(xc_interface *xch,
             PERROR("Cannot enable vNUMA and PoD at the same time");
             goto error_out;
         }
+
+        nr_vmemranges = args->nr_vmemranges;
+        nr_vnodes = args->nr_vnodes;
+        vmemranges = args->vmemranges;
+        vnode_to_pnode = args->vnode_to_pnode;
     }
 
     total_pages = 0;
     p2m_size = 0;
-    for ( i = 0; i < args->nr_vmemranges; i++ )
+    for ( i = 0; i < nr_vmemranges; i++ )
     {
-        total_pages += ((args->vmemranges[i].end - args->vmemranges[i].start)
+        total_pages += ((vmemranges[i].end - vmemranges[i].start)
                         >> PAGE_SHIFT);
-        p2m_size = p2m_size > (args->vmemranges[i].end >> PAGE_SHIFT) ?
-            p2m_size : (args->vmemranges[i].end >> PAGE_SHIFT);
+        p2m_size = p2m_size > (vmemranges[i].end >> PAGE_SHIFT) ?
+            p2m_size : (vmemranges[i].end >> PAGE_SHIFT);
    }
 
     if ( total_pages != (args->mem_size >> PAGE_SHIFT) )
@@ -360,12 +366,12 @@ static int setup_guest(xc_interface *xch,
     for ( i = 0; i < p2m_size; i++ )
         page_array[i] = ((xen_pfn_t)-1);
 
-    for ( vmemid = 0; vmemid < args->nr_vmemranges; vmemid++ )
+    for ( vmemid = 0; vmemid < nr_vmemranges; vmemid++ )
     {
         uint64_t pfn;
 
-        for ( pfn = args->vmemranges[vmemid].start >> PAGE_SHIFT;
-              pfn < args->vmemranges[vmemid].end >> PAGE_SHIFT;
+        for ( pfn = vmemranges[vmemid].start >> PAGE_SHIFT;
+              pfn < vmemranges[vmemid].end >> PAGE_SHIFT;
               pfn++ )
             page_array[pfn] = pfn;
     }
@@ -417,29 +423,29 @@ static int setup_guest(xc_interface *xch,
                                           xch, dom, 0xa0, 0, memflags,
                                           &page_array[0x00]);
 
     stat_normal_pages = 0;
-    for ( vmemid = 0; vmemid < args->nr_vmemranges; vmemid++ )
+    for ( vmemid = 0; vmemid < nr_vmemranges; vmemid++ )
     {
         unsigned int new_memflags = memflags;
         uint64_t end_pages;
-        unsigned int vnode = args->vmemranges[vmemid].nid;
-        unsigned int pnode = args->vnode_to_pnode[vnode];
+        unsigned int vnode = vmemranges[vmemid].nid;
+        unsigned int pnode = vnode_to_pnode[vnode];
 
         if ( pnode != XC_NUMA_NO_NODE )
             new_memflags |= XENMEMF_exact_node(pnode);
 
-        end_pages = args->vmemranges[vmemid].end >> PAGE_SHIFT;
+        end_pages = vmemranges[vmemid].end >> PAGE_SHIFT;
         /*
          * Consider vga hole belongs to the vmemrange that covers
          * 0xA0000-0xC0000. Note that 0x00000-0xA0000 is populated just
          * before this loop.
          */
-        if ( args->vmemranges[vmemid].start == 0 )
+        if ( vmemranges[vmemid].start == 0 )
         {
             cur_pages = 0xc0;
             stat_normal_pages += 0xc0;
         }
         else
-            cur_pages = args->vmemranges[vmemid].start >> PAGE_SHIFT;
+            cur_pages = vmemranges[vmemid].start >> PAGE_SHIFT;
 
         while ( (rc == 0) && (end_pages > cur_pages) )
         {
@@ -679,12 +685,6 @@ static int setup_guest(xc_interface *xch,
  error_out:
     rc = -1;
 
  out:
-    if ( use_dummy )
-    {
-        args->nr_vnodes = 0;
-        args->vmemranges = NULL;
-        args->vnode_to_pnode = NULL;
-    }
     if ( elf_check_broken(&elf) )
         ERROR("HVM ELF broken: %s", elf_check_broken(&elf));
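For the HVM dummy layout constructed at the top of setup_guest() above: a guest with more memory than fits below the 4GiB MMIO hole gets two ranges, [0, lowmem_end) and [4GiB, highmem_end). The sketch below is illustrative only; make_dummy_layout and struct range are hypothetical, though lowmem_end/highmem_end follow the semantics of the xc_hvm_build_args fields of the same names.

/*
 * Illustrative sketch (not libxc code) of the shape of the HVM dummy
 * layout: one range below the 4GiB MMIO hole and, only when the guest
 * has highmem, a second range starting at 4GiB.
 */
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; unsigned int nid; };

static unsigned int make_dummy_layout(uint64_t lowmem_end,
                                      uint64_t highmem_end,
                                      struct range out[2])
{
    unsigned int n = 0;

    out[n].start = 0;                    /* [0, lowmem_end) */
    out[n].end   = lowmem_end;
    out[n].nid   = 0;
    n++;

    if ( highmem_end > (1ULL << 32) )    /* [4GiB, highmem_end) */
    {
        out[n].start = 1ULL << 32;
        out[n].end   = highmem_end;
        out[n].nid   = 0;
        n++;
    }
    return n;
}

int main(void)
{
    struct range r[2];
    /* e.g. a 6GiB guest: 3GiB below the hole, 3GiB above 4GiB. */
    unsigned int n = make_dummy_layout(3ULL << 30, 7ULL << 30, r);
    unsigned int i;

    for ( i = 0; i < n; i++ )
        printf("range %u: [%#llx, %#llx)\n", i,
               (unsigned long long)r[i].start,
               (unsigned long long)r[i].end);
    return 0;
}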
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog