[Xen-devel] [PATCH v3 9/9] libxc: create p2m list outside of kernel mapping if supported
In case the kernel of a new pv-domU indicates that it supports a p2m list
outside the initial kernel mapping by specifying INIT_P2M, let the domain
builder allocate the memory for the p2m list from guest physical memory
only and map it at the address the kernel is expecting.

This will enable loading pv-domUs larger than 512 GB.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
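[Editor's note, not part of the patch: below is a minimal standalone sketch of
the placement decision this patch adds to xc_dom_build_image(), using stand-in
types and names rather than the libxc API. The kernel-provided p2m base is
only honoured if the architecture hook supports it, the address is page
aligned, and it lies below virt_base; otherwise the builder falls back to
placing the p2m list inside the initial kernel mapping as before. The numbers
in main() illustrate the size arithmetic behind the 512 GB figure (8 bytes per
page-frame entry); the virt_base and p2m_base values are illustrative
assumptions only.]

/*
 * Editor's sketch, not part of the patch: mirrors the checks added to
 * xc_dom_build_image() with stand-in types instead of the libxc API.
 * Build with: gcc -Wall p2m_sketch.c
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096ULL
#define UNSET_ADDR (~0ULL)

struct builder_parms {
    uint64_t virt_base;       /* start of the kernel's virtual mapping */
    uint64_t p2m_base;        /* from the kernel's INIT_P2M note, or UNSET_ADDR */
    int p2m_base_supported;   /* arch hook can place the p2m list externally */
};

/*
 * Returns 1 if the p2m list may live outside the initial kernel mapping
 * at p2m_base, 0 for the classic in-mapping placement (resetting
 * p2m_base to UNSET_ADDR in that case, as the patch does).
 */
static int p2m_outside_mapping(struct builder_parms *parms)
{
    if ( !parms->p2m_base_supported ||
         parms->p2m_base >= parms->virt_base ||   /* must lie below virt_base */
         (parms->p2m_base & (PAGE_SIZE - 1)) )    /* must be page aligned */
        parms->p2m_base = UNSET_ADDR;
    return parms->p2m_base != UNSET_ADDR;
}

int main(void)
{
    /*
     * 512 GiB of guest memory, 4 KiB pages, 8 bytes per p2m entry
     * -> a 1 GiB p2m list, which can no longer always be kept inside
     * the size-limited initial kernel mapping.
     */
    uint64_t guest_pages = (512ULL << 30) / PAGE_SIZE;
    uint64_t p2m_bytes   = guest_pages * 8;

    struct builder_parms parms = {
        .virt_base          = 0xffffffff80000000ULL, /* typical 64-bit pv kernel */
        .p2m_base           = 0x8000000000ULL,       /* hypothetical example value */
        .p2m_base_supported = 1,                     /* as for xc_dom_64 below */
    };

    printf("p2m list size: %llu MiB\n", (unsigned long long)(p2m_bytes >> 20));
    printf("p2m list outside initial mapping: %d\n", p2m_outside_mapping(&parms));
    return 0;
}

[In the patch itself the same checks feed dom->parms.p2m_base: when it stays
set, the new alloc_p2m_list_x86_64() hook later allocates the list, plus the
page tables needed to map it, from guest physical memory and the builder maps
it at dom->parms.p2m_base.]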
 tools/libxc/include/xc_dom.h |  1 +
 tools/libxc/xc_dom_core.c    | 15 ++++++++++-
 tools/libxc/xc_dom_x86.c     | 59 ++++++++++++++++++++++++++++++++++++++------
 3 files changed, 67 insertions(+), 8 deletions(-)

diff --git a/tools/libxc/include/xc_dom.h b/tools/libxc/include/xc_dom.h
index 79d830f..9c000ca 100644
--- a/tools/libxc/include/xc_dom.h
+++ b/tools/libxc/include/xc_dom.h
@@ -239,6 +239,7 @@ struct xc_dom_arch {
     char *native_protocol;
     int page_shift;
     int sizeof_pfn;
+    int p2m_base_supported;
     int arch_private_size;

     struct xc_dom_arch *next;
diff --git a/tools/libxc/xc_dom_core.c b/tools/libxc/xc_dom_core.c
index 18b98c9..cfee598 100644
--- a/tools/libxc/xc_dom_core.c
+++ b/tools/libxc/xc_dom_core.c
@@ -777,6 +777,7 @@ struct xc_dom_image *xc_dom_allocate(xc_interface *xch,
     dom->parms.virt_hypercall = UNSET_ADDR;
     dom->parms.virt_hv_start_low = UNSET_ADDR;
     dom->parms.elf_paddr_offset = UNSET_ADDR;
+    dom->parms.p2m_base = UNSET_ADDR;

     dom->alloc_malloc += sizeof(*dom);
     return dom;
@@ -1096,7 +1097,11 @@ int xc_dom_build_image(struct xc_dom_image *dom)
     }

     /* allocate other pages */
-    if ( dom->arch_hooks->alloc_p2m_list &&
+    if ( !dom->arch_hooks->p2m_base_supported ||
+         dom->parms.p2m_base >= dom->parms.virt_base ||
+         (dom->parms.p2m_base & (XC_DOM_PAGE_SIZE(dom) - 1)) )
+        dom->parms.p2m_base = UNSET_ADDR;
+    if ( dom->arch_hooks->alloc_p2m_list && dom->parms.p2m_base == UNSET_ADDR &&
          dom->arch_hooks->alloc_p2m_list(dom) != 0 )
         goto err;
     if ( dom->arch_hooks->alloc_magic_pages(dom) != 0 )
@@ -1124,6 +1129,14 @@ int xc_dom_build_image(struct xc_dom_image *dom)
         dom->initrd_len = page_size * dom->ramdisk_seg.pages;
     }

+    /* Allocate p2m list if outside of initial kernel mapping. */
+    if ( dom->arch_hooks->alloc_p2m_list && dom->parms.p2m_base != UNSET_ADDR )
+    {
+        if ( dom->arch_hooks->alloc_p2m_list(dom) != 0 )
+            goto err;
+        dom->p2m_seg.vstart = dom->parms.p2m_base;
+    }
+
     return 0;

  err:
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index 333ef6b..0847761 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -68,6 +68,8 @@
 #define bits_to_mask(bits) (((xen_vaddr_t)1 << (bits)) - 1)
 #define round_down(addr, mask) ((addr) & ~(mask))
 #define round_up(addr, mask) ((addr) | (mask))
+#define round_pg(addr) (((addr) + PAGE_SIZE_X86 - 1) & ~(PAGE_SIZE_X86 - 1))
+#define round_pfn(addr) (((addr) + PAGE_SIZE_X86 - 1) / PAGE_SIZE_X86)

 struct xc_dom_x86_mapping_lvl {
     xen_vaddr_t from;
@@ -84,7 +86,7 @@ struct xc_dom_x86_mapping {

 struct xc_dom_image_x86 {
     unsigned n_mappings;
-#define MAPPING_MAX 1
+#define MAPPING_MAX 2
     struct xc_dom_x86_mapping maps[MAPPING_MAX];
 };

@@ -536,6 +538,7 @@ static int setup_pgtables_x86_64(struct xc_dom_image *dom)
             }
         }
     }
+
     return 0;

  pfn_error:
@@ -551,11 +554,8 @@

 /* ------------------------------------------------------------------------ */

-static int alloc_p2m_list(struct xc_dom_image *dom)
+static int alloc_p2m_list(struct xc_dom_image *dom, size_t p2m_alloc_size)
 {
-    size_t p2m_alloc_size = dom->p2m_size * dom->arch_hooks->sizeof_pfn;
-
-    /* allocate phys2mach table */
     if ( xc_dom_alloc_segment(dom, &dom->p2m_seg, "phys2mach",
                               0, p2m_alloc_size) )
         return -1;
@@ -566,6 +566,41 @@
     return 0;
 }

+static int alloc_p2m_list_x86_32(struct xc_dom_image *dom)
+{
+    size_t p2m_alloc_size = dom->p2m_size * dom->arch_hooks->sizeof_pfn;
+
+    p2m_alloc_size = round_pg(p2m_alloc_size);
+    return alloc_p2m_list(dom, p2m_alloc_size);
+}
+
+static int alloc_p2m_list_x86_64(struct xc_dom_image *dom)
+{
+    struct xc_dom_image_x86 *domx86 = dom->arch_private;
+    struct xc_dom_x86_mapping *map = domx86->maps + domx86->n_mappings;
+    size_t p2m_alloc_size = dom->p2m_size * dom->arch_hooks->sizeof_pfn;
+    xen_vaddr_t from, to;
+    unsigned lvl;
+
+    p2m_alloc_size = round_pg(p2m_alloc_size);
+    if ( dom->parms.p2m_base != UNSET_ADDR )
+    {
+        from = dom->parms.p2m_base;
+        to = from + p2m_alloc_size - 1;
+        if ( count_pgtables(dom, 0, pgtblshift_x86_64, from, to,
+                            dom->pfn_alloc_end) )
+            return -1;
+
+        map->area.pfn = dom->pfn_alloc_end;
+        for ( lvl = 0; lvl < 4; lvl++ )
+            map->lvls[lvl].pfn += p2m_alloc_size >> PAGE_SHIFT_X86;
+        domx86->n_mappings++;
+        p2m_alloc_size += map->area.pgtables << PAGE_SHIFT_X86;
+    }
+
+    return alloc_p2m_list(dom, p2m_alloc_size);
+}
+
 /* ------------------------------------------------------------------------ */

 static int alloc_magic_pages(struct xc_dom_image *dom)
@@ -784,6 +819,11 @@ static int start_info_x86_64(struct xc_dom_image *dom)
     start_info->pt_base = dom->pgtables_seg.vstart;
     start_info->nr_pt_frames = domx86->maps[0].area.pgtables;
     start_info->mfn_list = dom->p2m_seg.vstart;
+    if ( dom->parms.p2m_base != UNSET_ADDR )
+    {
+        start_info->first_p2m_pfn = dom->p2m_seg.pfn;
+        start_info->nr_p2m_frames = dom->p2m_seg.pages;
+    }

     start_info->flags = dom->flags;
     start_info->store_mfn = xc_dom_p2m(dom, dom->xenstore_pfn);
@@ -1671,7 +1711,10 @@ static int bootlate_pv(struct xc_dom_image *dom)
     if ( !xc_dom_feature_translated(dom) )
     {
         /* paravirtualized guest */
+
+        /* Drop references to all initial page tables before pinning. */
         xc_dom_unmap_one(dom, dom->pgtables_seg.pfn);
+        xc_dom_unmap_one(dom, dom->p2m_seg.pfn);
         rc = pin_table(dom->xch, pgd_type,
                        xc_dom_p2m(dom, dom->pgtables_seg.pfn),
                        dom->guest_domid);
@@ -1750,10 +1793,11 @@ static struct xc_dom_arch xc_dom_32_pae = {
     .native_protocol = XEN_IO_PROTO_ABI_X86_32,
     .page_shift = PAGE_SHIFT_X86,
     .sizeof_pfn = 4,
+    .p2m_base_supported = 0,
     .arch_private_size = sizeof(struct xc_dom_image_x86),
     .alloc_magic_pages = alloc_magic_pages,
     .alloc_pgtables = alloc_pgtables_x86_32_pae,
-    .alloc_p2m_list = alloc_p2m_list,
+    .alloc_p2m_list = alloc_p2m_list_x86_32,
     .setup_pgtables = setup_pgtables_x86_32_pae,
     .start_info = start_info_x86_32,
     .shared_info = shared_info_x86_32,
@@ -1768,10 +1812,11 @@ static struct xc_dom_arch xc_dom_64 = {
     .native_protocol = XEN_IO_PROTO_ABI_X86_64,
     .page_shift = PAGE_SHIFT_X86,
     .sizeof_pfn = 8,
+    .p2m_base_supported = 1,
     .arch_private_size = sizeof(struct xc_dom_image_x86),
     .alloc_magic_pages = alloc_magic_pages,
     .alloc_pgtables = alloc_pgtables_x86_64,
-    .alloc_p2m_list = alloc_p2m_list,
+    .alloc_p2m_list = alloc_p2m_list_x86_64,
     .setup_pgtables = setup_pgtables_x86_64,
     .start_info = start_info_x86_64,
     .shared_info = shared_info_x86_64,
--
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel