[PATCH v3 4/8] libxenguest: restrict PV guest size
The P2M, the use of PFNs, and hence the maximum valid PFN are purely
software constructs in PV. In principle a guest is free to use
arbitrary PFNs. However, at least page table normalization requires
that PFN space be, like MFN space, limited to the architectural 40
bits (52 address bits). And of course a 32-bit tool stack places
further constraints on the values. Bounding the values also makes sure
that various subsequent calculations won't truncate values and then
continue with inconsistencies (see e.g. fl_entries vs
ctx->x86.pv.p2m_frames in map_p2m_tree()).

While there, correct an adjacent error message with wrong-way-round
wording in the restore code, and another slightly malformed and
misleading (off-by-one) one in the core dumping code.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Integrate into series.
---
In case the save/restore changes don't make it obvious enough: It
escapes me why struct xc_sr_rec_x86_pv_p2m_frames has p2m_pfns[] with
uint64_t element type, but {start,end}_pfn both as uint32_t. Imo all
three can sensibly only ever be of the same type.
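For illustration only, a minimal standalone sketch (not part of the
patch below) of where the bit bounds used in the checks come from,
assuming 4KiB pages; the constant names here are made up for this
example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT       12                /* 4KiB pages */
#define ARCH_ADDR_BITS   52                /* x86 architectural limit */
#define ARCH_PFN_BITS    (ARCH_ADDR_BITS - PAGE_SHIFT)   /* 40 */
#define TOOLS32_PFN_BITS 28                /* 1TiB with 4KiB pages */

int main(void)
{
    uint64_t max_pfn = (1ULL << ARCH_PFN_BITS) - 1;

    /* 64-bit tool stacks: bound PFNs by the architectural limit. */
    printf("64-bit tools: max PFN %#llx (%d bits)\n",
           (unsigned long long)max_pfn, ARCH_PFN_BITS);

    /* 32-bit tool stacks: mapping a larger P2M would exhaust VA space. */
    printf("32-bit tools: at most %llu GiB of guest memory (%d PFN bits)\n",
           (1ULL << (TOOLS32_PFN_BITS + PAGE_SHIFT)) >> 30,
           TOOLS32_PFN_BITS);

    return 0;
}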
--- a/tools/include/xen-tools/libs.h
+++ b/tools/include/xen-tools/libs.h
@@ -13,6 +13,10 @@
 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(*a))
 #endif
 
+#ifndef sizeof_field
+#define sizeof_field(type, field) sizeof(((type *)0)->field)
+#endif
+
 #ifndef MAX
 #define MAX(x, y) ((x) > (y) ? (x) : (y))
 #endif
--- a/tools/libs/guest/xg_core_x86.c
+++ b/tools/libs/guest/xg_core_x86.c
@@ -59,6 +59,43 @@ xc_core_arch_memory_map_get(xc_interface
     if ( xc_domain_nr_gpfns(xch, info->domid, &p2m_size) < 0 )
         return -1;
 
+    if ( !p2m_size )
+    {
+        ERROR("Cannot map a guest without P2M");
+        errno = ENODATA;
+        return -1;
+    }
+
+    if ( !info->hvm )
+    {
+        unsigned int guest_width;
+
+        if ( xc_domain_get_guest_width(xch, info->domid, &guest_width) != 0 )
+        {
+            PERROR("Cannot get address size for PV guest");
+            return -1;
+        }
+
+        if ( p2m_size == (guest_width > 4 ? ~0UL : ~0U) )
+        {
+            ERROR("Cannot map a PV guest with invalid P2M");
+            errno = ENODATA;
+            return -1;
+        }
+    }
+
+#ifndef __i386__
+    if ( (p2m_size - 1) >> 40 )
+#else
+    /* Very large domains (> 1TB) will exhaust virtual address space. */
+    if ( (p2m_size - 1) >> 28 )
+#endif
+    {
+        ERROR("Cannot map a guest with P2M size %#lx", p2m_size);
+        errno = EOPNOTSUPP;
+        return -1;
+    }
+
     map = malloc(sizeof(*map));
     if ( map == NULL )
     {
@@ -333,10 +370,30 @@ xc_core_arch_map_p2m_rw(xc_interface *xc
 
     if ( dinfo->p2m_size < info->nr_pages )
     {
-        ERROR("p2m_size < nr_pages -1 (%lx < %lx", dinfo->p2m_size, info->nr_pages - 1);
+        ERROR("p2m_size < nr_pages (%lx < %lx)", dinfo->p2m_size, info->nr_pages);
         goto out;
     }
 
+    if ( !info->hvm &&
+         dinfo->p2m_size == (dinfo->guest_width > 4 ? ~0UL : ~0U) )
+    {
+        ERROR("Cannot r/%c-map a PV guest with invalid P2M", rw ? 'w' : 'o');
+        errno = ENODATA;
+        return -1;
+    }
+
+#ifndef __i386__
+    if ( (dinfo->p2m_size - 1) >> 40 )
+#else
+    /* Very large domains (> 1TB) will exhaust virtual address space. */
+    if ( (dinfo->p2m_size - 1) >> 28 )
+#endif
+    {
+        ERROR("Cannot r/%c-map a guest with P2M size %#lx",
+              rw ? 'w' : 'o', dinfo->p2m_size);
+        errno = EOPNOTSUPP;
+        return -1;
+    }
+
     p2m_cr3 = GET_FIELD(live_shinfo, arch.p2m_cr3, dinfo->guest_width);
 
     p2m_frame_list = p2m_cr3 ? xc_core_arch_map_p2m_list_rw(xch, dinfo, dom,
                                                             live_shinfo, p2m_cr3)
--- a/tools/libs/guest/xg_sr_restore_x86_pv.c
+++ b/tools/libs/guest/xg_sr_restore_x86_pv.c
@@ -709,10 +709,23 @@ static int handle_x86_pv_p2m_frames(stru
         return -1;
     }
 
+#ifdef __i386__
+    /* Very large domains (> 1TB) will exhaust virtual address space. */
+    if ( data->end_pfn >> 28 )
+#elif 0 /* sizeof(data->end_pfn) > 4 */
+    if ( data->end_pfn >> (ctx->x86.pv.width > 4 ? 40 : 32) )
+#else
+    if ( 0 )
+#endif
+    {
+        ERROR("End pfn in stream (%#x) too large", data->end_pfn);
+        return -1;
+    }
+
     if ( data->start_pfn > data->end_pfn )
     {
-        ERROR("End pfn in stream (%#x) exceeds Start (%#x)",
-              data->end_pfn, data->start_pfn);
+        ERROR("Start pfn in stream (%#x) exceeds End (%#x)",
+              data->start_pfn, data->end_pfn);
         return -1;
     }
 
--- a/tools/libs/guest/xg_sr_save_x86_pv.c
+++ b/tools/libs/guest/xg_sr_save_x86_pv.c
@@ -464,11 +464,40 @@ static int map_p2m_list(struct xc_sr_con
  */
 static int map_p2m(struct xc_sr_context *ctx)
 {
+    xc_interface *xch = ctx->xch;
     uint64_t p2m_cr3;
+    uint64_t max_pfn = GET_FIELD(ctx->x86.pv.shinfo, arch.max_pfn,
+                                 ctx->x86.pv.width);
+
+    if ( !max_pfn )
+    {
+        ERROR("Cannot save a guest without P2M");
+        errno = ENODATA;
+        return -1;
+    }
+
+    if ( max_pfn-- == (ctx->x86.pv.width > 4 ? ~0UL : ~0U) )
+    {
+        ERROR("Cannot save a guest with invalid P2M");
+        errno = ENODATA;
+        return -1;
+    }
+
+#ifndef __i386__
+    if ( max_pfn >> (sizeof_field(struct xc_sr_rec_x86_pv_p2m_frames,
+                                  end_pfn) > 4 ? 40 : 32) )
+#else
+    /* Very large domains (> 1TB) will exhaust virtual address space. */
+    if ( max_pfn >> 28 )
+#endif
+    {
+        ERROR("Cannot save a guest with maximum PFN %#"PRIx64, max_pfn);
+        errno = EOPNOTSUPP;
+        return -1;
+    }
 
     ctx->x86.pv.p2m_generation = ~0ULL;
-    ctx->x86.pv.max_pfn = GET_FIELD(ctx->x86.pv.shinfo, arch.max_pfn,
-                                    ctx->x86.pv.width) - 1;
+    ctx->x86.pv.max_pfn = max_pfn;
     p2m_cr3 = GET_FIELD(ctx->x86.pv.shinfo, arch.p2m_cr3, ctx->x86.pv.width);
 
     return p2m_cr3 ? map_p2m_list(ctx, p2m_cr3) : map_p2m_tree(ctx);
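For reference, a minimal standalone sketch of the sizeof_field()
helper introduced above, with a hypothetical record type standing in
for struct xc_sr_rec_x86_pv_p2m_frames, to show how the save side
picks its 40- vs 32-bit bound from the width of the record's end_pfn
field:

#include <stdint.h>
#include <stdio.h>

#define sizeof_field(type, field) sizeof(((type *)0)->field)

struct rec {                  /* hypothetical stand-in */
    uint32_t start_pfn;
    uint32_t end_pfn;
    uint64_t p2m_pfns[];      /* wider than the bounding fields */
};

int main(void)
{
    /*
     * The operand of sizeof is not evaluated, so the null pointer
     * "dereference" inside the macro is safe: only the type matters.
     */
    unsigned int bits = sizeof_field(struct rec, end_pfn) > 4 ? 40 : 32;

    printf("end_pfn is %zu bytes wide -> bound PFNs to %u bits\n",
           sizeof_field(struct rec, end_pfn), bits);

    return 0;
}

This also shows the type oddity noted before the patch: with end_pfn
only 32 bits wide, the stricter 32-bit bound applies even though the
p2m_pfns[] entries could hold 40-bit values.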