[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging] xen/memory: Fix mapping grant tables with XENMEM_acquire_resource
commit 34cc2e5f8dba6906da82fe8d76e839f9ab20f153 Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> AuthorDate: Mon Jul 27 17:24:11 2020 +0100 Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> CommitDate: Fri Feb 5 17:37:27 2021 +0000 xen/memory: Fix mapping grant tables with XENMEM_acquire_resource A guest's default number of grant frames is 64, and XENMEM_acquire_resource will reject an attempt to map more than 32 frames. This limit is caused by the size of mfn_list[] on the stack. Fix mapping of arbitrary size requests by looping over batches of 32 in acquire_resource(), and using hypercall continuations when necessary. To start with, break _acquire_resource() out of acquire_resource() to cope with type-specific dispatching, and update the return semantics to indicate the number of mfns returned. Update gnttab_acquire_resource() and x86's arch_acquire_resource() to match these new semantics. Have do_memory_op() pass start_extent into acquire_resource() so it can pick up where it left off after a continuation, and loop over batches of 32 until all the work is done, or a continuation needs to occur. compat_memory_op() is a bit more complicated, because it also has to marshal frame_list in the XLAT buffer. Have it account for continuation information itself and hide details from the upper layer, so it can marshal the buffer in chunks if necessary. With these fixes in place, it is now possible to map the whole grant table for a guest. 
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx> Release-Acked-by: Ian Jackson <iwj@xxxxxxxxxxxxxx> --- xen/common/compat/memory.c | 114 +++++++++++++++++++++++++++++++++-------- xen/common/grant_table.c | 3 ++ xen/common/memory.c | 124 +++++++++++++++++++++++++++++++++------------ 3 files changed, 187 insertions(+), 54 deletions(-) diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c index 834c5e19d1..c43fa97cf1 100644 --- a/xen/common/compat/memory.c +++ b/xen/common/compat/memory.c @@ -402,23 +402,10 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) case XENMEM_acquire_resource: { xen_pfn_t *xen_frame_list = NULL; - unsigned int max_nr_frames; if ( copy_from_guest(&cmp.mar, compat, 1) ) return -EFAULT; - /* - * The number of frames handled is currently limited to a - * small number by the underlying implementation, so the - * scratch space should be sufficient for bouncing the - * frame addresses. - */ - max_nr_frames = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.mar)) / - sizeof(*xen_frame_list); - - if ( cmp.mar.nr_frames > max_nr_frames ) - return -E2BIG; - /* Marshal the frame list in the remainder of the xlat space. */ if ( !compat_handle_is_null(cmp.mar.frame_list) ) xen_frame_list = (xen_pfn_t *)(nat.mar + 1); @@ -432,6 +419,28 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) if ( xen_frame_list && cmp.mar.nr_frames ) { + unsigned int xlat_max_frames = + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.mar)) / + sizeof(*xen_frame_list); + + if ( start_extent >= cmp.mar.nr_frames ) + return -EINVAL; + + /* + * Adjust nat to account for work done on previous + * continuations, leaving cmp pristine. Hide the continuation + * from the native code to prevent double accounting. 
+ */ + nat.mar->nr_frames -= start_extent; + nat.mar->frame += start_extent; + cmd &= MEMOP_CMD_MASK; + + /* + * If there are too many frames to fit within the xlat buffer, + * we'll need to loop to marshal them all. + */ + nat.mar->nr_frames = min(nat.mar->nr_frames, xlat_max_frames); + /* * frame_list is an input for translated guests, and an output * for untranslated guests. Only copy in for translated guests. @@ -444,14 +453,14 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) cmp.mar.nr_frames) || __copy_from_compat_offset( compat_frame_list, cmp.mar.frame_list, - 0, cmp.mar.nr_frames) ) + start_extent, nat.mar->nr_frames) ) return -EFAULT; /* * Iterate backwards over compat_frame_list[] expanding * compat_pfn_t to xen_pfn_t in place. */ - for ( int x = cmp.mar.nr_frames - 1; x >= 0; --x ) + for ( int x = nat.mar->nr_frames - 1; x >= 0; --x ) xen_frame_list[x] = compat_frame_list[x]; } } @@ -600,9 +609,11 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) case XENMEM_acquire_resource: { DEFINE_XEN_GUEST_HANDLE(compat_mem_acquire_resource_t); + unsigned int done; if ( compat_handle_is_null(cmp.mar.frame_list) ) { + ASSERT(split == 0 && rc == 0); if ( __copy_field_to_guest( guest_handle_cast(compat, compat_mem_acquire_resource_t), @@ -611,6 +622,21 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) break; } + if ( split < 0 ) + { + /* Continuation occurred. */ + ASSERT(rc != XENMEM_acquire_resource); + done = cmd >> MEMOP_EXTENT_SHIFT; + } + else + { + /* No continuation. */ + ASSERT(rc == 0); + done = nat.mar->nr_frames; + } + + ASSERT(done <= nat.mar->nr_frames); + /* * frame_list is an input for translated guests, and an output for * untranslated guests. Only copy out for untranslated guests. 
@@ -626,21 +652,67 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) */ BUILD_BUG_ON(sizeof(compat_pfn_t) > sizeof(xen_pfn_t)); - for ( i = 0; i < cmp.mar.nr_frames; i++ ) + rc = 0; + for ( i = 0; i < done; i++ ) { compat_pfn_t frame = xen_frame_list[i]; if ( frame != xen_frame_list[i] ) - return -ERANGE; + { + rc = -ERANGE; + break; + } compat_frame_list[i] = frame; } - if ( __copy_to_compat_offset(cmp.mar.frame_list, 0, - compat_frame_list, - cmp.mar.nr_frames) ) - return -EFAULT; + if ( !rc && __copy_to_compat_offset( + cmp.mar.frame_list, start_extent, + compat_frame_list, done) ) + rc = -EFAULT; + + if ( rc ) + { + if ( split < 0 ) + { + gdprintk(XENLOG_ERR, + "Cannot cancel continuation: %ld\n", rc); + domain_crash(current->domain); + } + return rc; + } + } + + start_extent += done; + + /* Completely done. */ + if ( start_extent == cmp.mar.nr_frames ) + break; + + /* + * Done a "full" batch, but we were limited by space in the xlat + * area. Go around the loop again without necessarily returning + * to guest context. + */ + if ( done == nat.mar->nr_frames ) + { + split = 1; + break; } + + /* Explicit continuation request from a higher level. */ + if ( done < nat.mar->nr_frames ) + return hypercall_create_continuation( + __HYPERVISOR_memory_op, "ih", + op | (start_extent << MEMOP_EXTENT_SHIFT), compat); + + /* + * Well... Something's gone wrong with the two levels of chunking. + * My condolences to whoever next has to debug this mess. + */ + ASSERT_UNREACHABLE(); + domain_crash(current->domain); + split = 0; break; } diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c index 280b7969b6..b95403695f 100644 --- a/xen/common/grant_table.c +++ b/xen/common/grant_table.c @@ -4053,6 +4053,9 @@ int gnttab_acquire_resource( for ( i = 0; i < nr_frames; ++i ) mfn_list[i] = virt_to_mfn(vaddrs[frame + i]); + /* Success. Passed nr_frames back to the caller. 
*/ + rc = nr_frames; + out: grant_write_unlock(gt); diff --git a/xen/common/memory.c b/xen/common/memory.c index f23b001fd2..7b012ce291 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -1129,23 +1129,41 @@ static int acquire_ioreq_server(struct domain *d, mfn_list[i] = mfn_x(mfn); } - return 0; + /* Success. Passed nr_frames back to the caller. */ + return nr_frames; #else return -EOPNOTSUPP; #endif } +/* + * Returns -errno on error, or positive in the range [1, nr_frames] on + * success. Returning less than nr_frames constitutes a request for a + * continuation. Callers can depend on frame + nr_frames not overflowing. + */ +static int _acquire_resource( + struct domain *d, unsigned int type, unsigned int id, unsigned int frame, + unsigned int nr_frames, xen_pfn_t mfn_list[]) +{ + switch ( type ) + { + case XENMEM_resource_grant_table: + return gnttab_acquire_resource(d, id, frame, nr_frames, mfn_list); + + case XENMEM_resource_ioreq_server: + return acquire_ioreq_server(d, id, frame, nr_frames, mfn_list); + + default: + return -EOPNOTSUPP; + } +} + static int acquire_resource( - XEN_GUEST_HANDLE_PARAM(xen_mem_acquire_resource_t) arg) + XEN_GUEST_HANDLE_PARAM(xen_mem_acquire_resource_t) arg, + unsigned long start_extent) { struct domain *d, *currd = current->domain; xen_mem_acquire_resource_t xmar; - /* - * The mfn_list and gfn_list (below) arrays are ok on stack for the - * moment since they are small, but if they need to grow in future - * use-cases then per-CPU arrays or heap allocations may be required. - */ - xen_pfn_t mfn_list[32]; unsigned int max_frames; int rc; @@ -1158,9 +1176,6 @@ static int acquire_resource( if ( xmar.pad != 0 ) return -EINVAL; - if ( xmar.nr_frames > ARRAY_SIZE(mfn_list) ) - return -E2BIG; - /* * The ABI is rather unfortunate. 
nr_frames (and therefore the total size * of the resource) is 32bit, while frame (the offset within the resource @@ -1190,7 +1205,7 @@ static int acquire_resource( if ( guest_handle_is_null(xmar.frame_list) ) { - if ( xmar.nr_frames ) + if ( xmar.nr_frames || start_extent ) goto out; xmar.nr_frames = max_frames; @@ -1198,30 +1213,47 @@ static int acquire_resource( goto out; } - do { - switch ( xmar.type ) - { - case XENMEM_resource_grant_table: - rc = gnttab_acquire_resource(d, xmar.id, xmar.frame, xmar.nr_frames, - mfn_list); - break; + /* + * Limiting nr_frames at (UINT_MAX >> MEMOP_EXTENT_SHIFT) isn't ideal. If + * it ever becomes a practical problem, we can switch to mutating + * xmar.{frame,nr_frames,frame_list} in guest memory. + */ + rc = -EINVAL; + if ( start_extent >= xmar.nr_frames || + xmar.nr_frames > (UINT_MAX >> MEMOP_EXTENT_SHIFT) ) + goto out; - case XENMEM_resource_ioreq_server: - rc = acquire_ioreq_server(d, xmar.id, xmar.frame, xmar.nr_frames, - mfn_list); - break; + /* Adjust for work done on previous continuations. */ + xmar.nr_frames -= start_extent; + xmar.frame += start_extent; + guest_handle_add_offset(xmar.frame_list, start_extent); - default: - rc = -EOPNOTSUPP; - break; - } + do { + /* + * Arbitrary size. Not too much stack space, and a reasonable stride + * for continuation checks. + */ + xen_pfn_t mfn_list[32]; + unsigned int todo = MIN(ARRAY_SIZE(mfn_list), xmar.nr_frames), done; - if ( rc ) + rc = _acquire_resource(d, xmar.type, xmar.id, xmar.frame, + todo, mfn_list); + if ( rc < 0 ) + goto out; + + done = rc; + rc = 0; + if ( done == 0 || done > todo ) + { + ASSERT_UNREACHABLE(); + rc = -EINVAL; goto out; + } + /* Adjust guest frame_list appropriately. 
*/ if ( !paging_mode_translate(currd) ) { - if ( copy_to_guest(xmar.frame_list, mfn_list, xmar.nr_frames) ) + if ( copy_to_guest(xmar.frame_list, mfn_list, done) ) rc = -EFAULT; } else @@ -1229,10 +1261,10 @@ static int acquire_resource( xen_pfn_t gfn_list[ARRAY_SIZE(mfn_list)]; unsigned int i; - if ( copy_from_guest(gfn_list, xmar.frame_list, xmar.nr_frames) ) + if ( copy_from_guest(gfn_list, xmar.frame_list, done) ) rc = -EFAULT; - for ( i = 0; !rc && i < xmar.nr_frames; i++ ) + for ( i = 0; !rc && i < done; i++ ) { rc = set_foreign_p2m_entry(currd, d, gfn_list[i], _mfn(mfn_list[i])); @@ -1241,7 +1273,32 @@ static int acquire_resource( rc = -EIO; } } - } while ( 0 ); + + if ( rc ) + goto out; + + xmar.nr_frames -= done; + xmar.frame += done; + guest_handle_add_offset(xmar.frame_list, done); + start_extent += done; + + /* + * Explicit continuation request from _acquire_resource(), or we've + * still got more work to do. + */ + if ( done < todo || + (xmar.nr_frames && hypercall_preempt_check()) ) + { + rc = hypercall_create_continuation( + __HYPERVISOR_memory_op, "lh", + XENMEM_acquire_resource | (start_extent << MEMOP_EXTENT_SHIFT), + arg); + goto out; + } + + } while ( xmar.nr_frames ); + + rc = 0; out: rcu_unlock_domain(d); @@ -1708,7 +1765,8 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) case XENMEM_acquire_resource: rc = acquire_resource( - guest_handle_cast(arg, xen_mem_acquire_resource_t)); + guest_handle_cast(arg, xen_mem_acquire_resource_t), + start_extent); break; default: -- generated by git-patchbot for /home/xen/git/xen.git#staging
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |