Re: [PATCH v2 09/11] xen/memory: Fix mapping grant tables with XENMEM_acquire_resource
On 22.09.2020 20:24, Andrew Cooper wrote:
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -4632,7 +4632,6 @@ int arch_acquire_resource(struct domain *d, unsigned int type,
> if ( id != (unsigned int)ioservid )
> break;
>
> - rc = 0;
> for ( i = 0; i < nr_frames; i++ )
> {
> mfn_t mfn;
> @@ -4643,6 +4642,9 @@ int arch_acquire_resource(struct domain *d, unsigned int type,
>
> mfn_list[i] = mfn_x(mfn);
> }
> + if ( i == nr_frames )
> + /* Success. Passed nr_frames back to the caller. */
> + rc = nr_frames;
With this, shouldn't the return type of the function be changed to
"long"? I realize that's no an issue with XENMEM_resource_ioreq_server
specifically, but I mean the general case.
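For illustration, the shape of change I have in mind is just the widened
return type (a sketch only; the parameter list here mirrors the
_acquire_resource() declaration quoted further down and may not match the
tree exactly):

    long arch_acquire_resource(struct domain *d, unsigned int type,
                               unsigned int id, unsigned long frame,
                               unsigned int nr_frames, xen_pfn_t mfn_list[]);

That way -errno keeps meaning failure, while a success value equal to
nr_frames (an unsigned int) can't be truncated on 64-bit.
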
> --- a/xen/common/compat/memory.c
> +++ b/xen/common/compat/memory.c
> @@ -402,23 +402,10 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
> case XENMEM_acquire_resource:
> {
> xen_pfn_t *xen_frame_list = NULL;
> - unsigned int max_nr_frames;
>
> if ( copy_from_guest(&cmp.mar, compat, 1) )
> return -EFAULT;
>
> - /*
> - * The number of frames handled is currently limited to a
> - * small number by the underlying implementation, so the
> - * scratch space should be sufficient for bouncing the
> - * frame addresses.
> - */
> - max_nr_frames = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.mar)) /
> - sizeof(*xen_frame_list);
> -
> - if ( cmp.mar.nr_frames > max_nr_frames )
> - return -E2BIG;
> -
> /* Marshal the frame list in the remainder of the xlat space. */
> if ( !compat_handle_is_null(cmp.mar.frame_list) )
> xen_frame_list = (xen_pfn_t *)(nat.mar + 1);
> @@ -432,6 +419,28 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
>
> if ( xen_frame_list && cmp.mar.nr_frames )
> {
> + unsigned int xlat_max_frames =
> + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.mar)) /
> + sizeof(*xen_frame_list);
> +
> + if ( start_extent >= nat.mar->nr_frames )
> + return -EINVAL;
As with patch 2, I don't see why the == case should result in an
error, at the very least when start_extent is zero.
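In sketch form, taking only the minimal reading (i.e. just the degenerate
start_extent == nr_frames == 0 case stops being an error; variable names
as in the quoted code, and this is illustration, not a concrete request):

    if ( start_extent && start_extent >= nat.mar->nr_frames )
        return -EINVAL;
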
> @@ -611,6 +622,21 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
> break;
> }
>
> + if ( split < 0 )
> + {
> + /* Contintuation occured. */
Nit: Stray 't'. And missing 'r'?
> @@ -636,15 +662,45 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
> compat_frame_list[i] = frame;
> }
>
> - if ( __copy_to_compat_offset(cmp.mar.frame_list, 0,
> - compat_frame_list,
> - cmp.mar.nr_frames) )
> + if ( __copy_to_compat_offset(
> + cmp.mar.frame_list, start_extent,
> + compat_frame_list, done) )
> return -EFAULT;
> }
> - break;
> +
> + start_extent += done;
> +
> + /* Completely done. */
> + if ( start_extent == cmp.mar.nr_frames )
> + break;
> +
> + /*
> + * Done a "full" batch, but we were limited by space in the xlat
> + * area. Go around the loop again without necesserily returning
> + * to guest context.
> + */
> + if ( done == nat.mar->nr_frames )
> + {
> + split = 1;
> + break;
> + }
> +
> + /* Explicit continuation request from a higher level. */
> + if ( done < nat.mar->nr_frames )
> + return hypercall_create_continuation(
> + __HYPERVISOR_memory_op, "ih",
> + op | (start_extent << MEMOP_EXTENT_SHIFT), compat);
> +
> + /*
> + * Well... Somethings gone wrong with the two levels of chunking.
> + * My condolences to whomever next has to debug this mess.
> + */
Any suggestions for how to overcome this "mess"?
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -4105,6 +4105,9 @@ int gnttab_acquire_resource(
> for ( i = 0; i < nr_frames; ++i )
> mfn_list[i] = virt_to_mfn(vaddrs[frame + i]);
>
> + /* Success. Passed nr_frames back to the caller. */
Nit: "Pass"?
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -1027,17 +1027,31 @@ static unsigned int resource_max_frames(struct domain *d,
> }
> }
>
> +/*
> + * Returns -errno on error, or positive in the range [1, nr_frames] on
> + * success. Returning less than nr_frames contitutes a request for a
> + * continuation.
> + */
> +static int _acquire_resource(
> +    struct domain *d, unsigned int type, unsigned int id, unsigned long frame,
> +    unsigned int nr_frames, xen_pfn_t mfn_list[])
As per the comment, the return type may again want to be "long" here.
That said, I realize the restriction to (UINT_MAX >> MEMOP_EXTENT_SHIFT)
makes this (and the other place above) only a latent issue for now, so
it may well be fine to leave as is.
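If it helps, the "latent" aspect could even be recorded in code, e.g.
something along these lines next to the bound check (illustrative only):

    /* nr_frames is capped well below INT_MAX, so an int return can't truncate. */
    BUILD_BUG_ON((UINT_MAX >> MEMOP_EXTENT_SHIFT) > INT_MAX);
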
> @@ -1087,26 +1098,47 @@ static int acquire_resource(
> goto out;
> }
>
> + /*
> + * Limiting nr_frames at (UINT_MAX >> MEMOP_EXTENT_SHIFT) isn't ideal. If
> + * it ever becomes a practical problem, we can switch to mutating
> + * xmar.{frame,nr_frames,frame_list} in guest memory.
> + */
For 64-bit, extending the limit to ULONG_MAX >> MEMOP_EXTENT_SHIFT
may also be an option.
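For reference, the origin of the bound in sketch form (the shift value and
the helper names here are mine, purely for illustration, not taken from the
patch):

    #define MEMOP_EXTENT_SHIFT 6   /* assumed: low bits carry the sub-op */
    #define MEMOP_CMD_MASK     ((1UL << MEMOP_EXTENT_SHIFT) - 1)

    /*
     * On continuation, start_extent rides in the bits above the command,
     * hence the (UINT_MAX >> MEMOP_EXTENT_SHIFT) ceiling today, or the
     * ULONG_MAX variant if the full 64-bit cmd width were used.
     */
    static inline unsigned long memop_encode(unsigned int op,
                                             unsigned long start_extent)
    {
        return (op & MEMOP_CMD_MASK) | (start_extent << MEMOP_EXTENT_SHIFT);
    }

    static inline unsigned long memop_start_extent(unsigned long cmd)
    {
        return cmd >> MEMOP_EXTENT_SHIFT;
    }
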
> + rc = -EINVAL;
> + if ( start_extent >= xmar.nr_frames ||
Again, at least when start_extent is zero, == should not result in an
error.
> + xmar.nr_frames > (UINT_MAX >> MEMOP_EXTENT_SHIFT) )
> + goto out;
> +
> + /* Adjust for work done on previous continuations. */
> + xmar.nr_frames -= start_extent;
> + xmar.frame += start_extent;
> + guest_handle_add_offset(xmar.frame_list, start_extent);
> +
> do {
> - switch ( xmar.type )
> - {
> - case XENMEM_resource_grant_table:
> - rc = gnttab_acquire_resource(d, xmar.id, xmar.frame, xmar.nr_frames,
> - mfn_list);
> - break;
> + /*
> + * Arbitrary size. Not too much stack space, and a reasonable stride
> + * for continutation checks.
Nit: Stray 't' again.
> @@ -1126,7 +1158,32 @@ static int acquire_resource(
> rc = -EIO;
> }
> }
> - } while ( 0 );
> +
> + if ( rc )
> + goto out;
> +
> + xmar.nr_frames -= done;
> + xmar.frame += done;
> + guest_handle_add_offset(xmar.frame_list, done);
> + start_extent += done;
> +
> + /*
> + * Explicit contination request from _acquire_resource(), or we've
Nit: Missing 'u' this time round.
Jan