Re: [XEN PATCH v11 05/14] xen/arm: ffa: support guest FFA_PARTITION_INFO_GET
Hi Jens,

> On 31 Jul 2023, at 14:15, Jens Wiklander <jens.wiklander@xxxxxxxxxx> wrote:
>
> Adds support in the mediator to handle FFA_PARTITION_INFO_GET requests
> from a guest. The requests are forwarded to the SPMC and the response is
> translated according to the FF-A version in use by the guest.
>
> Using FFA_PARTITION_INFO_GET changes the owner of the RX buffer to the
> caller (the guest in this case), so once it is done with the buffer it
> must be released using FFA_RX_RELEASE before another call can be made.
>
> Signed-off-by: Jens Wiklander <jens.wiklander@xxxxxxxxxx>

Reviewed-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>

Cheers
Bertrand

> ---
> v10->v11
> - Fixing a typo in a comment and adding another comment as requested
> ---
>  xen/arch/arm/tee/ffa.c | 134 +++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 134 insertions(+)
>
> diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
> index 6c76c9885774..6c889bf97002 100644
> --- a/xen/arch/arm/tee/ffa.c
> +++ b/xen/arch/arm/tee/ffa.c
> @@ -166,7 +166,18 @@
>  #define FFA_MSG_SEND                    0x8400006EU
>  #define FFA_MSG_POLL                    0x8400006AU
>
> +/*
> + * Structs below ending with _1_0 are defined in FF-A-1.0-REL and
> + * structs ending with _1_1 are defined in FF-A-1.1-REL0.
> + */
> +
>  /* Partition information descriptor */
> +struct ffa_partition_info_1_0 {
> +    uint16_t id;
> +    uint16_t execution_context;
> +    uint32_t partition_properties;
> +};
> +
>  struct ffa_partition_info_1_1 {
>      uint16_t id;
>      uint16_t execution_context;
> @@ -189,6 +200,7 @@ struct ffa_ctx {
>       */
>      uint16_t create_signal_count;
>      bool rx_is_free;
> +    spinlock_t lock;
>  };
>
>  /* Negotiated FF-A version to use with the SPMC */
> @@ -203,9 +215,15 @@ static uint16_t subscr_vm_destroyed_count __read_mostly;
>  /*
>   * Our rx/tx buffers shared with the SPMC. FFA_RXTX_PAGE_COUNT is the
>   * number of pages used in each of these buffers.
> + *
> + * The RX buffer is protected from concurrent usage with ffa_rx_buffer_lock.
> + * Note that the SPMC is also tracking the ownership of our RX buffer so
> + * for calls which use our RX buffer to deliver a result we must call
> + * ffa_rx_release() to let the SPMC know that we're done with the buffer.
>   */
>  static void *ffa_rx __read_mostly;
>  static void *ffa_tx __read_mostly;
> +static DEFINE_SPINLOCK(ffa_rx_buffer_lock);
>
>  static bool ffa_get_version(uint32_t *vers)
>  {
> @@ -512,6 +530,103 @@ static uint32_t handle_rxtx_unmap(void)
>      return FFA_RET_OK;
>  }
>
> +static int32_t handle_partition_info_get(uint32_t w1, uint32_t w2, uint32_t w3,
> +                                         uint32_t w4, uint32_t w5,
> +                                         uint32_t *count)
> +{
> +    int32_t ret = FFA_RET_DENIED;
> +    struct domain *d = current->domain;
> +    struct ffa_ctx *ctx = d->arch.tee;
> +
> +    /*
> +     * FF-A v1.0 has w5 MBZ while v1.1 allows
> +     * FFA_PARTITION_INFO_GET_COUNT_FLAG to be non-zero.
> +     *
> +     * FFA_PARTITION_INFO_GET_COUNT is only using registers and not the
> +     * rxtx buffer so do the partition_info_get directly.
> +     */
> +    if ( w5 == FFA_PARTITION_INFO_GET_COUNT_FLAG &&
> +         ctx->guest_vers == FFA_VERSION_1_1 )
> +        return ffa_partition_info_get(w1, w2, w3, w4, w5, count);
> +    if ( w5 )
> +        return FFA_RET_INVALID_PARAMETERS;
> +
> +    if ( !ffa_rx )
> +        return FFA_RET_DENIED;
> +
> +    spin_lock(&ctx->lock);
> +    if ( !ctx->page_count || !ctx->rx_is_free )
> +        goto out;
> +    spin_lock(&ffa_rx_buffer_lock);
> +    ret = ffa_partition_info_get(w1, w2, w3, w4, w5, count);
> +    if ( ret )
> +        goto out_rx_buf_unlock;
> +    /*
> +     * ffa_partition_info_get() succeeded so we now own the RX buffer we
> +     * share with the SPMC. We must give it back using ffa_rx_release()
> +     * once we've copied the content.
> +     */
> +
> +    if ( ctx->guest_vers == FFA_VERSION_1_0 )
> +    {
> +        size_t n;
> +        struct ffa_partition_info_1_1 *src = ffa_rx;
> +        struct ffa_partition_info_1_0 *dst = ctx->rx;
> +
> +        if ( ctx->page_count * FFA_PAGE_SIZE < *count * sizeof(*dst) )
> +        {
> +            ret = FFA_RET_NO_MEMORY;
> +            goto out_rx_release;
> +        }
> +
> +        for ( n = 0; n < *count; n++ )
> +        {
> +            dst[n].id = src[n].id;
> +            dst[n].execution_context = src[n].execution_context;
> +            dst[n].partition_properties = src[n].partition_properties;
> +        }
> +    }
> +    else
> +    {
> +        size_t sz = *count * sizeof(struct ffa_partition_info_1_1);
> +
> +        if ( ctx->page_count * FFA_PAGE_SIZE < sz )
> +        {
> +            ret = FFA_RET_NO_MEMORY;
> +            goto out_rx_release;
> +        }
> +
> +
> +        memcpy(ctx->rx, ffa_rx, sz);
> +    }
> +    ctx->rx_is_free = false;
> +out_rx_release:
> +    ffa_rx_release();
> +out_rx_buf_unlock:
> +    spin_unlock(&ffa_rx_buffer_lock);
> +out:
> +    spin_unlock(&ctx->lock);
> +
> +    return ret;
> +}
> +
> +static int32_t handle_rx_release(void)
> +{
> +    int32_t ret = FFA_RET_DENIED;
> +    struct domain *d = current->domain;
> +    struct ffa_ctx *ctx = d->arch.tee;
> +
> +    spin_lock(&ctx->lock);
> +    if ( !ctx->page_count || ctx->rx_is_free )
> +        goto out;
> +    ret = FFA_RET_OK;
> +    ctx->rx_is_free = true;
> +out:
> +    spin_unlock(&ctx->lock);
> +
> +    return ret;
> +}
> +
>  static void handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid)
>  {
>      struct arm_smccc_1_2_regs arg = { .a0 = fid, };
> @@ -568,6 +683,7 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
>      uint32_t fid = get_user_reg(regs, 0);
>      struct domain *d = current->domain;
>      struct ffa_ctx *ctx = d->arch.tee;
> +    uint32_t count;
>      int e;
>
>      if ( !ctx )
> @@ -597,6 +713,24 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
>          else
>              set_regs_success(regs, 0, 0);
>          return true;
> +    case FFA_PARTITION_INFO_GET:
> +        e = handle_partition_info_get(get_user_reg(regs, 1),
> +                                      get_user_reg(regs, 2),
> +                                      get_user_reg(regs, 3),
> +                                      get_user_reg(regs, 4),
> +                                      get_user_reg(regs, 5), &count);
> +        if ( e )
> +            set_regs_error(regs, e);
> +        else
> +            set_regs_success(regs, count, 0);
> +        return true;
> +    case FFA_RX_RELEASE:
> +        e = handle_rx_release();
> +        if ( e )
> +            set_regs_error(regs, e);
> +        else
> +            set_regs_success(regs, 0, 0);
> +        return true;
>      case FFA_MSG_SEND_DIRECT_REQ_32:
>      case FFA_MSG_SEND_DIRECT_REQ_64:
>          handle_msg_send_direct_req(regs, fid);
> --
> 2.34.1
>
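For readers following the thread, the guest-visible flow this patch enables looks roughly like the sketch below. It is a minimal, hypothetical illustration, not Xen or guest-kernel code: ffa_smc() stands in for whatever SMC conveyor the guest OS provides and is stubbed so the example compiles; only the function IDs and the w0/w2 return convention come from the FF-A spec and the handlers above.

/*
 * Hypothetical guest-side sketch of FFA_PARTITION_INFO_GET followed by
 * FFA_RX_RELEASE. Function IDs are from the FF-A spec; ffa_smc() is a
 * stand-in for the guest's SMC conveyor.
 */
#include <stdint.h>

#define FFA_ERROR                         0x84000060U
#define FFA_RX_RELEASE                    0x84000065U
#define FFA_PARTITION_INFO_GET            0x84000068U
#define FFA_PARTITION_INFO_GET_COUNT_FLAG 0x1U

struct ffa_ret {
    uint32_t w0; /* FFA_SUCCESS_32 or FFA_ERROR */
    uint32_t w2; /* partition count on success, error code on failure */
};

/* Hypothetical conveyor: fid in w0, arguments in w1-w5. */
static struct ffa_ret ffa_smc(uint32_t fid, uint32_t w1, uint32_t w2,
                              uint32_t w3, uint32_t w4, uint32_t w5)
{
    struct ffa_ret ret = { 0 };

    /* Stub: a real guest would trap to the hypervisor here. */
    return ret;
}

int32_t list_partitions(void)
{
    struct ffa_ret r;

    /*
     * FF-A 1.1 guests may ask for the count alone; this variant stays
     * in registers, so RX buffer ownership is not transferred.
     */
    r = ffa_smc(FFA_PARTITION_INFO_GET, 0, 0, 0, 0,
                FFA_PARTITION_INFO_GET_COUNT_FLAG);
    if ( r.w0 == FFA_ERROR )
        return (int32_t)r.w2;

    /*
     * With w5 == 0 the descriptors land in our RX buffer and we become
     * its owner; handle_partition_info_get() above denies further such
     * calls (FFA_RET_DENIED) until the buffer is released.
     */
    r = ffa_smc(FFA_PARTITION_INFO_GET, 0, 0, 0, 0, 0);
    if ( r.w0 == FFA_ERROR )
        return (int32_t)r.w2;

    /* ... consume r.w2 descriptors from the mapped RX buffer ... */

    /* Hand the buffer back; handle_rx_release() flips rx_is_free. */
    ffa_smc(FFA_RX_RELEASE, 0, 0, 0, 0, 0);
    return 0;
}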
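The reason the v1.0 path in handle_partition_info_get() copies field by field rather than doing a single memcpy() is descriptor layout: FF-A 1.1 extends the partition info descriptor with a 16-byte UUID, so entries differ in both size and stride between the two versions. A minimal sketch of the difference follows; the uuid field shape is taken from the FF-A 1.1 spec, as the quoted hunk is cut off before the end of the 1.1 struct.

#include <assert.h>
#include <stdint.h>

struct ffa_partition_info_1_0 {
    uint16_t id;
    uint16_t execution_context;
    uint32_t partition_properties;
};                              /* 8 bytes per entry */

struct ffa_partition_info_1_1 {
    uint16_t id;
    uint16_t execution_context;
    uint32_t partition_properties;
    uint8_t uuid[16];           /* added in FF-A 1.1 */
};                              /* 24 bytes per entry */

int main(void)
{
    /* Entry n sits at offset 8 * n for v1.0 but 24 * n for v1.1,
     * hence the per-field copy loop when translating for v1.0 guests. */
    assert(sizeof(struct ffa_partition_info_1_0) == 8);
    assert(sizeof(struct ffa_partition_info_1_1) == 24);
    return 0;
}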