
Re: [XEN PATCH v7 14/20] xen/arm: ffa: support guest FFA_PARTITION_INFO_GET


  • To: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
  • From: Bertrand Marquis <Bertrand.Marquis@xxxxxxx>
  • Date: Fri, 3 Mar 2023 09:50:53 +0000
  • Cc: Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Marc Bonnici <Marc.Bonnici@xxxxxxx>, Achin Gupta <Achin.Gupta@xxxxxxx>, Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>
  • Delivery-date: Fri, 03 Mar 2023 09:51:24 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Hi Jens,

> On 22 Feb 2023, at 16:33, Jens Wiklander <jens.wiklander@xxxxxxxxxx> wrote:
> 
> Adds support in the mediator to handle FFA_PARTITION_INFO_GET requests
> from a guest. The requests are forwarded to the SPMC and the response is
> translated according to the FF-A version in use by the guest.
> 
> Using FFA_PARTITION_INFO_GET changes the owner of the RX buffer to the
> caller (the guest in this case), so once it is done with the buffer it
> must be released using FFA_RX_RELEASE before another call can be made.
> 
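(Restating the protocol described above for other reviewers; roughly:

    guest                        mediator / SPMC
    FFA_PARTITION_INFO_GET  ---> descriptors written to the guest RX buffer,
                                 ownership of the buffer passes to the guest
    ... guest parses the descriptors ...
    FFA_RX_RELEASE          ---> ownership returns, buffer reusable again

so the mediator has to track the ownership of both the guest RX buffer and
our own RX buffer shared with the SPMC.)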
> Signed-off-by: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
> ---
> xen/arch/arm/tee/ffa.c | 126 ++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 124 insertions(+), 2 deletions(-)
> 
> diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
> index 953b6dfd5eca..3571817c0bcd 100644
> --- a/xen/arch/arm/tee/ffa.c
> +++ b/xen/arch/arm/tee/ffa.c
> @@ -141,6 +141,12 @@
> #define FFA_MSG_POLL                    0x8400006AU
> 
> /* Partition information descriptor */
> +struct ffa_partition_info_1_0 {
> +    uint16_t id;
> +    uint16_t execution_context;
> +    uint32_t partition_properties;
> +};
> +
> struct ffa_partition_info_1_1 {
>     uint16_t id;
>     uint16_t execution_context;
> @@ -157,9 +163,8 @@ struct ffa_ctx {
>     uint32_t guest_vers;
>     bool tx_is_mine;
>     bool interrupted;
> +    spinlock_t lock;
> };
> -
> -

This removes 2 empty lines (the previous patch wrongly added one),
but one empty line is required here.

> /* Negotiated FF-A version to use with the SPMC */
> static uint32_t ffa_version __ro_after_init;
> 
> @@ -173,10 +178,16 @@ static unsigned int subscr_vm_destroyed_count __read_mostly;
>  * Our rx/tx buffers shared with the SPMC.
>  *
>  * ffa_page_count is the number of pages used in each of these buffers.
> + *
> + * The RX buffer is protected from concurrent usage with ffa_rx_buffer_lock.
> + * Note that the SPMC is also tracking the ownership of our RX buffer so
> + * for calls which uses our RX buffer to deliver a result we must call
> + * ffa_rx_release() to let the SPMC know that we're done with the buffer.
>  */
> static void *ffa_rx __read_mostly;
> static void *ffa_tx __read_mostly;
> static unsigned int ffa_page_count __read_mostly;
> +static DEFINE_SPINLOCK(ffa_rx_buffer_lock);
> 
> static bool ffa_get_version(uint32_t *vers)
> {
> @@ -463,6 +474,98 @@ static uint32_t handle_rxtx_unmap(void)
>     return FFA_RET_OK;
> }
> 
> +static uint32_t handle_partition_info_get(uint32_t w1, uint32_t w2, uint32_t w3,
> +                                          uint32_t w4, uint32_t w5,
> +                                          uint32_t *count)
> +{
> +    bool query_count_only = w5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
> +    uint32_t w5_mask = 0;
> +    uint32_t ret = FFA_RET_DENIED;
> +    struct domain *d = current->domain;
> +    struct ffa_ctx *ctx = d->arch.tee;
> +
> +    /*
> +     * FF-A v1.0 has w5 MBZ while v1.1 allows
> +     * FFA_PARTITION_INFO_GET_COUNT_FLAG to be non-zero.
> +     */
> +    if ( ctx->guest_vers == FFA_VERSION_1_1 )
> +        w5_mask = FFA_PARTITION_INFO_GET_COUNT_FLAG;
> +    if ( w5 & ~w5_mask )
> +        return FFA_RET_INVALID_PARAMETERS;
> +
> +    if ( query_count_only )
> +        return ffa_partition_info_get(w1, w2, w3, w4, w5, count);

This code seems a bit too complex.
I would suggest the following:

if ( w5 & FFA_PARTITION_INFO_GET_COUNT_FLAG )
{
    if ( ctx->guest_vers == FFA_VERSION_1_1 )
        return ffa_partition_info_get(w1, w2, w3, w4, w5, count);
    else
        return FFA_RET_INVALID_PARAMETERS;
}
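This makes the v1.0/v1.1 difference explicit and the w5_mask variable can
go away entirely; the remaining MBZ check then reduces to something like
(sketch):

    if ( w5 & ~FFA_PARTITION_INFO_GET_COUNT_FLAG )
        return FFA_RET_INVALID_PARAMETERS;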

> +
> +    if ( !ffa_page_count )
> +        return FFA_RET_DENIED;
> +
> +    spin_lock(&ctx->lock);
> +    spin_lock(&ffa_rx_buffer_lock);
> +    if ( !ctx->page_count || !ctx->tx_is_mine )

If I understand correctly, tx_is_mine protects the guest RX buffer until
rx_release is called by the guest, so that we do not write to it before
the guest has retrieved the data from it.

The name is very misleading; maybe rx_is_writeable or rx_is_free would be better?

Also, it would be more efficient to test it before taking ffa_rx_buffer_lock.
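Something along these lines (untested sketch, reusing the names from the
patch):

    if ( !ffa_page_count )
        return FFA_RET_DENIED;

    spin_lock(&ctx->lock);
    /* Check the guest buffer state first, without the global lock */
    if ( !ctx->page_count || !ctx->tx_is_mine )
    {
        spin_unlock(&ctx->lock);
        return FFA_RET_DENIED;
    }
    spin_lock(&ffa_rx_buffer_lock);
    ret = ffa_partition_info_get(w1, w2, w3, w4, w5, count);
    ...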


> +        goto out;
> +    ret = ffa_partition_info_get(w1, w2, w3, w4, w5, count);
> +    if ( ret )
> +        goto out;
> +
> +    if ( ctx->guest_vers == FFA_VERSION_1_0 )
> +    {
> +        size_t n;
> +        struct ffa_partition_info_1_1 *src = ffa_rx;
> +        struct ffa_partition_info_1_0 *dst = ctx->rx;
> +
> +        if ( ctx->page_count * FFA_PAGE_SIZE < *count * sizeof(*dst) )
> +        {
> +            ret = FFA_RET_NO_MEMORY;
> +            goto out_rx_release;
> +        }
> +
> +        for ( n = 0; n < *count; n++ )
> +        {
> +            dst[n].id = src[n].id;
> +            dst[n].execution_context = src[n].execution_context;
> +            dst[n].partition_properties = src[n].partition_properties;
> +        }
> +    }
> +    else
> +    {
> +        size_t sz = *count * sizeof(struct ffa_partition_info_1_1);
> +
> +        if ( ctx->page_count * FFA_PAGE_SIZE < sz )
> +        {
> +            ret = FFA_RET_NO_MEMORY;
> +            goto out_rx_release;
> +        }
> +
> +
> +        memcpy(ctx->rx, ffa_rx, sz);
> +    }
> +    ctx->tx_is_mine = false;

At this point there is no reason to keep holding ctx->lock.

> +out_rx_release:
> +    ffa_rx_release();

There should be no case where we do the release without unlocking.

It might be cleaner to have 2 functions, ffa_rx_get and ffa_rx_release,
handling both the lock and the rx_release message, as sketched below.
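Something like this (rough sketch; the current low-level ffa_rx_release()
SMC wrapper would need another name, ffa_rx_release_smc() here is just an
example):

    static void ffa_rx_get(void)
    {
        spin_lock(&ffa_rx_buffer_lock);
    }

    static void ffa_rx_release(void)
    {
        /* Hand our RX buffer back to the SPMC before letting others use it */
        ffa_rx_release_smc();
        spin_unlock(&ffa_rx_buffer_lock);
    }

That way a caller can never do the release without also unlocking.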

> +out:
> +    spin_unlock(&ffa_rx_buffer_lock);

This should stay with ffa_rx_release.

Cheers
Bertrand

> +    spin_unlock(&ctx->lock);
> +
> +    return ret;
> +}
> +
> +static uint32_t handle_rx_release(void)
> +{
> +    uint32_t ret = FFA_RET_DENIED;
> +    struct domain *d = current->domain;
> +    struct ffa_ctx *ctx = d->arch.tee;
> +
> +    spin_lock(&ctx->lock);
> +    if ( !ctx->page_count || ctx->tx_is_mine )
> +        goto out;
> +    ret = FFA_RET_OK;
> +    ctx->tx_is_mine = true;
> +out:
> +    spin_unlock(&ctx->lock);
> +
> +    return ret;
> +}
> +
> static void handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid)
> {
>     struct arm_smccc_1_2_regs arg = { .a0 = fid, };
> @@ -528,6 +631,7 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
>     uint32_t fid = get_user_reg(regs, 0);
>     struct domain *d = current->domain;
>     struct ffa_ctx *ctx = d->arch.tee;
> +    uint32_t count;
>     int e;
> 
>     if ( !ctx )
> @@ -559,6 +663,24 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
>         else
>             set_regs_success(regs, 0, 0);
>         return true;
> +    case FFA_PARTITION_INFO_GET:
> +        e = handle_partition_info_get(get_user_reg(regs, 1),
> +                                      get_user_reg(regs, 2),
> +                                      get_user_reg(regs, 3),
> +                                      get_user_reg(regs, 4),
> +                                      get_user_reg(regs, 5), &count);
> +        if ( e )
> +            set_regs_error(regs, e);
> +        else
> +            set_regs_success(regs, count, 0);
> +        return true;
> +    case FFA_RX_RELEASE:
> +        e = handle_rx_release();
> +        if ( e )
> +            set_regs_error(regs, e);
> +        else
> +            set_regs_success(regs, 0, 0);
> +        return true;
>     case FFA_MSG_SEND_DIRECT_REQ_32:
> #ifdef CONFIG_ARM_64
>     case FFA_MSG_SEND_DIRECT_REQ_64:
> -- 
> 2.34.1
> 
