
Re: [XEN PATCH v11 03/14] xen/arm: ffa: send guest events to Secure Partitions


  • To: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
  • From: Bertrand Marquis <Bertrand.Marquis@xxxxxxx>
  • Date: Wed, 16 Aug 2023 09:40:29 +0000
  • Cc: Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Marc Bonnici <Marc.Bonnici@xxxxxxx>, Achin Gupta <Achin.Gupta@xxxxxxx>, Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>
  • Delivery-date: Wed, 16 Aug 2023 09:40:59 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Hi Jens,

> On 31 Jul 2023, at 14:15, Jens Wiklander <jens.wiklander@xxxxxxxxxx> wrote:
> 
> The FF-A specification defines framework messages sent as direct
> requests when certain events occur, for instance when a VM (guest) is
> created or destroyed. Only SPs which have subscribed to these events
> will receive them. An SP can subscribe to these messages in its
> partition properties.
> 
> Adds a check that the SP supports the needed FF-A features
> FFA_PARTITION_INFO_GET and FFA_RX_RELEASE.
> 
> The partition properties of each SP are retrieved with
> FFA_PARTITION_INFO_GET which returns the information in our RX buffer.
> Using FFA_PARTITION_INFO_GET changes the owner of the RX buffer to the
> caller (us), so once we're done with the buffer it must be released
> using FFA_RX_RELEASE before another call can be made.
> 
> Signed-off-by: Jens Wiklander <jens.wiklander@xxxxxxxxxx>

One NIT below, which might be fixed on commit.

With that fixed:
Reviewed-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>
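
As a side note for the archive: the RX buffer ownership rule described
in the commit message boils down to the call pattern below. This is a
minimal sketch using the helpers introduced by this patch;
consume_partition_info() is a made-up placeholder and error handling
is elided:

    static void example_list_sps(void)
    {
        uint32_t count;

        /*
         * On success the SPMC hands ownership of the RX buffer to us,
         * the caller, so the descriptors in ffa_rx remain valid here.
         */
        if ( !ffa_partition_info_get(0, 0, 0, 0, 0, &count) )
            consume_partition_info(ffa_rx, count); /* hypothetical */

        /*
         * Hand the buffer back to the SPMC; until that is done no
         * further call that returns data in the RX buffer can be made.
         */
        ffa_rx_release();
    }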

> ---
> v10->v11:
> - Addressing comments and fixing a few style issues
> - Fixing how is_in_subscr_list() is used; it's supposed to take an
>  sp_id, not a vm_id.
> ---
> xen/arch/arm/tee/ffa.c | 234 ++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 233 insertions(+), 1 deletion(-)
> 
> diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
> index 072198a1326d..5af3e5eedc88 100644
> --- a/xen/arch/arm/tee/ffa.c
> +++ b/xen/arch/arm/tee/ffa.c
> @@ -160,14 +160,33 @@
> #define FFA_MSG_SEND                    0x8400006EU
> #define FFA_MSG_POLL                    0x8400006AU
> 
> +/* Partition information descriptor */
> +struct ffa_partition_info_1_1 {
> +    uint16_t id;
> +    uint16_t execution_context;
> +    uint32_t partition_properties;
> +    uint8_t uuid[16];
> +};
> +
> struct ffa_ctx {
>     /* FF-A version used by the guest */
>     uint32_t guest_vers;
> +    /*
> +     * Number of SPs that we have sent a VM created signal to, used in
> +     * ffa_domain_teardown() to know which SPs need to be signalled.
> +     */
> +    uint16_t create_signal_count;
> };
> 
> /* Negotiated FF-A version to use with the SPMC */
> static uint32_t __ro_after_init ffa_version;
> 
> +/* SPs subscribing to VM_CREATE and VM_DESTROYED events */
> +static uint16_t *subscr_vm_created __read_mostly;
> +static uint16_t subscr_vm_created_count __read_mostly;
> +static uint16_t *subscr_vm_destroyed __read_mostly;
> +static uint16_t subscr_vm_destroyed_count __read_mostly;
> +
> /*
>  * Our rx/tx buffers shared with the SPMC. FFA_RXTX_PAGE_COUNT is the
>  * number of pages used in each of these buffers.
> @@ -251,6 +270,87 @@ static int32_t ffa_rxtx_map(paddr_t tx_addr, paddr_t rx_addr,
>     return ffa_simple_call(FFA_RXTX_MAP_64, tx_addr, rx_addr, page_count, 0);
> }
> 
> +static int32_t ffa_partition_info_get(uint32_t w1, uint32_t w2, uint32_t w3,
> +                                      uint32_t w4, uint32_t w5,
> +                                      uint32_t *count)
> +{
> +    const struct arm_smccc_1_2_regs arg = {
> +        .a0 = FFA_PARTITION_INFO_GET,
> +        .a1 = w1,
> +        .a2 = w2,
> +        .a3 = w3,
> +        .a4 = w4,
> +        .a5 = w5,
> +    };
> +    struct arm_smccc_1_2_regs resp;
> +    uint32_t ret;
> +
> +    arm_smccc_1_2_smc(&arg, &resp);
> +
> +    ret = get_ffa_ret_code(&resp);
> +    if ( !ret )
> +        *count = resp.a2;
> +
> +    return ret;
> +}
> +
> +static int32_t ffa_rx_release(void)
> +{
> +    return ffa_simple_call(FFA_RX_RELEASE, 0, 0, 0, 0);
> +}
> +
> +static int32_t ffa_direct_req_send_vm(uint16_t sp_id, uint16_t vm_id,
> +                                      uint8_t msg)
> +{
> +    uint32_t exp_resp = FFA_MSG_FLAG_FRAMEWORK;
> +    unsigned int retry_count = 0;
> +    int32_t res;
> +
> +    if ( msg == FFA_MSG_SEND_VM_CREATED )
> +        exp_resp |= FFA_MSG_RESP_VM_CREATED;
> +    else if ( msg == FFA_MSG_SEND_VM_DESTROYED )
> +        exp_resp |= FFA_MSG_RESP_VM_DESTROYED;
> +    else
> +        return FFA_RET_INVALID_PARAMETERS;
> +
> +    do {
> +        const struct arm_smccc_1_2_regs arg = {
> +            .a0 = FFA_MSG_SEND_DIRECT_REQ_32,
> +            .a1 = sp_id,
> +            .a2 = FFA_MSG_FLAG_FRAMEWORK | msg,
> +            .a5 = vm_id,
> +        };
> +        struct arm_smccc_1_2_regs resp;
> +
> +        arm_smccc_1_2_smc(&arg, &resp);
> +        if ( resp.a0 != FFA_MSG_SEND_DIRECT_RESP_32 || resp.a2 != exp_resp )
> +        {
> +            /*
> +             * This is an invalid response, likely due to some error in the
> +             * implementation of the ABI.
> +             */
> +            return FFA_RET_INVALID_PARAMETERS;
> +        }
> +        res = resp.a3;
> +        if ( ++retry_count > 10 )
> +        {
> +            /*
> +             * TODO
> +             * FFA_RET_INTERRUPTED means that the SPMC has a pending
> +             * non-secure interrupt, we need a way of delivering that
> +             * non-secure interrupt.
> +             * FFA_RET_RETRY is the SP telling us that it's temporarily
> +             * blocked from handling the direct request, we need a generic
> +             * way to deal with this.
> +             * For now in both cases, give up after a few retries.
> +             */
> +            return res;
> +        }
> +    } while ( res == FFA_RET_INTERRUPTED || res == FFA_RET_RETRY );
> +
> +    return res;
> +}
> +
> static uint16_t get_vm_id(const struct domain *d)
> {
>     /* +1 since 0 is reserved for the hypervisor in FF-A */
> @@ -374,6 +474,8 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
> static int ffa_domain_init(struct domain *d)
> {
>     struct ffa_ctx *ctx;
> +    unsigned int n;
> +    int32_t res;
> 
>     if ( !ffa_version )
>         return -ENODEV;
> @@ -390,17 +492,68 @@ static int ffa_domain_init(struct domain *d)
> 
>     d->arch.tee = ctx;
> 
> +    for ( n = 0; n < subscr_vm_created_count; n++ )
> +    {
> +        res = ffa_direct_req_send_vm(subscr_vm_created[n], get_vm_id(d),
> +                                     FFA_MSG_SEND_VM_CREATED);
> +        if ( res )
> +        {
> +            printk(XENLOG_ERR "ffa: Failed to report creation of vm_id %u to 
>  %u: res %d\n",
> +                   get_vm_id(d), subscr_vm_created[n], res);
> +            ctx->create_signal_count = n;
> +            return -EIO;
> +        }
> +    }
> +    ctx->create_signal_count = subscr_vm_created_count;
> +
>     return 0;
> }
> 
> +static bool is_in_subscr_list(const uint16_t *subscr, uint16_t start,
> +                              uint16_t end, uint16_t sp_id)
> +{
> +    unsigned int n;
> +
> +    for ( n = start; n < end; n++ )
> +    {
> +        if ( subscr[n] == sp_id )
> +            return true;
> +    }
> +
> +    return false;
> +}
> +
> /* This function is supposed to undo what ffa_domain_init() has done */
> static int ffa_domain_teardown(struct domain *d)
> {
>     struct ffa_ctx *ctx = d->arch.tee;
> +    unsigned int n;
> +    int32_t res;
> 
>     if ( !ctx )
>         return 0;
> 
> +

NIT: this extra line should be removed.

@Julien: are you OK to fix that one on commit?

> +    for ( n = 0; n < subscr_vm_destroyed_count; n++ )
> +    {
> +        /*
> +         * Skip SPs subscribed to the VM created event that never was
> +         * notified of the VM creation due to an error during
> +         * ffa_domain_init().
> +         */
> +        if ( is_in_subscr_list(subscr_vm_created, ctx->create_signal_count,
> +                               subscr_vm_created_count,
> +                               subscr_vm_destroyed[n]) )
> +            continue;
> +
> +        res = ffa_direct_req_send_vm(subscr_vm_destroyed[n], get_vm_id(d),
> +                                     FFA_MSG_SEND_VM_DESTROYED);
> +
> +        if ( res )
> +            printk(XENLOG_ERR "ffa: Failed to report destruction of vm_id %u 
> to  %u: res %d\n",
> +                   get_vm_id(d), subscr_vm_destroyed[n], res);
> +    }
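
To make the skip window concrete (my reading of the code, with made-up
SP IDs): if subscr_vm_created is {0x8001, 0x8002, 0x8003} and the
VM_CREATED message to 0x8002 failed during ffa_domain_init(), then
create_signal_count is 1, so is_in_subscr_list() matches 0x8002 and
0x8003 here and only 0x8001 is sent VM_DESTROYED, assuming it also
subscribed to that event.
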
> +
>     XFREE(d->arch.tee);
> 
>     return 0;
> @@ -411,6 +564,81 @@ static int ffa_relinquish_resources(struct domain *d)
>     return 0;
> }
> 
> +static void uninit_subscribers(void)
> +{
> +    subscr_vm_created_count = 0;
> +    subscr_vm_destroyed_count = 0;
> +    XFREE(subscr_vm_created);
> +    XFREE(subscr_vm_destroyed);
> +}
> +
> +static bool init_subscribers(struct ffa_partition_info_1_1 *fpi, uint16_t count)
> +{
> +    uint16_t n;
> +    uint16_t c_pos;
> +    uint16_t d_pos;
> +
> +    subscr_vm_created_count = 0;
> +    subscr_vm_destroyed_count = 0;
> +    for ( n = 0; n < count; n++ )
> +    {
> +        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_CREATED )
> +            subscr_vm_created_count++;
> +        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_DESTROYED )
> +            subscr_vm_destroyed_count++;
> +    }
> +
> +    if ( subscr_vm_created_count )
> +        subscr_vm_created = xzalloc_array(uint16_t, subscr_vm_created_count);
> +    if ( subscr_vm_destroyed_count )
> +        subscr_vm_destroyed = xzalloc_array(uint16_t,
> +                                            subscr_vm_destroyed_count);
> +    if ( (subscr_vm_created_count && !subscr_vm_created) ||
> +         (subscr_vm_destroyed_count && !subscr_vm_destroyed) )
> +    {
> +        printk(XENLOG_ERR "ffa: Failed to allocate subscription lists\n");
> +        uninit_subscribers();
> +        return false;
> +    }
> +
> +    for ( c_pos = 0, d_pos = 0, n = 0; n < count; n++ )
> +    {
> +        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_CREATED )
> +            subscr_vm_created[c_pos++] = fpi[n].id;
> +        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_DESTROYED )
> +            subscr_vm_destroyed[d_pos++] = fpi[n].id;
> +    }
> +
> +    return true;
> +}
> +
> +static bool init_sps(void)
> +{
> +    bool ret = false;
> +    uint32_t count;
> +    int e;
> +
> +    e = ffa_partition_info_get(0, 0, 0, 0, 0, &count);
> +    if ( e )
> +    {
> +        printk(XENLOG_ERR "ffa: Failed to get list of SPs: %d\n", e);
> +        goto out;
> +    }
> +
> +    if ( count >= UINT16_MAX )
> +    {
> +        printk(XENLOG_ERR "ffa: Impossible number of SPs: %u\n", count);
> +        goto out;
> +    }
> +
> +    ret = init_subscribers(ffa_rx, count);
> +
> +out:
> +    ffa_rx_release();
> +
> +    return ret;
> +}
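
For the archive: the all-zero arguments passed to
ffa_partition_info_get() above correspond, as I read the FF-A spec, to
the nil UUID in w1-w4 with no flags in w5, which asks the SPMC to
return descriptors for all partitions, so count covers every SP in
the system.
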
> +
> static bool ffa_probe(void)
> {
>     uint32_t vers;
> @@ -462,7 +690,8 @@ static bool ffa_probe(void)
>      * TODO: Rework the code to allow domain to use a subset of the
>      * features supported.
>      */
> -    if (
> +    if ( !check_mandatory_feature(FFA_PARTITION_INFO_GET) ||
> +         !check_mandatory_feature(FFA_RX_RELEASE) ||
>          !check_mandatory_feature(FFA_RXTX_MAP_64) ||
>          !check_mandatory_feature(FFA_RXTX_UNMAP) ||
>          !check_mandatory_feature(FFA_MSG_SEND_DIRECT_REQ_32) )
> @@ -484,6 +713,9 @@ static bool ffa_probe(void)
>     }
>     ffa_version = vers;
> 
> +    if ( !init_sps() )
> +        goto err_free_ffa_tx;
> +
>     return true;
> 
> err_free_ffa_tx:
> -- 
> 2.34.1
> 

Cheers
Bertrand
