[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [XEN PATCH 4/6] xen/arm: ffa: separate partition info get routines


  • To: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
  • From: Bertrand Marquis <Bertrand.Marquis@xxxxxxx>
  • Date: Wed, 27 Mar 2024 13:42:19 +0000
  • Accept-language: en-GB, en-US
  • Arc-authentication-results: i=2; mx.microsoft.com 1; spf=pass (sender ip is 63.35.35.123) smtp.rcpttodomain=lists.xenproject.org smtp.mailfrom=arm.com; dmarc=pass (p=none sp=none pct=100) action=none header.from=arm.com; dkim=pass (signature was verified) header.d=armh.onmicrosoft.com; arc=pass (0 oda=1 ltdi=1 spf=[1,1,smtp.mailfrom=arm.com] dkim=[1,1,header.d=arm.com] dmarc=[1,1,header.from=arm.com])
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=arm.com; dmarc=pass action=none header.from=arm.com; dkim=pass header.d=arm.com; arc=none
  • Arc-message-signature: i=2; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=0pvrdkGgNjuEybFpXk2LfI42H3Yh77m/UGD06EenTKI=; b=gBSA2Vn4tSmhctQgwzcWKhzpFkDLDvEll94kAwpNQFf6dGoWmmqzDa8QVQ3xod34gQ+Wu7bdHGFx/ii1zXYD4v2y6NQi2+UDRhMs2kYtoAgFw7MUUmZq5vX+ifFj6dtMGpc8fZRkjpIjvh8/7VZynzvEsempMaj8OSO8V94hq3sRQ/Xe21JxuGgKZ4d8Ukvw+mTKnjY6rri2fFGXLXJy4D8QAzNmvpnzGGeLkbtduM3wJrweUwepgEqo1NAtdk7YkY1UUaltwCqCPz5NcnJGQuV0rSLDYzjFAyZmUjQ/m09HALeO7qy1zeTPb0nBI+Mo4z1BVIAEwiKGswO0ER9dFw==
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=0pvrdkGgNjuEybFpXk2LfI42H3Yh77m/UGD06EenTKI=; b=nZ3Kw+S21YsFINOips8YOQmMzZfwJuOYV6DA2dFqDzXqgoZTFHjxwDAJ2jwB8r3gfq6HERT9kP3ToHxwz7N5E6Q1nPoFYLEhpsts6lJgs8NsWviy0qJX/tO9DlDeA4eE+Ye67pZCFMWTSFqrDMby/41Q5zZXPeyyg+ROgZmO0AFRvRSS3ag8zIQ3E4PKwrm962CHfHXMtEjV4BXZFuSqRg2DFIZ6GRLlqIhLXISQNwNwKCanMkZ5dLFll9Ygx//lgvJ2pE3bYmKaHwtIWjxO8iKQdXNo3vOMp1yx7Fh9j0V84g2LdmR4jGN5lzIKO715beNrLOLHKbN4wT4ZWk4DSQ==
  • Arc-seal: i=2; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=pass; b=XU5Rypcy8j0MfuutyqjdSDD5zjQCEFAIr2hcoooWBgTbPhDJeD/mCsPHvV5hQHGij3tVdUZZsTq+ONaY+wZxjrg6gOp+C+IrL8HW8a/RmszOPYZqAqCmwy8+8jqG4I+1mqQcrOFu9pdMYygfC7IU3Or11xL/jdsTnW0RoDXBprJ3n6T4weaQBjGw0bTwTdvECKq9MDP+YB7DCCdFrvumN/fkiofC9ZLs7rUU9D9AfatHQwI7ghpP95J3tsOxaaBXymGu6VaPEd5bsbzuqPAqjIdIGp+vujkBNT+CDPk0hE2jZIJGxnOrpjA22HLfyG5a4pFtJca9ZLVDf56brAOmjw==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; b=gcbSKUtHaGoQ7Vx9XuqqsnYlF3FI1blnEa9xSGl1a/cricGUT7E827ONF5z5t1EcWtF0igmFZYuqHXrDfXFWihEBA9YhQYtEUWvAhmSaydLUs/8u7Nu82/qjI2ezhwMZ2cipZBe8d5vZkQkf+tbNV64/xO2Sq22x4gVoBOigfG62KfRDYlWsQyr69LPSlu+05bCSN0K0J20Nw4LrW431MZDqNazJFSc+qQ7UGRSC+hYL5XAeMl3ahgYlRt3XRA0j8Aj9uhvMAAL8O5EquGmIrXVJ6G/XEhYQJhzQAl12HPdGTqBp6mhL6s4Ra5OkV7PJ4H4yXP4H5Yti2NgH/agppA==
  • Authentication-results-original: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=arm.com;
  • Cc: Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>, "patches@xxxxxxxxxx" <patches@xxxxxxxxxx>, Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>
  • Delivery-date: Wed, 27 Mar 2024 13:42:38 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Nodisclaimer: true
  • Original-authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=arm.com;
  • Thread-index: AQHafphSdluTj/kw9U66NBMqBbhqTrFLm/CA
  • Thread-topic: [XEN PATCH 4/6] xen/arm: ffa: separate partition info get routines

Hi Jens,

> On 25 Mar 2024, at 10:39, Jens Wiklander <jens.wiklander@xxxxxxxxxx> wrote:
> 
> Move partition info get routines into a separate file for easier
> navigation in the source code.
> 
> Add ffa_partinfo_init(), ffa_partinfo_domain_init(), and
> ffa_partinfo_domain_destroy() to handle the ffa_partinfo internal things
> on initialization and teardown.
> 
> Signed-off-by: Jens Wiklander <jens.wiklander@xxxxxxxxxx>

Reviewed-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>

Cheers
Bertrand

> ---
> xen/arch/arm/tee/Makefile       |   1 +
> xen/arch/arm/tee/ffa.c          | 359 +-----------------------------
> xen/arch/arm/tee/ffa_partinfo.c | 373 ++++++++++++++++++++++++++++++++
> xen/arch/arm/tee/ffa_private.h  |  14 +-
> 4 files changed, 398 insertions(+), 349 deletions(-)
> create mode 100644 xen/arch/arm/tee/ffa_partinfo.c
> 
> diff --git a/xen/arch/arm/tee/Makefile b/xen/arch/arm/tee/Makefile
> index 0e683d23aa9d..be644fba8055 100644
> --- a/xen/arch/arm/tee/Makefile
> +++ b/xen/arch/arm/tee/Makefile
> @@ -1,4 +1,5 @@
> obj-$(CONFIG_FFA) += ffa.o
> obj-$(CONFIG_FFA) += ffa_shm.o
> +obj-$(CONFIG_FFA) += ffa_partinfo.o
> obj-y += tee.o
> obj-$(CONFIG_OPTEE) += optee.o
> diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
> index db36292dc52f..7a2803881420 100644
> --- a/xen/arch/arm/tee/ffa.c
> +++ b/xen/arch/arm/tee/ffa.c
> @@ -70,20 +70,6 @@
>  * structs ending with _1_1 are defined in FF-A-1.1-REL0.
>  */
> 
> -/* Partition information descriptor */
> -struct ffa_partition_info_1_0 {
> -    uint16_t id;
> -    uint16_t execution_context;
> -    uint32_t partition_properties;
> -};
> -
> -struct ffa_partition_info_1_1 {
> -    uint16_t id;
> -    uint16_t execution_context;
> -    uint32_t partition_properties;
> -    uint8_t uuid[16];
> -};
> -
> /* Endpoint RX/TX descriptor */
> struct ffa_endpoint_rxtx_descriptor_1_0 {
>     uint16_t sender_id;
> @@ -102,11 +88,6 @@ struct ffa_endpoint_rxtx_descriptor_1_1 {
> /* Negotiated FF-A version to use with the SPMC */
> static uint32_t __ro_after_init ffa_version;
> 
> -/* SPs subscribing to VM_CREATE and VM_DESTROYED events */
> -static uint16_t *subscr_vm_created __read_mostly;
> -static uint16_t subscr_vm_created_count __read_mostly;
> -static uint16_t *subscr_vm_destroyed __read_mostly;
> -static uint16_t subscr_vm_destroyed_count __read_mostly;
> 
> /*
>  * Our rx/tx buffers shared with the SPMC. FFA_RXTX_PAGE_COUNT is the
> @@ -170,90 +151,6 @@ static int32_t ffa_rxtx_map(paddr_t tx_addr, paddr_t 
> rx_addr,
>     return ffa_simple_call(FFA_RXTX_MAP_64, tx_addr, rx_addr, page_count, 0);
> }
> 
> -static int32_t ffa_partition_info_get(uint32_t w1, uint32_t w2, uint32_t w3,
> -                                      uint32_t w4, uint32_t w5,
> -                                      uint32_t *count, uint32_t *fpi_size)
> -{
> -    const struct arm_smccc_1_2_regs arg = {
> -        .a0 = FFA_PARTITION_INFO_GET,
> -        .a1 = w1,
> -        .a2 = w2,
> -        .a3 = w3,
> -        .a4 = w4,
> -        .a5 = w5,
> -    };
> -    struct arm_smccc_1_2_regs resp;
> -    uint32_t ret;
> -
> -    arm_smccc_1_2_smc(&arg, &resp);
> -
> -    ret = ffa_get_ret_code(&resp);
> -    if ( !ret )
> -    {
> -        *count = resp.a2;
> -        *fpi_size = resp.a3;
> -    }
> -
> -    return ret;
> -}
> -
> -static int32_t ffa_rx_release(void)
> -{
> -    return ffa_simple_call(FFA_RX_RELEASE, 0, 0, 0, 0);
> -}
> -
> -static int32_t ffa_direct_req_send_vm(uint16_t sp_id, uint16_t vm_id,
> -                                      uint8_t msg)
> -{
> -    uint32_t exp_resp = FFA_MSG_FLAG_FRAMEWORK;
> -    unsigned int retry_count = 0;
> -    int32_t res;
> -
> -    if ( msg == FFA_MSG_SEND_VM_CREATED )
> -        exp_resp |= FFA_MSG_RESP_VM_CREATED;
> -    else if ( msg == FFA_MSG_SEND_VM_DESTROYED )
> -        exp_resp |= FFA_MSG_RESP_VM_DESTROYED;
> -    else
> -        return FFA_RET_INVALID_PARAMETERS;
> -
> -    do {
> -        const struct arm_smccc_1_2_regs arg = {
> -            .a0 = FFA_MSG_SEND_DIRECT_REQ_32,
> -            .a1 = sp_id,
> -            .a2 = FFA_MSG_FLAG_FRAMEWORK | msg,
> -            .a5 = vm_id,
> -        };
> -        struct arm_smccc_1_2_regs resp;
> -
> -        arm_smccc_1_2_smc(&arg, &resp);
> -        if ( resp.a0 != FFA_MSG_SEND_DIRECT_RESP_32 || resp.a2 != exp_resp )
> -        {
> -            /*
> -             * This is an invalid response, likely due to some error in the
> -             * implementation of the ABI.
> -             */
> -            return FFA_RET_INVALID_PARAMETERS;
> -        }
> -        res = resp.a3;
> -        if ( ++retry_count > 10 )
> -        {
> -            /*
> -             * TODO
> -             * FFA_RET_INTERRUPTED means that the SPMC has a pending
> -             * non-secure interrupt, we need a way of delivering that
> -             * non-secure interrupt.
> -             * FFA_RET_RETRY is the SP telling us that it's temporarily
> -             * blocked from handling the direct request, we need a generic
> -             * way to deal with this.
> -             * For now in both cases, give up after a few retries.
> -             */
> -            return res;
> -        }
> -    } while ( res == FFA_RET_INTERRUPTED || res == FFA_RET_RETRY );
> -
> -    return res;
> -}
> -
> static void handle_version(struct cpu_user_regs *regs)
> {
>     struct domain *d = current->domain;
> @@ -371,88 +268,6 @@ static uint32_t ffa_handle_rxtx_unmap(void)
>     return FFA_RET_OK;
> }
> 
> -static int32_t ffa_handle_partition_info_get(uint32_t w1, uint32_t w2,
> -                                             uint32_t w3, uint32_t w4,
> -                                             uint32_t w5, uint32_t *count,
> -                                             uint32_t *fpi_size)
> -{
> -    int32_t ret = FFA_RET_DENIED;
> -    struct domain *d = current->domain;
> -    struct ffa_ctx *ctx = d->arch.tee;
> -
> -    /*
> -     * FF-A v1.0 has w5 MBZ while v1.1 allows
> -     * FFA_PARTITION_INFO_GET_COUNT_FLAG to be non-zero.
> -     *
> -     * FFA_PARTITION_INFO_GET_COUNT is only using registers and not the
> -     * rxtx buffer so do the partition_info_get directly.
> -     */
> -    if ( w5 == FFA_PARTITION_INFO_GET_COUNT_FLAG &&
> -         ctx->guest_vers == FFA_VERSION_1_1 )
> -        return ffa_partition_info_get(w1, w2, w3, w4, w5, count, fpi_size);
> -    if ( w5 )
> -        return FFA_RET_INVALID_PARAMETERS;
> -
> -    if ( !ffa_rx )
> -        return FFA_RET_DENIED;
> -
> -    if ( !spin_trylock(&ctx->rx_lock) )
> -        return FFA_RET_BUSY;
> -
> -    if ( !ctx->page_count || !ctx->rx_is_free )
> -        goto out;
> -    spin_lock(&ffa_rx_buffer_lock);
> -    ret = ffa_partition_info_get(w1, w2, w3, w4, w5, count, fpi_size);
> -    if ( ret )
> -        goto out_rx_buf_unlock;
> -    /*
> -     * ffa_partition_info_get() succeeded so we now own the RX buffer we
> -     * share with the SPMC. We must give it back using ffa_rx_release()
> -     * once we've copied the content.
> -     */
> -
> -    if ( ctx->guest_vers == FFA_VERSION_1_0 )
> -    {
> -        size_t n;
> -        struct ffa_partition_info_1_1 *src = ffa_rx;
> -        struct ffa_partition_info_1_0 *dst = ctx->rx;
> -
> -        if ( ctx->page_count * FFA_PAGE_SIZE < *count * sizeof(*dst) )
> -        {
> -            ret = FFA_RET_NO_MEMORY;
> -            goto out_rx_release;
> -        }
> -
> -        for ( n = 0; n < *count; n++ )
> -        {
> -            dst[n].id = src[n].id;
> -            dst[n].execution_context = src[n].execution_context;
> -            dst[n].partition_properties = src[n].partition_properties;
> -        }
> -    }
> -    else
> -    {
> -        size_t sz = *count * *fpi_size;
> -
> -        if ( ctx->page_count * FFA_PAGE_SIZE < sz )
> -        {
> -            ret = FFA_RET_NO_MEMORY;
> -            goto out_rx_release;
> -        }
> -
> -        memcpy(ctx->rx, ffa_rx, sz);
> -    }
> -    ctx->rx_is_free = false;
> -out_rx_release:
> -    ffa_rx_release();
> -out_rx_buf_unlock:
> -    spin_unlock(&ffa_rx_buffer_lock);
> -out:
> -    spin_unlock(&ctx->rx_lock);
> -
> -    return ret;
> -}
> -
> static int32_t ffa_handle_rx_release(void)
> {
>     int32_t ret = FFA_RET_DENIED;
> @@ -604,46 +419,9 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
>     }
> }
> 
> -static bool is_in_subscr_list(const uint16_t *subscr, uint16_t start,
> -                              uint16_t end, uint16_t sp_id)
> -{
> -    unsigned int n;
> -
> -    for ( n = start; n < end; n++ )
> -    {
> -        if ( subscr[n] == sp_id )
> -            return true;
> -    }
> -
> -    return false;
> -}
> -
> -static void vm_destroy_bitmap_init(struct ffa_ctx *ctx,
> -                                   unsigned int create_signal_count)
> -{
> -    unsigned int n;
> -
> -    for ( n = 0; n < subscr_vm_destroyed_count; n++ )
> -    {
> -        /*
> -         * Skip SPs subscribed to the VM created event that never was
> -         * notified of the VM creation due to an error during
> -         * ffa_domain_init().
> -         */
> -        if ( is_in_subscr_list(subscr_vm_created, create_signal_count,
> -                               subscr_vm_created_count,
> -                               subscr_vm_destroyed[n]) )
> -            continue;
> -
> -        set_bit(n, ctx->vm_destroy_bitmap);
> -    }
> -}
> -
> static int ffa_domain_init(struct domain *d)
> {
>     struct ffa_ctx *ctx;
> -    unsigned int n;
> -    int32_t res;
> 
>     if ( !ffa_version )
>         return -ENODEV;
> @@ -654,8 +432,7 @@ static int ffa_domain_init(struct domain *d)
>     if ( d->domain_id >= UINT16_MAX)
>         return -ERANGE;
> 
> -    ctx = xzalloc_flex_struct(struct ffa_ctx, vm_destroy_bitmap,
> -                              BITS_TO_LONGS(subscr_vm_destroyed_count));
> +    ctx = xzalloc(struct ffa_ctx);
>     if ( !ctx )
>         return -ENOMEM;
> 
> @@ -663,66 +440,28 @@ static int ffa_domain_init(struct domain *d)
>     ctx->teardown_d = d;
>     INIT_LIST_HEAD(&ctx->shm_list);
> 
> -    for ( n = 0; n < subscr_vm_created_count; n++ )
> -    {
> -        res = ffa_direct_req_send_vm(subscr_vm_created[n], ffa_get_vm_id(d),
> -                                     FFA_MSG_SEND_VM_CREATED);
> -        if ( res )
> -        {
> -            printk(XENLOG_ERR "ffa: Failed to report creation of vm_id %u to 
>  %u: res %d\n",
> -                   ffa_get_vm_id(d), subscr_vm_created[n], res);
> -            break;
> -        }
> -    }
> -    vm_destroy_bitmap_init(ctx, n);
> -    if ( n != subscr_vm_created_count )
> +    /*
> +     * ffa_domain_teardown() will be called if ffa_domain_init() returns an
> +     * error, so no need for cleanup in this function.
> +     */
> +
> +    if ( !ffa_partinfo_domain_init(d) )
>         return -EIO;
> 
>     return 0;
> }
> 
> -static void send_vm_destroyed(struct domain *d)
> -{
> -    struct ffa_ctx *ctx = d->arch.tee;
> -    unsigned int n;
> -    int32_t res;
> -
> -    for ( n = 0; n < subscr_vm_destroyed_count; n++ )
> -    {
> -        if ( !test_bit(n, ctx->vm_destroy_bitmap) )
> -            continue;
> -
> -        res = ffa_direct_req_send_vm(subscr_vm_destroyed[n], 
> ffa_get_vm_id(d),
> -                                     FFA_MSG_SEND_VM_DESTROYED);
> -
> -        if ( res )
> -        {
> -            printk(XENLOG_ERR "%pd: ffa: Failed to report destruction of 
> vm_id %u to %u: res %d\n",
> -                   d, ffa_get_vm_id(d), subscr_vm_destroyed[n], res);
> -        }
> -
> -        /*
> -         * For these two error codes the hypervisor is expected to resend
> -         * the destruction message. For the rest it is expected that the
> -         * error is permanent and that it doesn't help to resend the
> -         * destruction message.
> -         */
> -        if ( res != FFA_RET_INTERRUPTED && res != FFA_RET_RETRY )
> -            clear_bit(n, ctx->vm_destroy_bitmap);
> -    }
> -}
> -
> static void ffa_domain_teardown_continue(struct ffa_ctx *ctx, bool first_time)
> {
>     struct ffa_ctx *next_ctx = NULL;
>     bool retry = false;
> 
> -    send_vm_destroyed(ctx->teardown_d);
> +    if ( !ffa_partinfo_domain_destroy(ctx->teardown_d) )
> +        retry = true;
>     if ( !ffa_shm_domain_destroy(ctx->teardown_d) )
>         retry = true;
> 
> -    if ( retry ||
> -         !bitmap_empty(ctx->vm_destroy_bitmap, subscr_vm_destroyed_count) )
> +    if ( retry )
>     {
>         printk(XENLOG_G_INFO "%pd: ffa: Remaining cleanup, retrying\n", 
> ctx->teardown_d);
> 
> @@ -796,82 +535,6 @@ static int ffa_relinquish_resources(struct domain *d)
>     return 0;
> }
> 
> -static void uninit_subscribers(void)
> -{
> -        subscr_vm_created_count = 0;
> -        subscr_vm_destroyed_count = 0;
> -        XFREE(subscr_vm_created);
> -        XFREE(subscr_vm_destroyed);
> -}
> -
> -static bool init_subscribers(struct ffa_partition_info_1_1 *fpi, uint16_t 
> count)
> -{
> -    uint16_t n;
> -    uint16_t c_pos;
> -    uint16_t d_pos;
> -
> -    subscr_vm_created_count = 0;
> -    subscr_vm_destroyed_count = 0;
> -    for ( n = 0; n < count; n++ )
> -    {
> -        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_CREATED )
> -            subscr_vm_created_count++;
> -        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_DESTROYED )
> -            subscr_vm_destroyed_count++;
> -    }
> -
> -    if ( subscr_vm_created_count )
> -        subscr_vm_created = xzalloc_array(uint16_t, subscr_vm_created_count);
> -    if ( subscr_vm_destroyed_count )
> -        subscr_vm_destroyed = xzalloc_array(uint16_t,
> -                                            subscr_vm_destroyed_count);
> -    if ( (subscr_vm_created_count && !subscr_vm_created) ||
> -         (subscr_vm_destroyed_count && !subscr_vm_destroyed) )
> -    {
> -        printk(XENLOG_ERR "ffa: Failed to allocate subscription lists\n");
> -        uninit_subscribers();
> -        return false;
> -    }
> -
> -    for ( c_pos = 0, d_pos = 0, n = 0; n < count; n++ )
> -    {
> -        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_CREATED )
> -            subscr_vm_created[c_pos++] = fpi[n].id;
> -        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_DESTROYED )
> -            subscr_vm_destroyed[d_pos++] = fpi[n].id;
> -    }
> -
> -    return true;
> -}
> -
> -static bool init_sps(void)
> -{
> -    bool ret = false;
> -    uint32_t fpi_size;
> -    uint32_t count;
> -    int e;
> -
> -    e = ffa_partition_info_get(0, 0, 0, 0, 0, &count, &fpi_size);
> -    if ( e )
> -    {
> -        printk(XENLOG_ERR "ffa: Failed to get list of SPs: %d\n", e);
> -        goto out;
> -    }
> -
> -    if ( count >= UINT16_MAX )
> -    {
> -        printk(XENLOG_ERR "ffa: Impossible number of SPs: %u\n", count);
> -        goto out;
> -    }
> -
> -    ret = init_subscribers(ffa_rx, count);
> -
> -out:
> -    ffa_rx_release();
> -
> -    return ret;
> -}
> -
> static bool ffa_probe(void)
> {
>     uint32_t vers;
> @@ -949,7 +612,7 @@ static bool ffa_probe(void)
>     }
>     ffa_version = vers;
> 
> -    if ( !init_sps() )
> +    if ( !ffa_partinfo_init() )
>         goto err_free_ffa_tx;
> 
>     INIT_LIST_HEAD(&ffa_teardown_head);
> diff --git a/xen/arch/arm/tee/ffa_partinfo.c b/xen/arch/arm/tee/ffa_partinfo.c
> new file mode 100644
> index 000000000000..dc1059584828
> --- /dev/null
> +++ b/xen/arch/arm/tee/ffa_partinfo.c
> @@ -0,0 +1,373 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (C) 2024  Linaro Limited
> + */
> +
> +#include <xen/const.h>
> +#include <xen/sizes.h>
> +#include <xen/types.h>
> +
> +#include <asm/smccc.h>
> +#include <asm/regs.h>
> +
> +#include "ffa_private.h"
> +
> +/* Partition information descriptor defined in FF-A-1.0-REL */
> +struct ffa_partition_info_1_0 {
> +    uint16_t id;
> +    uint16_t execution_context;
> +    uint32_t partition_properties;
> +};
> +
> +/* Partition information descriptor defined in FF-A-1.1-REL0 */
> +struct ffa_partition_info_1_1 {
> +    uint16_t id;
> +    uint16_t execution_context;
> +    uint32_t partition_properties;
> +    uint8_t uuid[16];
> +};
> +
> +/* SPs subscribing to VM_CREATE and VM_DESTROYED events */
> +static uint16_t *subscr_vm_created __read_mostly;
> +static uint16_t subscr_vm_created_count __read_mostly;
> +static uint16_t *subscr_vm_destroyed __read_mostly;
> +static uint16_t subscr_vm_destroyed_count __read_mostly;
> +
> +static int32_t ffa_partition_info_get(uint32_t w1, uint32_t w2, uint32_t w3,
> +                                      uint32_t w4, uint32_t w5, uint32_t 
> *count,
> +                                      uint32_t *fpi_size)
> +{
> +    const struct arm_smccc_1_2_regs arg = {
> +        .a0 = FFA_PARTITION_INFO_GET,
> +        .a1 = w1,
> +        .a2 = w2,
> +        .a3 = w3,
> +        .a4 = w4,
> +        .a5 = w5,
> +    };
> +    struct arm_smccc_1_2_regs resp;
> +    uint32_t ret;
> +
> +    arm_smccc_1_2_smc(&arg, &resp);
> +
> +    ret = ffa_get_ret_code(&resp);
> +    if ( !ret )
> +    {
> +        *count = resp.a2;
> +        *fpi_size = resp.a3;
> +    }
> +
> +    return ret;
> +}
> +
> +int32_t ffa_handle_partition_info_get(uint32_t w1, uint32_t w2, uint32_t w3,
> +                                      uint32_t w4, uint32_t w5, uint32_t 
> *count,
> +                                      uint32_t *fpi_size)
> +{
> +    int32_t ret = FFA_RET_DENIED;
> +    struct domain *d = current->domain;
> +    struct ffa_ctx *ctx = d->arch.tee;
> +
> +    /*
> +     * FF-A v1.0 has w5 MBZ while v1.1 allows
> +     * FFA_PARTITION_INFO_GET_COUNT_FLAG to be non-zero.
> +     *
> +     * FFA_PARTITION_INFO_GET_COUNT is only using registers and not the
> +     * rxtx buffer so do the partition_info_get directly.
> +     */
> +    if ( w5 == FFA_PARTITION_INFO_GET_COUNT_FLAG &&
> +         ctx->guest_vers == FFA_VERSION_1_1 )
> +        return ffa_partition_info_get(w1, w2, w3, w4, w5, count, fpi_size);
> +    if ( w5 )
> +        return FFA_RET_INVALID_PARAMETERS;
> +
> +    if ( !ffa_rx )
> +        return FFA_RET_DENIED;
> +
> +    if ( !spin_trylock(&ctx->rx_lock) )
> +        return FFA_RET_BUSY;
> +
> +    if ( !ctx->page_count || !ctx->rx_is_free )
> +        goto out;
> +    spin_lock(&ffa_rx_buffer_lock);
> +    ret = ffa_partition_info_get(w1, w2, w3, w4, w5, count, fpi_size);
> +    if ( ret )
> +        goto out_rx_buf_unlock;
> +    /*
> +     * ffa_partition_info_get() succeeded so we now own the RX buffer we
> +     * share with the SPMC. We must give it back using ffa_rx_release()
> +     * once we've copied the content.
> +     */
> +
> +    if ( ctx->guest_vers == FFA_VERSION_1_0 )
> +    {
> +        size_t n;
> +        struct ffa_partition_info_1_1 *src = ffa_rx;
> +        struct ffa_partition_info_1_0 *dst = ctx->rx;
> +
> +        if ( ctx->page_count * FFA_PAGE_SIZE < *count * sizeof(*dst) )
> +        {
> +            ret = FFA_RET_NO_MEMORY;
> +            goto out_rx_release;
> +        }
> +
> +        for ( n = 0; n < *count; n++ )
> +        {
> +            dst[n].id = src[n].id;
> +            dst[n].execution_context = src[n].execution_context;
> +            dst[n].partition_properties = src[n].partition_properties;
> +        }
> +    }
> +    else
> +    {
> +        size_t sz = *count * *fpi_size;
> +
> +        if ( ctx->page_count * FFA_PAGE_SIZE < sz )
> +        {
> +            ret = FFA_RET_NO_MEMORY;
> +            goto out_rx_release;
> +        }
> +
> +        memcpy(ctx->rx, ffa_rx, sz);
> +    }
> +    ctx->rx_is_free = false;
> +out_rx_release:
> +    ffa_rx_release();
> +out_rx_buf_unlock:
> +    spin_unlock(&ffa_rx_buffer_lock);
> +out:
> +    spin_unlock(&ctx->rx_lock);
> +
> +    return ret;
> +}
> +
> +static int32_t ffa_direct_req_send_vm(uint16_t sp_id, uint16_t vm_id,
> +                                      uint8_t msg)
> +{
> +    uint32_t exp_resp = FFA_MSG_FLAG_FRAMEWORK;
> +    unsigned int retry_count = 0;
> +    int32_t res;
> +
> +    if ( msg == FFA_MSG_SEND_VM_CREATED )
> +        exp_resp |= FFA_MSG_RESP_VM_CREATED;
> +    else if ( msg == FFA_MSG_SEND_VM_DESTROYED )
> +        exp_resp |= FFA_MSG_RESP_VM_DESTROYED;
> +    else
> +        return FFA_RET_INVALID_PARAMETERS;
> +
> +    do {
> +        const struct arm_smccc_1_2_regs arg = {
> +            .a0 = FFA_MSG_SEND_DIRECT_REQ_32,
> +            .a1 = sp_id,
> +            .a2 = FFA_MSG_FLAG_FRAMEWORK | msg,
> +            .a5 = vm_id,
> +        };
> +        struct arm_smccc_1_2_regs resp;
> +
> +        arm_smccc_1_2_smc(&arg, &resp);
> +        if ( resp.a0 != FFA_MSG_SEND_DIRECT_RESP_32 || resp.a2 != exp_resp )
> +        {
> +            /*
> +             * This is an invalid response, likely due to some error in the
> +             * implementation of the ABI.
> +             */
> +            return FFA_RET_INVALID_PARAMETERS;
> +        }
> +        res = resp.a3;
> +        if ( ++retry_count > 10 )
> +        {
> +            /*
> +             * TODO
> +             * FFA_RET_INTERRUPTED means that the SPMC has a pending
> +             * non-secure interrupt, we need a way of delivering that
> +             * non-secure interrupt.
> +             * FFA_RET_RETRY is the SP telling us that it's temporarily
> +             * blocked from handling the direct request, we need a generic
> +             * way to deal with this.
> +             * For now in both cases, give up after a few retries.
> +             */
> +            return res;
> +        }
> +    } while ( res == FFA_RET_INTERRUPTED || res == FFA_RET_RETRY );
> +
> +    return res;
> +}
> +
> +static void uninit_subscribers(void)
> +{
> +        subscr_vm_created_count = 0;
> +        subscr_vm_destroyed_count = 0;
> +        XFREE(subscr_vm_created);
> +        XFREE(subscr_vm_destroyed);
> +}
> +
> +static bool init_subscribers(struct ffa_partition_info_1_1 *fpi, uint16_t 
> count)
> +{
> +    uint16_t n;
> +    uint16_t c_pos;
> +    uint16_t d_pos;
> +
> +    subscr_vm_created_count = 0;
> +    subscr_vm_destroyed_count = 0;
> +    for ( n = 0; n < count; n++ )
> +    {
> +        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_CREATED )
> +            subscr_vm_created_count++;
> +        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_DESTROYED )
> +            subscr_vm_destroyed_count++;
> +    }
> +
> +    if ( subscr_vm_created_count )
> +        subscr_vm_created = xzalloc_array(uint16_t, subscr_vm_created_count);
> +    if ( subscr_vm_destroyed_count )
> +        subscr_vm_destroyed = xzalloc_array(uint16_t,
> +                                            subscr_vm_destroyed_count);
> +    if ( (subscr_vm_created_count && !subscr_vm_created) ||
> +         (subscr_vm_destroyed_count && !subscr_vm_destroyed) )
> +    {
> +        printk(XENLOG_ERR "ffa: Failed to allocate subscription lists\n");
> +        uninit_subscribers();
> +        return false;
> +    }
> +
> +    for ( c_pos = 0, d_pos = 0, n = 0; n < count; n++ )
> +    {
> +        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_CREATED )
> +            subscr_vm_created[c_pos++] = fpi[n].id;
> +        if ( fpi[n].partition_properties & FFA_PART_PROP_NOTIF_DESTROYED )
> +            subscr_vm_destroyed[d_pos++] = fpi[n].id;
> +    }
> +
> +    return true;
> +}
> +
> +
> +
> +bool ffa_partinfo_init(void)
> +{
> +    bool ret = false;
> +    uint32_t fpi_size;
> +    uint32_t count;
> +    int e;
> +
> +    e = ffa_partition_info_get(0, 0, 0, 0, 0, &count, &fpi_size);
> +    if ( e )
> +    {
> +        printk(XENLOG_ERR "ffa: Failed to get list of SPs: %d\n", e);
> +        goto out;
> +    }
> +
> +    if ( count >= UINT16_MAX )
> +    {
> +        printk(XENLOG_ERR "ffa: Impossible number of SPs: %u\n", count);
> +        goto out;
> +    }
> +
> +    ret = init_subscribers(ffa_rx, count);
> +
> +out:
> +    ffa_rx_release();
> +
> +    return ret;
> +}
> +
> +static bool is_in_subscr_list(const uint16_t *subscr, uint16_t start,
> +                              uint16_t end, uint16_t sp_id)
> +{
> +    unsigned int n;
> +
> +    for ( n = start; n < end; n++ )
> +    {
> +        if ( subscr[n] == sp_id )
> +            return true;
> +    }
> +
> +    return false;
> +}
> +
> +static void vm_destroy_bitmap_init(struct ffa_ctx *ctx,
> +                                   unsigned int create_signal_count)
> +{
> +    unsigned int n;
> +
> +    for ( n = 0; n < subscr_vm_destroyed_count; n++ )
> +    {
> +        /*
> +         * Skip SPs subscribed to the VM created event that never was
> +         * notified of the VM creation due to an error during
> +         * ffa_domain_init().
> +         */
> +        if ( is_in_subscr_list(subscr_vm_created, create_signal_count,
> +                               subscr_vm_created_count,
> +                               subscr_vm_destroyed[n]) )
> +            continue;
> +
> +        set_bit(n, ctx->vm_destroy_bitmap);
> +    }
> +}
> +
> +bool ffa_partinfo_domain_init(struct domain *d)
> +{
> +    unsigned int count = BITS_TO_LONGS(subscr_vm_destroyed_count);
> +    struct ffa_ctx *ctx = d->arch.tee;
> +    unsigned int n;
> +    int32_t res;
> +
> +    ctx->vm_destroy_bitmap = xzalloc_array(unsigned long, count);
> +    if ( !ctx->vm_destroy_bitmap )
> +        return false;
> +
> +    for ( n = 0; n < subscr_vm_created_count; n++ )
> +    {
> +        res = ffa_direct_req_send_vm(subscr_vm_created[n], ffa_get_vm_id(d),
> +                                     FFA_MSG_SEND_VM_CREATED);
> +        if ( res )
> +        {
> +            printk(XENLOG_ERR "ffa: Failed to report creation of vm_id %u to 
>  %u: res %d\n",
> +                   ffa_get_vm_id(d), subscr_vm_created[n], res);
> +            break;
> +        }
> +    }
> +    vm_destroy_bitmap_init(ctx, n);
> +
> +    return n == subscr_vm_created_count;
> +}
> +
> +bool ffa_partinfo_domain_destroy(struct domain *d)
> +{
> +    struct ffa_ctx *ctx = d->arch.tee;
> +    unsigned int n;
> +    int32_t res;
> +
> +    if ( !ctx->vm_destroy_bitmap )
> +        return true;
> +
> +    for ( n = 0; n < subscr_vm_destroyed_count; n++ )
> +    {
> +        if ( !test_bit(n, ctx->vm_destroy_bitmap) )
> +            continue;
> +
> +        res = ffa_direct_req_send_vm(subscr_vm_destroyed[n], 
> ffa_get_vm_id(d),
> +                                     FFA_MSG_SEND_VM_DESTROYED);
> +
> +        if ( res )
> +        {
> +            printk(XENLOG_ERR "%pd: ffa: Failed to report destruction of 
> vm_id %u to %u: res %d\n",
> +                   d, ffa_get_vm_id(d), subscr_vm_destroyed[n], res);
> +        }
> +
> +        /*
> +         * For these two error codes the hypervisor is expected to resend
> +         * the destruction message. For the rest it is expected that the
> +         * error is permanent and that it doesn't help to resend the
> +         * destruction message.
> +         */
> +        if ( res != FFA_RET_INTERRUPTED && res != FFA_RET_RETRY )
> +            clear_bit(n, ctx->vm_destroy_bitmap);
> +    }
> +
> +    if ( bitmap_empty(ctx->vm_destroy_bitmap, subscr_vm_destroyed_count) )
> +        XFREE(ctx->vm_destroy_bitmap);
> +
> +    return !ctx->vm_destroy_bitmap;
> +}
> diff --git a/xen/arch/arm/tee/ffa_private.h b/xen/arch/arm/tee/ffa_private.h
> index f3e2f42e573e..6b32b69cfe90 100644
> --- a/xen/arch/arm/tee/ffa_private.h
> +++ b/xen/arch/arm/tee/ffa_private.h
> @@ -244,7 +244,7 @@ struct ffa_ctx {
>      * Used for ffa_domain_teardown() to keep track of which SPs should be
>      * notified that this guest is being destroyed.
>      */
> -    unsigned long vm_destroy_bitmap[];
> +    unsigned long *vm_destroy_bitmap;
> };
> 
> extern void *ffa_rx;
> @@ -256,6 +256,13 @@ bool ffa_shm_domain_destroy(struct domain *d);
> void ffa_handle_mem_share(struct cpu_user_regs *regs);
> int ffa_handle_mem_reclaim(uint64_t handle, uint32_t flags);
> 
> +bool ffa_partinfo_init(void);
> +bool ffa_partinfo_domain_init(struct domain *d);
> +bool ffa_partinfo_domain_destroy(struct domain *d);
> +int32_t ffa_handle_partition_info_get(uint32_t w1, uint32_t w2, uint32_t w3,
> +                                      uint32_t w4, uint32_t w5, uint32_t 
> *count,
> +                                      uint32_t *fpi_size);
> +
> 
> static inline uint16_t ffa_get_vm_id(const struct domain *d)
> {
> @@ -325,4 +332,9 @@ static inline int32_t ffa_simple_call(uint32_t fid, 
> register_t a1,
>     return ffa_get_ret_code(&resp);
> }
> 
> +static inline int32_t ffa_rx_release(void)
> +{
> +    return ffa_simple_call(FFA_RX_RELEASE, 0, 0, 0, 0);
> +}
> +
> #endif /*__FFA_PRIVATE_H__*/
> -- 
> 2.34.1
> 




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.