
Re: [XEN PATCH v7 13/20] xen/arm: ffa: support mapping guest RX/TX buffers



Hi,

On Fri, Mar 3, 2023 at 9:16 AM Bertrand Marquis
<Bertrand.Marquis@xxxxxxx> wrote:
>
> Hi Jens,
>
> > On 3 Mar 2023, at 08:41, Jens Wiklander <jens.wiklander@xxxxxxxxxx> wrote:
> >
> > Hi Bertrand,
> >
> > On Thu, Mar 2, 2023 at 4:05 PM Bertrand Marquis
> > <Bertrand.Marquis@xxxxxxx> wrote:
> >>
> >> Hi Jens,
> >>
> >>> On 22 Feb 2023, at 16:33, Jens Wiklander <jens.wiklander@xxxxxxxxxx> 
> >>> wrote:
> >>>
> >>> Adds support in the mediator to map and unmap the RX and TX buffers
> >>> provided by the guest using the two FF-A functions FFA_RXTX_MAP and
> >>> FFA_RXTX_UNMAP.
> >>>
> >>> These buffers are later used to transmit data that cannot be passed in
> >>> registers only.
> >>>
> >>> Signed-off-by: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
> >>> ---
> >>> xen/arch/arm/tee/ffa.c | 127 +++++++++++++++++++++++++++++++++++++++++
> >>> 1 file changed, 127 insertions(+)
> >>>
> >>> diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
> >>> index f1b014b6c7f4..953b6dfd5eca 100644
> >>> --- a/xen/arch/arm/tee/ffa.c
> >>> +++ b/xen/arch/arm/tee/ffa.c
> >>> @@ -149,10 +149,17 @@ struct ffa_partition_info_1_1 {
> >>> };
> >>>
> >>> struct ffa_ctx {
> >>> +    void *rx;
> >>> +    const void *tx;
> >>> +    struct page_info *rx_pg;
> >>> +    struct page_info *tx_pg;
> >>> +    unsigned int page_count;
> >>>    uint32_t guest_vers;
> >>> +    bool tx_is_mine;
> >>>    bool interrupted;
> >>> };
> >>>
> >>> +
> >> Newline probably added by mistake.
> >
> > Yes, I'll remove it.
> >
> >>
> >>> /* Negotiated FF-A version to use with the SPMC */
> >>> static uint32_t ffa_version __ro_after_init;
> >>>
> >>> @@ -337,6 +344,11 @@ static void set_regs(struct cpu_user_regs *regs, 
> >>> register_t v0, register_t v1,
> >>>        set_user_reg(regs, 7, v7);
> >>> }
> >>>
> >>> +static void set_regs_error(struct cpu_user_regs *regs, uint32_t 
> >>> error_code)
> >>> +{
> >>> +    set_regs(regs, FFA_ERROR, 0, error_code, 0, 0, 0, 0, 0);
> >>> +}
> >>> +
> >>> static void set_regs_success(struct cpu_user_regs *regs, uint32_t w2,
> >>>                             uint32_t w3)
> >>> {
> >>> @@ -358,6 +370,99 @@ static void handle_version(struct cpu_user_regs 
> >>> *regs)
> >>>    set_regs(regs, vers, 0, 0, 0, 0, 0, 0, 0);
> >>> }
> >>>
> >>> +static uint32_t handle_rxtx_map(uint32_t fid, register_t tx_addr,
> >>> +                                register_t rx_addr, uint32_t page_count)
> >>> +{
> >>> +    uint32_t ret = FFA_RET_INVALID_PARAMETERS;
> >>> +    struct domain *d = current->domain;
> >>> +    struct ffa_ctx *ctx = d->arch.tee;
> >>> +    struct page_info *tx_pg;
> >>> +    struct page_info *rx_pg;
> >>> +    p2m_type_t t;
> >>> +    void *rx;
> >>> +    void *tx;
> >>> +
> >>> +    if ( !smccc_is_conv_64(fid) )
> >>> +    {
> >>> +        tx_addr &= UINT32_MAX;
> >>> +        rx_addr &= UINT32_MAX;
> >>> +    }
> >>
> >> I am a bit wondering here what we should do:
> >> - we could just say that the 32-bit version of the call is not allowed
> >> for non-32-bit guests
> >> - we could check that the highest bits are 0 for 64-bit guests and
> >> return an error if not
> >> - we could just mask, hoping that if there was a mistake the address
> >> after the mask does not exist in the guest space
> >>
> >> In the end nothing in the spec prevents a 64-bit guest from using the
> >> 32-bit version, so it might be a good idea to return an error here if
> >> the highest 32 bits are not 0?
> >
> > The SMC Calling Convention says:
> > When an SMC32/HVC32 call is made from AArch64:
> > - A Function Identifier is passed in register W0.
> > - Arguments are passed in registers W1-W7.
> >
> > So masking off the higher bits is all that should be done.
>
> Please put a comment saying that in the 32-bit convention the higher bits
> should be ignored.

OK.
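
Something along these lines, perhaps (wording illustrative only, to be
adjusted in the next version):

    if ( !smccc_is_conv_64(fid) )
    {
        /*
         * SMC Calling Convention: for an SMC32/HVC32 call made from
         * AArch64 the arguments are passed in W registers, so the
         * upper 32 bits must be ignored.
         */
        tx_addr &= UINT32_MAX;
        rx_addr &= UINT32_MAX;
    }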

Thanks,
Jens

>
> >
> >>
> >>> +
> >>> +    /* For now to keep things simple, only deal with a single page */
> >>> +    if ( page_count != 1 )
> >>> +        return FFA_RET_NOT_SUPPORTED;
> >>
> >> Please add a TODO here and a print, as this is a limitation we will
> >> probably have to work on soon.
> >
> > I'll add an arbitrary upper limit and a print if it's exceeded.
>
> thanks
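
To be concrete, I have something like this in mind (the limit value and
the constant name are placeholders, not final):

    /* Arbitrary limit for now, can be revisited if it turns out too low */
    #define FFA_MAX_RXTX_PAGE_COUNT 32

    if ( !page_count || page_count > FFA_MAX_RXTX_PAGE_COUNT )
    {
        printk(XENLOG_ERR "ffa: RXTX_MAP: unsupported page count %u\n",
               page_count);
        return FFA_RET_NOT_SUPPORTED;
    }
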
>
> >
> >>
> >>
> >>> +
> >>> +    /* Already mapped */
> >>> +    if ( ctx->rx )
> >>> +        return FFA_RET_DENIED;
> >>> +
> >>> +    tx_pg = get_page_from_gfn(d, gfn_x(gaddr_to_gfn(tx_addr)), &t, 
> >>> P2M_ALLOC);
> >>> +    if ( !tx_pg )
> >>> +        return FFA_RET_INVALID_PARAMETERS;
> >>> +    /* Only normal RAM for now */
> >>> +    if ( !p2m_is_ram(t) )
> >>> +        goto err_put_tx_pg;
> >>> +
> >>> +    rx_pg = get_page_from_gfn(d, gfn_x(gaddr_to_gfn(rx_addr)), &t, 
> >>> P2M_ALLOC);
> >>> +    if ( !rx_pg )
> >>> +        goto err_put_tx_pg;
> >>> +    /* Only normal RAM for now */
> >>> +    if ( !p2m_is_ram(t) )
> >>> +        goto err_put_rx_pg;
> >>> +
> >>> +    tx = __map_domain_page_global(tx_pg);
> >>> +    if ( !tx )
> >>> +        goto err_put_rx_pg;
> >>> +
> >>> +    rx = __map_domain_page_global(rx_pg);
> >>> +    if ( !rx )
> >>> +        goto err_unmap_tx;
> >>> +
> >>> +    ctx->rx = rx;
> >>> +    ctx->tx = tx;
> >>> +    ctx->rx_pg = rx_pg;
> >>> +    ctx->tx_pg = tx_pg;
> >>> +    ctx->page_count = 1;
> >>
> >> Please use page_count here instead of 1 so that this is not forgotten
> >> once we add support for more pages.
> >
> > OK
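
To spell it out, the assignment then simply becomes (sketch):

    ctx->page_count = page_count;
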
>
> Cheers
> Bertrand
>
> >
> > Cheers,
> > Jens
> >
> >>
> >>
> >> Cheers
> >> Bertrand
> >>
> >>> +    ctx->tx_is_mine = true;
> >>> +    return FFA_RET_OK;
> >>> +
> >>> +err_unmap_tx:
> >>> +    unmap_domain_page_global(tx);
> >>> +err_put_rx_pg:
> >>> +    put_page(rx_pg);
> >>> +err_put_tx_pg:
> >>> +    put_page(tx_pg);
> >>> +
> >>> +    return ret;
> >>> +}
> >>> +
> >>> +static void rxtx_unmap(struct ffa_ctx *ctx)
> >>> +{
> >>> +    unmap_domain_page_global(ctx->rx);
> >>> +    unmap_domain_page_global(ctx->tx);
> >>> +    put_page(ctx->rx_pg);
> >>> +    put_page(ctx->tx_pg);
> >>> +    ctx->rx = NULL;
> >>> +    ctx->tx = NULL;
> >>> +    ctx->rx_pg = NULL;
> >>> +    ctx->tx_pg = NULL;
> >>> +    ctx->page_count = 0;
> >>> +    ctx->tx_is_mine = false;
> >>> +}
> >>> +
> >>> +static uint32_t handle_rxtx_unmap(void)
> >>> +{
> >>> +    struct domain *d = current->domain;
> >>> +    struct ffa_ctx *ctx = d->arch.tee;
> >>> +
> >>> +    if ( !ctx->rx )
> >>> +        return FFA_RET_INVALID_PARAMETERS;
> >>> +
> >>> +    rxtx_unmap(ctx);
> >>> +
> >>> +    return FFA_RET_OK;
> >>> +}
> >>> +
> >>> static void handle_msg_send_direct_req(struct cpu_user_regs *regs, 
> >>> uint32_t fid)
> >>> {
> >>>    struct arm_smccc_1_2_regs arg = { .a0 = fid, };
> >>> @@ -423,6 +528,7 @@ static bool ffa_handle_call(struct cpu_user_regs 
> >>> *regs)
> >>>    uint32_t fid = get_user_reg(regs, 0);
> >>>    struct domain *d = current->domain;
> >>>    struct ffa_ctx *ctx = d->arch.tee;
> >>> +    int e;
> >>>
> >>>    if ( !ctx )
> >>>        return false;
> >>> @@ -435,6 +541,24 @@ static bool ffa_handle_call(struct cpu_user_regs 
> >>> *regs)
> >>>    case FFA_ID_GET:
> >>>        set_regs_success(regs, get_vm_id(d), 0);
> >>>        return true;
> >>> +    case FFA_RXTX_MAP_32:
> >>> +#ifdef CONFIG_ARM_64
> >>> +    case FFA_RXTX_MAP_64:
> >>> +#endif
> >>> +        e = handle_rxtx_map(fid, get_user_reg(regs, 1), 
> >>> get_user_reg(regs, 2),
> >>> +                            get_user_reg(regs, 3));
> >>> +        if ( e )
> >>> +            set_regs_error(regs, e);
> >>> +        else
> >>> +            set_regs_success(regs, 0, 0);
> >>> +        return true;
> >>> +    case FFA_RXTX_UNMAP:
> >>> +        e = handle_rxtx_unmap();
> >>> +        if ( e )
> >>> +            set_regs_error(regs, e);
> >>> +        else
> >>> +            set_regs_success(regs, 0, 0);
> >>> +        return true;
> >>>    case FFA_MSG_SEND_DIRECT_REQ_32:
> >>> #ifdef CONFIG_ARM_64
> >>>    case FFA_MSG_SEND_DIRECT_REQ_64:
> >>> @@ -515,6 +639,9 @@ static int ffa_relinquish_resources(struct domain *d)
> >>>                   get_vm_id(d), subscr_vm_destroyed[n], res);
> >>>    }
> >>>
> >>> +    if ( ctx->rx )
> >>> +        rxtx_unmap(ctx);
> >>> +
> >>>    XFREE(d->arch.tee);
> >>>
> >>>    return 0;
> >>> --
> >>> 2.34.1
>
>



 

