
Re: [Xen-devel] [RFC PATCH 21/24] ARM: vITS: handle INVALL command



On Wed, Sep 28, 2016 at 11:54 PM, Andre Przywara <andre.przywara@xxxxxxx> wrote:
> The INVALL command instructs an ITS to invalidate the configuration
> data for all LPIs associated with a given redistributor (read: VCPU).
> To avoid iterating (and mapping!) all guest tables, we instead go through
> the host LPI table to find any LPIs targeting this VCPU. We then update
> the configuration bits for the connected virtual LPIs.
>
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
> ---
>  xen/arch/arm/gic-its.c        | 58 +++++++++++++++++++++++++++++++++++++++++++
>  xen/arch/arm/vgic-its.c       | 30 ++++++++++++++++++++++
>  xen/include/asm-arm/gic-its.h |  2 ++
>  3 files changed, 90 insertions(+)
>
> diff --git a/xen/arch/arm/gic-its.c b/xen/arch/arm/gic-its.c
> index 6f4329f..5129d6e 100644
> --- a/xen/arch/arm/gic-its.c
> +++ b/xen/arch/arm/gic-its.c
> @@ -228,6 +228,18 @@ static int its_send_cmd_inv(struct host_its *its,
>      return its_send_command(its, cmd);
>  }
>
> +static int its_send_cmd_invall(struct host_its *its, int cpu)
> +{
> +    uint64_t cmd[4];
> +
> +    cmd[0] = GITS_CMD_INVALL;
> +    cmd[1] = 0x00;
> +    cmd[2] = cpu & GENMASK(15, 0);
> +    cmd[3] = 0x00;
> +
> +    return its_send_command(its, cmd);
> +}
> +
>  int gicv3_its_map_device(struct host_its *hw_its, struct domain *d,
>                           int devid, int bits, bool valid)
>  {
> @@ -668,6 +680,52 @@ uint32_t gicv3_lpi_lookup_lpi(struct domain *d, uint32_t host_lpi, int *vcpu_id)
>      return hlpi.virt_lpi;
>  }
>
> +/* Iterate over all host LPIs and update the "enabled" state for a given
> + * guest redistributor (VCPU) according to the respective state in the
> + * provided proptable. This proptable is indexed by the stored virtual LPI
> + * number. This implements a guest INVALL command.
> + */
> +void gicv3_lpi_update_configurations(struct vcpu *v, uint8_t *proptable)
> +{
> +    int chunk, i;
> +    struct host_its *its;
> +
> +    for (chunk = 0; chunk < MAX_HOST_LPIS / HOST_LPIS_PER_PAGE; chunk++)
> +    {
> +        if ( !lpi_data.host_lpis[chunk] )
> +            continue;
> +
> +        for (i = 0; i < HOST_LPIS_PER_PAGE; i++)
> +        {
> +            union host_lpi *hlpip = &lpi_data.host_lpis[chunk][i], hlpi;
> +            uint32_t hlpi_nr;
> +
> +            hlpi.data = hlpip->data;
> +            if ( !hlpi.virt_lpi )
> +                continue;
> +
> +            if ( hlpi.dom_id != v->domain->domain_id )
> +                continue;
> +
> +            if ( hlpi.vcpu_id != v->vcpu_id )
> +                continue;
> +
> +            hlpi_nr = chunk * HOST_LPIS_PER_PAGE + i;
> +
> +            if ( proptable[hlpi.virt_lpi] & LPI_PROP_ENABLED )
> +                lpi_data.lpi_property[hlpi_nr - 8192] |= LPI_PROP_ENABLED;
> +            else
> +                lpi_data.lpi_property[hlpi_nr - 8192] &= ~LPI_PROP_ENABLED;
> +        }
> +    }
AFAIK, the initial design was to use a tasklet to update the property
table, since walking the whole table consumes a lot of time.

> +
> +    /* Tell all ITSes that they should update the property table for CPU 0,
> +     * to which we map all LPIs.
> +     */
> +    list_for_each_entry(its, &host_its_list, entry)
> +        its_send_cmd_invall(its, 0);
> +}
> +
>  void gicv3_lpi_set_enable(struct host_its *its,
>                            uint32_t deviceid, uint32_t eventid,
>                            uint32_t host_lpi, bool enabled)
> diff --git a/xen/arch/arm/vgic-its.c b/xen/arch/arm/vgic-its.c
> index 74da8fc..1e429b7 100644
> --- a/xen/arch/arm/vgic-its.c
> +++ b/xen/arch/arm/vgic-its.c
> @@ -294,6 +294,33 @@ out_unlock:
>      return ret;
>  }
>
> +/* INVALL updates the per-LPI configuration status for every LPI mapped to
> + * this redistributor. For the guest side we don't need to update anything,
> + * as we always refer to the actual table for the enabled bit and the
> + * priority.
> + * Enabling or disabling a virtual LPI, however, needs to be propagated to
> + * the respective host LPI. Instead of iterating over all mapped LPIs in our
> + * emulated GIC (which is expensive due to the required on-demand mapping),
> + * we iterate over all mapped _host_ LPIs and filter for those which are
> + * forwarded to this virtual redistributor.
> + */
> +static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr)
> +{
> +    uint32_t collid = its_cmd_get_collection(cmdptr);
> +    struct vcpu *vcpu;
> +
> +    spin_lock(&its->its_lock);
> +    vcpu = get_vcpu_from_collection(its, collid);
> +    spin_unlock(&its->its_lock);
> +
> +    if ( !vcpu )
> +        return -1;
> +
> +    gicv3_lpi_update_configurations(vcpu, its->d->arch.vgic.proptable);
> +
> +    return 0;
> +}
> +
>  static int its_handle_mapc(struct virt_its *its, uint64_t *cmdptr)
>  {
>      uint32_t collid = its_cmd_get_collection(cmdptr);
> @@ -515,6 +542,9 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its,
>          case GITS_CMD_INV:
>              its_handle_inv(its, cmdptr);
>              break;
> +        case GITS_CMD_INVALL:
> +            its_handle_invall(its, cmdptr);
> +            break;
>          case GITS_CMD_MAPC:
>              its_handle_mapc(its, cmdptr);
>              break;
> diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
> index 2cdb3e1..ba6b2d5 100644
> --- a/xen/include/asm-arm/gic-its.h
> +++ b/xen/include/asm-arm/gic-its.h
> @@ -146,6 +146,8 @@ int gicv3_lpi_drop_host_lpi(struct host_its *its,
>                              uint32_t devid, uint32_t eventid,
>                              uint32_t host_lpi);
>
> +void gicv3_lpi_update_configurations(struct vcpu *v, uint8_t *proptable);
> +
>  static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>  {
>      return d->arch.vgic.proptable[lpi - 8192] & 0xfc;
> --
> 2.9.0
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> https://lists.xen.org/xen-devel
