
Re: [Xen-devel] [RFC PATCH 13/19] xen/arm: its: implement hw_irq_controller for LPIs



On Tue, Mar 3, 2015 at 10:58 PM, Stefano Stabellini
<stefano.stabellini@xxxxxxxxxxxxx> wrote:
> On Mon, 2 Mar 2015, vijay.kilari@xxxxxxxxx wrote:
>> From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
>>
>> This patch implements hw_irq_controller api's required
>> to handle LPI's.
>>
>> Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
>> ---
>>  xen/arch/arm/gic-v3-its.c |  104 ++++++++++++++++++++++++++++++++++++++++++++-
>>  xen/arch/arm/gic.c        |   11 +++++
>>  xen/include/asm-arm/gic.h |    8 +++-
>>  3 files changed, 121 insertions(+), 2 deletions(-)
>>
>> diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
>> index 5d9550f..b2c3320 100644
>> --- a/xen/arch/arm/gic-v3-its.c
>> +++ b/xen/arch/arm/gic-v3-its.c
>> @@ -140,6 +140,9 @@ struct its_cmd_desc {
>>  typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
>>                                  struct its_cmd_desc *);
>>
>> +static void its_send_movi(struct its_node *its, struct its_collection *col,
>> +                          u32 dev_id, u32 id);
>> +
>>  uint32_t its_get_pta_type(void)
>>  {
>>      return pta_type;
>> @@ -244,6 +247,103 @@ int its_get_physical_cid(uint32_t *col_id, uint64_t vta)
>>      return 1;
>>  }
>>
>> +static void lpi_set_config(u32 hwirq, u32 id, int enable)
>> +{
>> +    u8 *cfg;
>> +
>> +    cfg = gic_rdists->prop_page + hwirq - NR_GIC_LPI;
>> +
>> +    if ( enable )
>> +        *cfg |= (1 << 0);
>> +    else
>> +        *cfg &= ~(1 << 0);
>> +
>> +    /*
>> +     * Make the above write visible to the redistributors.
>> +     * And yes, we're flushing exactly: One. Single. Byte.
>> +     * Humpf...
>> +     */
>> +    if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
>> +        clean_dcache_va_range(cfg, sizeof(*cfg));
>> +    else
>> +        dsb(ishst);
>> +}
>> +
>> +static void its_mask_irq(struct irq_desc *d)
>> +{
>> +    u32 id;
>> +
>> +    id = d->irq;
>> +    set_bit(_IRQ_DISABLED, &d->status);
>> +    lpi_set_config(d->irq, id, 0);
>> +}
>> +
>> +static void its_unmask_irq(struct irq_desc *d)
>> +{
>> +    u32 id;
>> +
>> +    id = d->irq;
>> +    clear_bit(_IRQ_DISABLED, &d->status);
>> +    lpi_set_config(d->irq, id, 1);
>> +}
>> +
>> +static unsigned int its_irq_startup(struct irq_desc *desc)
>> +{
>> +    its_unmask_irq(desc);
>> +
>> +    return 0;
>> +}
>> +
>> +static void its_irq_shutdown(struct irq_desc *desc)
>> +{
>> +    its_mask_irq(desc);
>> +}
>> +
>> +static void its_eoi_irq(struct irq_desc *d)
>> +{
>> +    gic_eoi_irq(d);
>> +}
>> +
>> +static void its_ack_irq(struct irq_desc *desc)
>> +{
>> +    /* No ACK -- reading IAR has done this for us */
>> +}
>> +
>> +static void its_set_affinity(struct irq_desc *d, const cpumask_t *mask_val)
>> +{
>> +    /* XXX: check cpumask_any or cpu_online_map is ok? */
>> +    cpumask_t online_mask;
>> +    unsigned int cpu;
>> +    struct vits_device *its_dev = irq_get_desc_data(d);
>> +    struct its_collection *target_col;
>> +    uint32_t id;
>> +
>> +    cpumask_and(&online_mask, mask_val, &cpu_online_map);
>> +    cpu = cpumask_any(&online_mask);
>> +    /* Physical collection id */
>> +    target_col = &its->collections[cpu];
>> +    /* Physical irq is considered not virq */
>> +    id = d->irq;
>> +
>> +    its_send_movi(its, target_col, its_dev->dev_id, id);
>> +}
>> +
>> +/* TODO: To implement set_affinity */
>> +static const hw_irq_controller gic_guest_its_type = {
>> +    .typename     = "gic-its",
>> +    .startup      = its_irq_startup,
>> +    .shutdown     = its_irq_shutdown,
>> +    .enable       = its_unmask_irq,
>> +    .disable      = its_mask_irq,
>> +    .ack          = its_ack_irq,
>> +    .end          = its_eoi_irq,
>> +    .set_affinity = its_set_affinity,
>> +};
>> +
>> +static const struct gic_its_hw_operations gic_its_ops = {
>> +    .gic_guest_lpi_type  = &gic_guest_its_type,
>> +};
>
> Aside from the one call to its_send_movi, nothing here is ITS specific.
> In fact I would just move all this to gic-v3.c as it feels like part of
> the GICv3 driver rather than the ITS driver.

its_mask_irq(), its_unmask_irq() and its_set_affinity() are ITS-specific:
LPIs are enabled and disabled through the per-LPI configuration byte in
the property table rather than through distributor registers, and moving
an LPI to another CPU requires a MOVI command on the ITS command queue.
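
To illustrate the difference (a minimal sketch, not code from this
series: the two helper names and GICD_BASE are made up; the
property-table logic mirrors lpi_set_config() above):

    /* SPIs are masked via a write-1-to-disable bit in the distributor. */
    static void sketch_mask_spi(unsigned int irq)
    {
        writel_relaxed(1U << (irq % 32),
                       GICD_BASE + GICD_ICENABLER + (irq / 32) * 4);
    }

    /*
     * LPIs have no enable bit in the distributor. Masking one means
     * clearing bit 0 of its configuration byte in the property table
     * and making that write visible to the redistributor.
     */
    static void sketch_mask_lpi(unsigned int irq)
    {
        u8 *cfg = gic_rdists->prop_page + irq - NR_GIC_LPI;

        *cfg &= ~(1 << 0);
        if ( gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING )
            clean_dcache_va_range(cfg, sizeof(*cfg));
        else
            dsb(ishst);
    }

Likewise set_affinity cannot just write GICD_IROUTER for an LPI; it has
to queue a MOVI command on the ITS, which is why these three handlers
stay in the ITS driver.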

>
>
>>  static u64 its_cmd_ptr_to_offset(struct its_node *its,
>>                                   struct its_cmd_block *ptr)
>>  {
>> @@ -392,7 +492,7 @@ static void its_send_mapc(struct its_node *its, struct its_collection *col,
>>      its_send_single_command(its, its_build_mapc_cmd, &desc);
>>  }
>>
>> -void its_send_movi(struct its_node *its, struct its_collection *col,
>> +static void its_send_movi(struct its_node *its, struct its_collection *col,
>>                     u32 dev_id, u32 id)
>>  {
>>      struct its_cmd_desc desc;
>> @@ -872,6 +972,8 @@ int its_init(struct dt_device_node *node, struct rdist_prop *rdists)
>>
>>      its_alloc_lpi_tables();
>>
>> +    register_gic_its_ops(&gic_its_ops);
>> +
>>      return 0;
>>  }
>>
>> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
>> index 390c8b0..fb77387 100644
>> --- a/xen/arch/arm/gic.c
>> +++ b/xen/arch/arm/gic.c
>> @@ -46,12 +46,18 @@ static DEFINE_PER_CPU(uint64_t, lr_mask);
>>  static void gic_update_one_lr(struct vcpu *v, int i);
>>
>>  static const struct gic_hw_operations *gic_hw_ops;
>> +static const struct gic_its_hw_operations *gic_its_hw_ops;
>>
>>  void register_gic_ops(const struct gic_hw_operations *ops)
>>  {
>>      gic_hw_ops = ops;
>>  }
>>
>> +void register_gic_its_ops(const struct gic_its_hw_operations *ops)
>> +{
>> +    gic_its_hw_ops = ops;
>> +}
>
> Not clear why you need this.

The ops are registered with the common GIC driver so that gic.c can use
the .gic_guest_lpi_type field of gic_its_hw_operations to set
desc->handler on the descriptors of LPIs.
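
Roughly like this (a sketch of the intended call site, not code from
this patch; the exact hook in gic_route_irq_to_guest() may differ):

    /* When routing a physical IRQ, pick the ITS-provided handler for
     * INTIDs in the LPI range (i.e. >= NR_GIC_LPI in this series). */
    if ( desc->irq >= NR_GIC_LPI && gic_its_hw_ops )
        desc->handler = gic_its_hw_ops->gic_guest_lpi_type;
    else
        desc->handler = gic_hw_ops->gic_guest_irq_type;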

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
