
Re: [Xen-devel] [PATCH v5 23/30] ARM: vITS: handle MAPTI command



Hi,

On 06/04/17 01:45, Stefano Stabellini wrote:
> On Thu, 6 Apr 2017, Andre Przywara wrote:
>> The MAPTI command associates a DeviceID/EventID pair with an LPI/CPU
>> pair and actually instantiates LPI interrupts.
>> We connect the already allocated host LPI to this virtual LPI, so that
>> any triggering LPI on the host can be quickly forwarded to a guest.
>> Besides entering the VCPU and the virtual LPI number in the respective
>> host LPI entry, we also initialize and add the already allocated
>> struct pending_irq to our radix tree, so that we can now easily find it
>> by its virtual LPI number.
>> To be able to later find the targeting VCPU for any given LPI without
>> having to walk *all* ITS tables, we store the VCPU ID in the pending_irq
>> struct as well.
>> This exports the vgic_init_pending_irq() function to be able to
>> initialize a new struct pending_irq.
>> As write_itte() is now actually used, we can add the static tag.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
>> ---
>>  xen/arch/arm/gic-v3-its.c        | 74 ++++++++++++++++++++++++++++++++++++++
>>  xen/arch/arm/gic-v3-lpi.c        | 18 ++++++++++
>>  xen/arch/arm/vgic-v3-its.c       | 76 ++++++++++++++++++++++++++++++++++++++--
>>  xen/arch/arm/vgic.c              |  2 +-
>>  xen/include/asm-arm/gic_v3_its.h |  6 ++++
>>  xen/include/asm-arm/vgic.h       |  2 ++
>>  6 files changed, 175 insertions(+), 3 deletions(-)
>>
>> diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
>> index 76b0316..d970119 100644
>> --- a/xen/arch/arm/gic-v3-its.c
>> +++ b/xen/arch/arm/gic-v3-its.c
>> @@ -777,6 +777,80 @@ out:
>>      return ret;
>>  }
>>  
>> +/* Must be called with the its_device_lock held. */
>> +static struct its_devices *get_its_device(struct domain *d, paddr_t vdoorbell,
>> +                                          uint32_t vdevid)
>> +{
>> +    struct rb_node *node = d->arch.vgic.its_devices.rb_node;
>> +    struct its_devices *dev;
>> +
>> +    ASSERT(spin_is_locked(&d->arch.vgic.its_devices_lock));
>> +
>> +    while (node)
>> +    {
>> +        int cmp;
>> +
>> +        dev = rb_entry(node, struct its_devices, rbnode);
>> +        cmp = compare_its_guest_devices(dev, vdoorbell, vdevid);
>> +
>> +        if ( !cmp )
>> +            return dev;
>> +
>> +        if ( cmp > 0 )
>> +            node = node->rb_left;
>> +        else
>> +            node = node->rb_right;
>> +    }
>> +
>> +    return NULL;
>> +}
>> +
>> +static uint32_t get_host_lpi(struct its_devices *dev, uint32_t eventid)
>> +{
>> +    uint32_t host_lpi = 0;
>> +
>> +    if ( dev && (eventid < dev->eventids) )
>> +        host_lpi = dev->host_lpi_blocks[eventid / LPI_BLOCK] +
>> +                                       (eventid % LPI_BLOCK);
>> +
>> +    return host_lpi;
>> +}
>> +
>> +/*
>> + * Connects the event ID for an already assigned device to the given VCPU/vLPI
>> + * pair. The corresponding physical LPI is already mapped on the host side
>> + * (when assigning the physical device to the guest), so we just connect the
>> + * target VCPU/vLPI pair to that interrupt to inject it properly if it fires.
>> + * Returns a pointer to the already allocated struct pending_irq that is
>> + * meant to be used by that event.
>> + */
>> +struct pending_irq *gicv3_assign_guest_event(struct domain *d,
>> +                                             paddr_t vdoorbell_address,
>> +                                             uint32_t vdevid, uint32_t veventid,
>> +                                             struct vcpu *v, uint32_t virt_lpi)
>> +{
>> +    struct its_devices *dev;
>> +    struct pending_irq *pirq = NULL;
>> +    uint32_t host_lpi = 0;
>> +
>> +    spin_lock(&d->arch.vgic.its_devices_lock);
>> +    dev = get_its_device(d, vdoorbell_address, vdevid);
>> +    if ( dev )
>> +    {
>> +        host_lpi = get_host_lpi(dev, veventid);
>> +        pirq = &dev->pend_irqs[veventid];
>> +    }
>> +    spin_unlock(&d->arch.vgic.its_devices_lock);
>> +
>> +    if ( !host_lpi || !pirq )
>> +        return NULL;
>> +
>> +    gicv3_lpi_update_host_entry(host_lpi, d->domain_id,
>> +                                v ? v->vcpu_id : INVALID_VCPU_ID, virt_lpi);
>> +
>> +    return pirq;
>> +}
>> +
>>  /* Scan the DT for any ITS nodes and create a list of host ITSes out of it. */
>>  void gicv3_its_dt_init(const struct dt_device_node *node)
>>  {
>> diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c
>> index 7d20986..c997ed5 100644
>> --- a/xen/arch/arm/gic-v3-lpi.c
>> +++ b/xen/arch/arm/gic-v3-lpi.c
>> @@ -216,6 +216,24 @@ void gicv3_do_LPI(unsigned int lpi)
>>      rcu_unlock_domain(d);
>>  }
>>  
>> +void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
>> +                                 unsigned int vcpu_id, uint32_t virt_lpi)
>> +{
>> +    union host_lpi *hlpip, hlpi;
>> +
>> +    ASSERT(host_lpi >= LPI_OFFSET);
>> +
>> +    host_lpi -= LPI_OFFSET;
>> +
>> +    hlpip = &lpi_data.host_lpis[host_lpi / HOST_LPIS_PER_PAGE][host_lpi % HOST_LPIS_PER_PAGE];
>> +
>> +    hlpi.virt_lpi = virt_lpi;
>> +    hlpi.dom_id = domain_id;
>> +    hlpi.vcpu_id = vcpu_id;
>> +
>> +    write_u64_atomic(&hlpip->data, hlpi.data);
>> +}
>> +
>>  static int gicv3_lpi_allocate_pendtable(uint64_t *reg)
>>  {
>>      uint64_t val;
>> diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
>> index 0372ed0..079dd44 100644
>> --- a/xen/arch/arm/vgic-v3-its.c
>> +++ b/xen/arch/arm/vgic-v3-its.c
>> @@ -275,8 +275,8 @@ static bool write_itte_locked(struct virt_its *its, uint32_t devid,
>>   * This function takes care of the locking by taking the its_lock itself, so
>>   * a caller shall not hold this. Before returning, the lock is dropped again.
>>   */
>> -bool write_itte(struct virt_its *its, uint32_t devid, uint32_t evid,
>> -                uint32_t collid, uint32_t vlpi, struct vcpu **vcpu_ptr)
>> +static bool write_itte(struct virt_its *its, uint32_t devid, uint32_t evid,
>> +                       uint32_t collid, uint32_t vlpi, struct vcpu **vcpu_ptr)
>>  {
>>      bool ret;
>>  
>> @@ -440,6 +440,74 @@ static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr)
>>      return ret;
>>  }
>>  
>> +static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
>> +{
>> +    uint32_t devid = its_cmd_get_deviceid(cmdptr);
>> +    uint32_t eventid = its_cmd_get_id(cmdptr);
>> +    uint32_t intid = its_cmd_get_physical_id(cmdptr), _intid;
>> +    uint16_t collid = its_cmd_get_collection(cmdptr);
>> +    struct pending_irq *pirq;
>> +    struct vcpu *vcpu = NULL;
>> +    int ret = 0;
>> +
>> +    if ( its_cmd_get_command(cmdptr) == GITS_CMD_MAPI )
>> +        intid = eventid;
>> +
>> +    spin_lock(&its->its_lock);
>> +    /*
>> +     * Check whether there is a valid existing mapping. If yes, the behavior
>> +     * is unpredictable, so we choose to ignore this command here.
>> +     * This makes sure we start with a pristine pending_irq below.
>> +     */
>> +    if ( read_itte_locked(its, devid, eventid, &vcpu, &_intid) &&
>> +         _intid != INVALID_LPI )
>> +    {
>> +        spin_unlock(&its->its_lock);
>> +        return -1;
>> +    }
>> +
>> +    /* Enter the mapping in our virtual ITS tables. */
>> +    if ( !write_itte_locked(its, devid, eventid, collid, intid, &vcpu) )
>> +    {
>> +        spin_unlock(&its->its_lock);
>> +        return -1;
>> +    }
>> +
>> +    spin_unlock(&its->its_lock);
>> +
>> +    /*
>> +     * Connect this virtual LPI to the corresponding host LPI, which is
>> +     * determined by the same device ID and event ID on the host side.
>> +     * This returns us the corresponding, still unused pending_irq.
>> +     */
>> +    pirq = gicv3_assign_guest_event(its->d, its->doorbell_address,
>> +                                    devid, eventid, vcpu, intid);
>> +    if ( !pirq )
>> +        return -1;
>> +
>> +    vgic_init_pending_irq(pirq, intid);
>> +
>> +    /*
>> +     * Now read the guest's property table to initialize our cached state.
>> +     * It can't fire at this time, because it is not known to the host yet.
>> +     */
>> +    ret = update_lpi_property(its->d, intid, pirq);
>> +    if ( ret )
>> +        return ret;
>> +
>> +    pirq->vcpu_id = vcpu->vcpu_id;
>> +
>> +    /*
>> +     * Now insert the pending_irq into the domain's LPI tree, so that
>> +     * it becomes live.
>> +     */
>> +    write_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
>> +    radix_tree_insert(&its->d->arch.vgic.pend_lpi_tree, intid, pirq);
>> +    write_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
> 
> It looks like the whole allocation, starting from
> gicv3_assign_guest_event, needs to be protected by pend_lpi_tree_lock.
> Otherwise we risk allocating the same struct twice? Or that is not
> possible thanks to the vcmd_lock (because for two struct pending_irq to
> clash they need to belong to the same vits)?

Yes, the pending_irqs are allocated on MAPD, which uses an ITS and a
device ID, so this is unique. We hold the vcmd_lock, so there can't be
another command handler running on this ITS.

Nevertheless, I added some lines to check the return value of
radix_tree_insert, so we deal with any error here and roll back the above
actions if the insertion fails with -ENOMEM or -EEXIST.

Cheers,
Andre.

>> +    return 0;
>> +}
>> +
>>  #define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
>>  
>>  /*
>> @@ -480,6 +548,10 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
>>          case GITS_CMD_MAPD:
>>              ret = its_handle_mapd(its, command);
>>              break;
>> +        case GITS_CMD_MAPI:
>> +        case GITS_CMD_MAPTI:
>> +            ret = its_handle_mapti(its, command);
>> +            break;
>>          case GITS_CMD_SYNC:
>>              /* We handle ITS commands synchronously, so we ignore SYNC. */
>>              break;
>> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
>> index 9b0dc3d..cb1666b 100644
>> --- a/xen/arch/arm/vgic.c
>> +++ b/xen/arch/arm/vgic.c
>> @@ -61,7 +61,7 @@ struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq)
>>      return vgic_get_rank(v, rank);
>>  }
>>  
>> -static void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
>> +void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
>>  {
>>      INIT_LIST_HEAD(&p->inflight);
>>      INIT_LIST_HEAD(&p->lr_queue);
>> diff --git a/xen/include/asm-arm/gic_v3_its.h b/xen/include/asm-arm/gic_v3_its.h
>> index d3f393f..30aa1ef 100644
>> --- a/xen/include/asm-arm/gic_v3_its.h
>> +++ b/xen/include/asm-arm/gic_v3_its.h
>> @@ -174,6 +174,12 @@ int gicv3_its_map_guest_device(struct domain *d,
>>  int gicv3_allocate_host_lpi_block(struct domain *d, uint32_t *first_lpi);
>>  void gicv3_free_host_lpi_block(uint32_t first_lpi);
>>  
>> +struct pending_irq *gicv3_assign_guest_event(struct domain *d, paddr_t doorbell,
>> +                                             uint32_t devid, uint32_t eventid,
>> +                                             struct vcpu *v, uint32_t virt_lpi);
>> +void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
>> +                                 unsigned int vcpu_id, uint32_t virt_lpi);
>> +
>>  #else
>>  
>>  static LIST_HEAD(host_its_list);
>> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
>> index 2371960..074afe4 100644
>> --- a/xen/include/asm-arm/vgic.h
>> +++ b/xen/include/asm-arm/vgic.h
>> @@ -83,6 +83,7 @@ struct pending_irq
>>       * TODO: when implementing irq migration, taking only the current
>>       * vgic lock is not going to be enough. */
>>      struct list_head lr_queue;
>> +    uint16_t vcpu_id;          /* The VCPU for an LPI. */
>>  };
>>  
>>  #define NR_INTERRUPT_PER_RANK   32
>> @@ -303,6 +304,7 @@ extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
>>  extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
>>  extern void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq);
>>  extern void vgic_clear_pending_irqs(struct vcpu *v);
>> +extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq);
>>  extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
>>  extern struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq);
>>  extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, int s);
>> -- 
>> 2.8.2
>>

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

