[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v5 10/15] argo: implement the notify op



On Tue, Jan 22, 2019 at 6:14 AM Roger Pau Monné <roger.pau@xxxxxxxxxx> wrote:
>
> On Mon, Jan 21, 2019 at 01:59:50AM -0800, Christopher Clark wrote:
> > Queries for data about space availability in registered rings and
> > causes a notification to be sent when space has become available.
> >
> > The hypercall op populates a supplied data structure with information about
> > ring state and if insufficient space is currently available in a given ring,
> > the hypervisor will record the domain's expressed interest and notify it
> > when it observes that space has become available.
> >
> > Checks for free space occur when this notify op is invoked, so it may be
> > intentionally invoked with no data structure to populate
> > (i.e. a NULL argument) to trigger such a check and consequent notifications.
> >
> > Limit the maximum number of notify requests in a single operation to a
> > simple fixed limit of 256.
> >
> > Signed-off-by: Christopher Clark <christopher.clark6@xxxxxxxxxxxxxx>
>
> LGTM, but I would like to see the open-coded versions of the list_
> macros fixed:
>
> Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
>
> > diff --git a/xen/common/argo.c b/xen/common/argo.c
> > index 518aff7..4b43bdd 100644
> > --- a/xen/common/argo.c
> > +++ b/xen/common/argo.c
> [...]
> > +static void
> > +pending_notify(struct list_head *to_notify)
> > +{
> > +    ASSERT(LOCKING_Read_L1);
> > +
> > +    /* Sending signals for all ents in this list, draining until it is 
> > empty. */
> > +    while ( !list_empty(to_notify) )
> > +    {
> > +        struct pending_ent *ent =
> > +            list_entry(to_notify->next, struct pending_ent, node);
>
> list_first_entry_or_null

(same as earlier message: list_first_entry_or_null is not used by Xen)

list_for_each_entry_safe

>
> > +
> > +        list_del(&ent->node);
> > +        signal_domid(ent->domain_id);
> > +        xfree(ent);
> > +    }
> > +}
> > +
> > +static void
> > +pending_find(const struct domain *d, struct argo_ring_info *ring_info,
> > +             unsigned int payload_space, struct list_head *to_notify)
> > +{
> > +    struct list_head *cursor, *pending_head;
> > +
> > +    ASSERT(LOCKING_Read_rings_L2(d));
> > +
> > +    /*
> > +     * TODO: Current policy here is to signal _all_ of the waiting domains
> > +     *       interested in sending a message of size less than 
> > payload_space.
> > +     *
> > +     * This is likely to be suboptimal, since once one of them has added
> > +     * their message to the ring, there may well be insufficient room
> > +     * available for any of the others to transmit, meaning that they were
> > +     * woken in vain, which created extra work just to requeue their wait.
> > +     *
> > +     * Retain this simple policy for now since it at least avoids starving 
> > a
> > +     * domain of available space notifications because of a policy that 
> > only
> > +     * notified other domains instead. Improvement may be possible;
> > +     * investigation required.
> > +     */
> > +    spin_lock(&ring_info->L3_lock);
> > +
> > +    /* Remove matching ents from the ring list, and add them to 
> > "to_notify" */
> > +    pending_head = &ring_info->pending;
> > +    cursor = pending_head->next;
> > +
> > +    while ( cursor != pending_head )
> > +    {
> > +        struct pending_ent *ent = list_entry(cursor, struct pending_ent, 
> > node);
> > +
> > +        cursor = cursor->next;
>
> list_for_each_entry_safe?

ack

>
> > +
> > +        if ( payload_space >= ent->len )
> > +        {
> > +            if ( ring_info->id.partner_id == XEN_ARGO_DOMID_ANY )
> > +                wildcard_pending_list_remove(ent->domain_id, ent);
> > +
> > +            list_del(&ent->node);
> > +            ring_info->npending--;
> > +            list_add(&ent->node, to_notify);
> > +        }
> > +    }
> > +
> > +    spin_unlock(&ring_info->L3_lock);
> > +}
> > +
> >  static int
> >  pending_queue(const struct domain *d, struct argo_ring_info *ring_info,
> >                domid_t src_id, unsigned int len)
> > @@ -1023,6 +1163,36 @@ pending_requeue(const struct domain *d, struct 
> > argo_ring_info *ring_info,
> >  }
> >
> >  static void
> > +pending_cancel(const struct domain *d, struct argo_ring_info *ring_info,
> > +               domid_t src_id)
> > +{
> > +    struct list_head *cursor, *pending_head;
> > +
> > +    ASSERT(LOCKING_L3(d, ring_info));
> > +
> > +    /* Remove all ents where domain_id matches src_id from the ring's 
> > list. */
> > +    pending_head = &ring_info->pending;
> > +    cursor = pending_head->next;
> > +
> > +    while ( cursor != pending_head )
> > +    {
> > +        struct pending_ent *ent = list_entry(cursor, struct pending_ent, 
> > node);
> > +
> > +        cursor = cursor->next;
>
> list_for_each_entry_safe

ack

>
> > +
> > +        if ( ent->domain_id == src_id )
> > +        {
> > +            /* For wildcard rings, remove each from their wildcard list 
> > too. */
> > +            if ( ring_info->id.partner_id == XEN_ARGO_DOMID_ANY )
> > +                wildcard_pending_list_remove(ent->domain_id, ent);
> > +            list_del(&ent->node);
> > +            xfree(ent);
> > +            ring_info->npending--;
> > +        }
> > +    }
> > +}
> > +
> > +static void
> >  wildcard_rings_pending_remove(struct domain *d)
> >  {
> >      struct list_head *wildcard_head;
> > @@ -1158,6 +1328,86 @@ partner_rings_remove(struct domain *src_d)
> >  }
> >
> >  static int
> > +fill_ring_data(const struct domain *currd,
> > +               XEN_GUEST_HANDLE(xen_argo_ring_data_ent_t) data_ent_hnd)
> > +{
> > +    xen_argo_ring_data_ent_t ent;
> > +    struct domain *dst_d;
> > +    struct argo_ring_info *ring_info;
> > +    int ret = 0;
> > +
> > +    ASSERT(currd == current->domain);
> > +    ASSERT(LOCKING_Read_L1);
> > +
> > +    if ( __copy_from_guest(&ent, data_ent_hnd, 1) )
> > +        return -EFAULT;
> > +
> > +    argo_dprintk("fill_ring_data: ent.ring.domain=%u,ent.ring.aport=%x\n",
> > +                 ent.ring.domain_id, ent.ring.aport);
> > +
> > +    ent.flags = 0;
> > +
> > +    dst_d = get_domain_by_id(ent.ring.domain_id);
> > +    if ( !dst_d || !dst_d->argo )
> > +        goto out;
> > +
> > +    read_lock(&dst_d->argo->rings_L2_rwlock);
> > +
> > +    ring_info = find_ring_info_by_match(dst_d, ent.ring.aport,
> > +                                        currd->domain_id);
> > +    if ( ring_info )
> > +    {
> > +        unsigned int space_avail;
> > +
> > +        ent.flags |= XEN_ARGO_RING_EXISTS;
> > +
> > +        spin_lock(&ring_info->L3_lock);
> > +
> > +        ent.max_message_size = ring_info->len -
> > +                                   sizeof(struct 
> > xen_argo_ring_message_header) -
> > +                                   ROUNDUP_MESSAGE(1);
> > +
> > +        if ( ring_info->id.partner_id == XEN_ARGO_DOMID_ANY )
> > +            ent.flags |= XEN_ARGO_RING_SHARED;
> > +
> > +        space_avail = ringbuf_payload_space(dst_d, ring_info);
> > +
> > +        argo_dprintk("fill_ring_data: aport=%x space_avail=%u"
> > +                     " space_wanted=%u\n",
> > +                     ring_info->id.aport, space_avail, ent.space_required);
> > +
> > +        /* Do not queue a notification for an unachievable size */
> > +        if ( ent.space_required > ent.max_message_size )
> > +            ent.flags |= XEN_ARGO_RING_EMSGSIZE;
> > +        else if ( space_avail >= ent.space_required )
> > +        {
> > +            pending_cancel(dst_d, ring_info, currd->domain_id);
> > +            ent.flags |= XEN_ARGO_RING_SUFFICIENT;
> > +        }
> > +        else
> > +            ret = pending_requeue(dst_d, ring_info, currd->domain_id,
> > +                                  ent.space_required);
> > +
> > +        spin_unlock(&ring_info->L3_lock);
> > +
> > +        if ( space_avail == ent.max_message_size )
> > +            ent.flags |= XEN_ARGO_RING_EMPTY;
> > +
> > +    }
> > +    read_unlock(&dst_d->argo->rings_L2_rwlock);
> > +
> > + out:
> > +    if ( dst_d )
> > +        put_domain(dst_d);
> > +
> > +    if ( !ret && (__copy_field_to_guest(data_ent_hnd, &ent, flags) ||
> > +                  __copy_field_to_guest(data_ent_hnd, &ent, 
> > max_message_size)) )
> > +        return -EFAULT;
> > +
> > +    return ret;
> > +}
> > +
> > +static int
> >  find_ring_mfn(struct domain *d, gfn_t gfn, mfn_t *mfn)
> >  {
> >      struct page_info *page;
> > @@ -1586,6 +1836,112 @@ register_ring(struct domain *currd,
> >      return ret;
> >  }
> >
> > +static void
> > +notify_ring(const struct domain *d, struct argo_ring_info *ring_info,
> > +            struct list_head *to_notify)
> > +{
> > +    unsigned int space;
> > +
> > +    ASSERT(LOCKING_Read_rings_L2(d));
> > +
> > +    spin_lock(&ring_info->L3_lock);
> > +
> > +    if ( ring_info->len )
> > +        space = ringbuf_payload_space(d, ring_info);
> > +    else
> > +        space = 0;
> > +
> > +    spin_unlock(&ring_info->L3_lock);
> > +
> > +    if ( space )
> > +        pending_find(d, ring_info, space, to_notify);
> > +}
> > +
> > +static void
> > +notify_check_pending(struct domain *d)
> > +{
> > +    unsigned int i;
> > +    LIST_HEAD(to_notify);
> > +
> > +    ASSERT(LOCKING_Read_L1);
> > +
> > +    read_lock(&d->argo->rings_L2_rwlock);
> > +
> > +    /* Walk all rings, call notify_ring on each to populate to_notify list 
> > */
> > +    for ( i = 0; i < ARGO_HASHTABLE_SIZE; i++ )
> > +    {
> > +        struct list_head *cursor, *bucket = &d->argo->ring_hash[i];
> > +        struct argo_ring_info *ring_info;
> > +
> > +        for ( cursor = bucket->next; cursor != bucket; cursor = 
> > cursor->next )
>
> list_for_each_entry

list_for_each_entry_safe

thanks

Christopher

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.