[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Minios-devel] [UNIKRAFT PATCH 9/9] plat/xen/drivers/9p: Add bottom-half handler



I wanted to add my thanks, Cristi — well done!

-- Felipe

On 07.09.19, 12:40, "Minios-devel on behalf of Costin Lupu" 
<minios-devel-bounces@xxxxxxxxxxxxxxxxxxxx on behalf of costin.lup@xxxxxxxxx> 
wrote:

    Thanks for the 9pfs support, Cristi! Awesome job!
    
    Cheers,
    Costin
    
    On 9/7/19 1:22 PM, Vlad-Andrei BĂDOIU (78692) wrote:
    > From: Cristian Banu <cristb@xxxxxxxxx>
    > 
    > This patch adds bottom-half handling to Xen 9P by creating one thread
    > per ring which waits on data to arrive.
    > 
    > Signed-off-by: Cristian Banu <cristb@xxxxxxxxx>
    > ---
    >  plat/xen/drivers/9p/9pfront.c | 53 ++++++++++++++++++++++++++++++++++-
    >  plat/xen/drivers/9p/9pfront.h | 13 +++++++++
    >  2 files changed, 65 insertions(+), 1 deletion(-)
    > 
    > diff --git a/plat/xen/drivers/9p/9pfront.c b/plat/xen/drivers/9p/9pfront.c
    > index a5321898..da55fd61 100644
    > --- a/plat/xen/drivers/9p/9pfront.c
    > +++ b/plat/xen/drivers/9p/9pfront.c
    > @@ -33,12 +33,16 @@
    >   */
    >  
    >  #include <stdbool.h>
    > +#include <stdio.h>
    >  #include <uk/config.h>
    >  #include <uk/alloc.h>
    >  #include <uk/assert.h>
    >  #include <uk/essentials.h>
    >  #include <uk/errptr.h>
    >  #include <uk/list.h>
    > +#if CONFIG_LIBUKSCHED
    > +#include <uk/thread.h>
    > +#endif
    >  #include <uk/9pdev.h>
    >  #include <uk/9preq.h>
    >  #include <uk/9pdev_trans.h>
    > @@ -61,6 +65,23 @@ struct p9front_header {
    >   uint16_t tag;
    >  } __packed;
    >  
    > +static void p9front_recv(struct p9front_dev_ring *ring);
    > +
    > +#if CONFIG_LIBUKSCHED
    > +
    > +static void p9front_bh_handler(void *arg)
    > +{
    > + struct p9front_dev_ring *ring = arg;
    > +
    > + while (1) {
    > +         uk_waitq_wait_event(&ring->bh_wq,
    > +                         UK_READ_ONCE(ring->data_avail));
    > +         p9front_recv(ring);
    > + }
    > +}
    > +
    > +#endif
    > +
    >  static void p9front_recv(struct p9front_dev_ring *ring)
    >  {
    >   struct p9front_dev *p9fdev = ring->dev;
    > @@ -79,6 +100,9 @@ static void p9front_recv(struct p9front_dev_ring *ring)
    >           xen_rmb();
    >  
    >           if (xen_9pfs_queued(prod, cons, ring_size) < sizeof(hdr)) {
    > +#if CONFIG_LIBUKSCHED
    > +                 UK_WRITE_ONCE(ring->data_avail, false);
    > +#endif
    >                   notify_remote_via_evtchn(evtchn);
    >                   return;
    >           }
    > @@ -144,7 +168,12 @@ static void p9front_handler(evtchn_port_t evtchn,
    >    */
    >   if (ring->dev->p9dev)
    >           uk_9pdev_xmit_notify(ring->dev->p9dev);
    > +#if CONFIG_LIBUKSCHED
    > + UK_WRITE_ONCE(ring->data_avail, true);
    > + uk_waitq_wake_up(&ring->bh_wq);
    > +#else
    >   p9front_recv(ring);
    > +#endif
    >  }
    >  
    >  static void p9front_free_dev_ring(struct p9front_dev *p9fdev, int idx)
    > @@ -154,6 +183,9 @@ static void p9front_free_dev_ring(struct p9front_dev 
*p9fdev, int idx)
    >  
    >   UK_ASSERT(ring->initialized);
    >  
    > + if (ring->bh_thread_name)
    > +         free(ring->bh_thread_name);
    > + uk_thread_kill(ring->bh_thread);
    >   unbind_evtchn(ring->evtchn);
    >   for (i = 0; i < (1 << p9fdev->ring_order); i++)
    >           gnttab_end_access(ring->intf->ref[i]);
    > @@ -226,12 +258,27 @@ static int p9front_allocate_dev_ring(struct 
p9front_dev *p9fdev, int idx)
    >   ring->data.in = data_bytes;
    >   ring->data.out = data_bytes + XEN_FLEX_RING_SIZE(p9fdev->ring_order);
    >  
    > +#if CONFIG_LIBUKSCHED
    > + /* Allocate bottom-half thread. */
    > + ring->data_avail = false;
    > + uk_waitq_init(&ring->bh_wq);
    > +
    > + rc = asprintf(&ring->bh_thread_name, DRIVER_NAME"-recv-%s-%u",
    > +                 p9fdev->tag, idx);
    > + ring->bh_thread = uk_thread_create(ring->bh_thread_name,
    > +                 p9front_bh_handler, ring);
    > + if (!ring->bh_thread) {
    > +         rc = -ENOMEM;
    > +         goto out_free_grants;
    > + }
    > +#endif
    > +
    >   /* Allocate event channel. */
    >   rc = evtchn_alloc_unbound(xendev->otherend_id, p9front_handler, ring,
    >                           &ring->evtchn);
    >   if (rc) {
    >           uk_pr_err(DRIVER_NAME": Error creating evt channel: %d\n", rc);
    > -         goto out_free_grants;
    > +         goto out_free_thread;
    >   }
    >  
    >   unmask_evtchn(ring->evtchn);
    > @@ -241,6 +288,10 @@ static int p9front_allocate_dev_ring(struct 
p9front_dev *p9fdev, int idx)
    >  
    >   return 0;
    >  
    > +out_free_thread:
    > + if (ring->bh_thread_name)
    > +         free(ring->bh_thread_name);
    > + uk_thread_kill(ring->bh_thread);
    >  out_free_grants:
    >   for (i = 0; i < (1 << p9fdev->ring_order); i++)
    >           gnttab_end_access(ring->intf->ref[i]);
    > diff --git a/plat/xen/drivers/9p/9pfront.h b/plat/xen/drivers/9p/9pfront.h
    > index 7cea61c5..0bbc7f44 100644
    > --- a/plat/xen/drivers/9p/9pfront.h
    > +++ b/plat/xen/drivers/9p/9pfront.h
    > @@ -40,6 +40,9 @@
    >  #include <uk/essentials.h>
    >  #include <uk/list.h>
    >  #include <uk/plat/spinlock.h>
    > +#if CONFIG_LIBUKSCHED
    > +#include <uk/sched.h>
    > +#endif
    >  #include <xen/io/9pfs.h>
    >  #include <common/events.h>
    >  #include <common/gnttab.h>
    > @@ -59,6 +62,16 @@ struct p9front_dev_ring {
    >   spinlock_t spinlock;
    >   /* Tracks if this ring was initialized. */
    >   bool initialized;
    > +#if CONFIG_LIBUKSCHED
    > + /* Tracks if there is any data available on this ring. */
    > + bool data_avail;
    > + /* Bottom-half thread. */
    > + struct uk_thread *bh_thread;
    > + /* Bottom-half thread name. */
    > + char *bh_thread_name;
    > + /* Wait-queue on which the thread waits for available data. */
    > + struct uk_waitq bh_wq;
    > +#endif
    >  };
    >  
    >  struct p9front_dev {
    > 
    
    _______________________________________________
    Minios-devel mailing list
    Minios-devel@xxxxxxxxxxxxxxxxxxxx
    https://lists.xenproject.org/mailman/listinfo/minios-devel

_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.