
Re: [Xen-devel] [PATCH 06/18] xen/pvcalls: handle commands from the frontend



On Mon, 15 May 2017, Boris Ostrovsky wrote:
> On 05/15/2017 04:35 PM, Stefano Stabellini wrote:
> > When the other end notifies us that there are commands to be read
> > (pvcalls_back_event), wake up the backend thread to parse the command.
> > 
> > The command ring works like most other Xen rings, so use the usual
> > ring macros to read and write to it. The functions implementing the
> > commands are empty stubs for now.
> > 
> > Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
> > CC: boris.ostrovsky@xxxxxxxxxx
> > CC: jgross@xxxxxxxx
> > ---
> >  drivers/xen/pvcalls-back.c | 115 +++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 115 insertions(+)
> > 
> > diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
> > index 876e577..2b2a49a 100644
> > --- a/drivers/xen/pvcalls-back.c
> > +++ b/drivers/xen/pvcalls-back.c
> > @@ -62,12 +62,127 @@ static void pvcalls_back_ioworker(struct work_struct *work)
> >  {
> >  }
> > 
> > +static int pvcalls_back_socket(struct xenbus_device *dev,
> > +           struct xen_pvcalls_request *req)
> > +{
> > +   return 0;
> > +}
> > +
> > +static int pvcalls_back_connect(struct xenbus_device *dev,
> > +                           struct xen_pvcalls_request *req)
> > +{
> > +   return 0;
> > +}
> > +
> > +static int pvcalls_back_release(struct xenbus_device *dev,
> > +                           struct xen_pvcalls_request *req)
> > +{
> > +   return 0;
> > +}
> > +
> > +static int pvcalls_back_bind(struct xenbus_device *dev,
> > +                        struct xen_pvcalls_request *req)
> > +{
> > +   return 0;
> > +}
> > +
> > +static int pvcalls_back_listen(struct xenbus_device *dev,
> > +                          struct xen_pvcalls_request *req)
> > +{
> > +   return 0;
> > +}
> > +
> > +static int pvcalls_back_accept(struct xenbus_device *dev,
> > +                          struct xen_pvcalls_request *req)
> > +{
> > +   return 0;
> > +}
> > +
> > +static int pvcalls_back_poll(struct xenbus_device *dev,
> > +                        struct xen_pvcalls_request *req)
> > +{
> > +   return 0;
> > +}
> > +
> > +static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
> > +                              struct xen_pvcalls_request *req)
> > +{
> > +   int ret = 0;
> > +
> > +   switch (req->cmd) {
> > +   case PVCALLS_SOCKET:
> > +           ret = pvcalls_back_socket(dev, req);
> > +           break;
> > +   case PVCALLS_CONNECT:
> > +           ret = pvcalls_back_connect(dev, req);
> > +           break;
> > +   case PVCALLS_RELEASE:
> > +           ret = pvcalls_back_release(dev, req);
> > +           break;
> > +   case PVCALLS_BIND:
> > +           ret = pvcalls_back_bind(dev, req);
> > +           break;
> > +   case PVCALLS_LISTEN:
> > +           ret = pvcalls_back_listen(dev, req);
> > +           break;
> > +   case PVCALLS_ACCEPT:
> > +           ret = pvcalls_back_accept(dev, req);
> > +           break;
> > +   case PVCALLS_POLL:
> > +           ret = pvcalls_back_poll(dev, req);
> > +           break;
> > +   default:
> > +           ret = -ENOTSUPP;
> > +           break;
> > +   }
> > +   return ret;
> > +}
> > +
> >  static void pvcalls_back_work(struct work_struct *work)
> >  {
> > +   struct pvcalls_back_priv *priv = container_of(work,
> > +           struct pvcalls_back_priv, register_work);
> > +   int notify, notify_all = 0, more = 1;
> > +   struct xen_pvcalls_request req;
> > +   struct xenbus_device *dev = priv->dev;
> > +
> > +   atomic_set(&priv->work, 1);
> > +
> > +   while (more || !atomic_dec_and_test(&priv->work)) {
> > +           while (RING_HAS_UNCONSUMED_REQUESTS(&priv->ring)) {
> > +                   RING_COPY_REQUEST(&priv->ring,
> > +                                     priv->ring.req_cons++,
> > +                                     &req);
> > +
> > +                   if (pvcalls_back_handle_cmd(dev, &req) > 0) {
> 
> Can you make the handlers use "traditional" return values, i.e. <0 on
> error and 0 on success? Or do you really need to distinguish 0 from >0?

Today, < 0 means error, 0 means OK with no notification required, and 1
means OK with a notification required. Given that errors are returned to
the other end via the appropriate response field (we don't do anything
with an error in pvcalls_back_work), I could change this to:

-1: no notification needed (both errors and regular conditions)
 0: notification needed
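
For illustration, a minimal sketch of what the caller's check in
pvcalls_back_work would look like under that convention (this is the
proposed scheme, not the code as posted):

	if (pvcalls_back_handle_cmd(dev, &req) == 0) {
		/* handler queued a response; check whether the
		 * frontend needs to be notified */
		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&priv->ring,
						     notify);
		notify_all += notify;
	}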


> > +                           RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
> > +                                   &priv->ring, notify);
> > +                           notify_all += notify;
> > +                   }
> > +           }
> > +
> > +           if (notify_all)
> > +                   notify_remote_via_irq(priv->irq);
> > +
> > +           RING_FINAL_CHECK_FOR_REQUESTS(&priv->ring, more);
> > +   }
> >  }
> > 
> >  static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
> >  {
> > +   struct xenbus_device *dev = dev_id;
> > +   struct pvcalls_back_priv *priv = NULL;
> > +
> > +   if (dev == NULL)
> > +           return IRQ_HANDLED;
> > +
> > +   priv = dev_get_drvdata(&dev->dev);
> > +   if (priv == NULL)
> > +           return IRQ_HANDLED;
> 
> These two aren't errors?

They are meant to handle spurious event notifications. From the Linux
irq handling subsystem point of view, they are not errors.
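
One plausible source of such spurious notifications: during probe, the
event channel is typically bound before drvdata is set, so an event
arriving in that window finds priv == NULL. A hypothetical sketch of
that ordering (the evtchn variable and error label are illustrative;
this is not the posted probe code):

	err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id,
						    evtchn,
						    pvcalls_back_event,
						    0, "pvcalls-back",
						    dev);
	if (err < 0)
		goto error;
	/* ... allocate and initialize priv ... */
	dev_set_drvdata(&dev->dev, priv);	/* events delivered before
						 * this point see NULL */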


> > +
> > +   atomic_inc(&priv->work);
> 
> Is this really needed? We have a new entry on the ring, so the outer loop in
> pvcalls_back_work() will pick this up (by setting 'more').

This is to avoid a race condition: a notification could be delivered
after RING_FINAL_CHECK_FOR_REQUESTS is called (returning more == 0) but
before pvcalls_back_work completes. In that case, without priv->work,
pvcalls_back_work wouldn't be rescheduled because it is still running,
and the new request would be left unhandled.
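
A sketch of the window being closed, under the premise above that
queue_work has no effect while the work item is still running:

	pvcalls_back_work                  pvcalls_back_event
	-----------------                  ------------------
	RING_FINAL_CHECK_FOR_REQUESTS
	  -> more == 0
	                                   frontend queues a request
	                                   atomic_inc(&priv->work)
	                                   queue_work() -> no-op
	                                   (work still running)
	loop condition: more == 0, but
	atomic_dec_and_test(&priv->work)
	sees the pending increment
	  -> iterate once more and
	     drain the ring

Without the counter, the worker would return right after the final
check and the new request would sit on the ring until the next event.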


> > +   queue_work(priv->wq, &priv->register_work);
> > +
> >     return IRQ_HANDLED;
> >  }

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
