
[Xen-devel] [PATCH v2 03/13] xen/pvcalls: implement socket command and handle events



Send a PVCALLS_SOCKET command to the backend, using the masked
req_prod_pvt as req_id. This way, req_id is guaranteed to be between 0
and PVCALLS_NR_REQ_PER_RING - 1. We already have a slot in the rsp
array ready for the response, and there cannot be two outstanding
responses with the same req_id.
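
As a minimal sketch of that invariant (names as in the code added
below; the mask works because the ring size is a power of two):

    /*
     * req_prod_pvt increases monotonically; masking it with the ring
     * size keeps req_id in [0, PVCALLS_NR_REQ_PER_RING - 1], so it can
     * double as an index into the fixed-size rsp array.
     */
    req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
    if (RING_FULL(&bedata->ring) ||
        READ_ONCE(bedata->rsp[req_id].req_id) != PVCALLS_INVALID_ID)
            return -EAGAIN;   /* slot still in use, caller retries */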

Wait for the response by sleeping on the inflight_req waitqueue and
checking the req_id field in rsp[req_id]. Use atomic accesses to
read the field. Once a response is received, clear the corresponding rsp
slot by setting req_id to PVCALLS_INVALID_ID. Note that
PVCALLS_INVALID_ID is invalid only from the frontend's point of view:
it is not part of the PVCalls protocol.
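
In short, the caller side does the following (a sketch with error
paths trimmed; all names are from this patch):

    /* sleep until the event handler publishes our req_id in the slot */
    if (wait_event_interruptible(bedata->inflight_req,
            READ_ONCE(bedata->rsp[req_id].req_id) == req_id) != 0)
        return -EINTR;

    ret = bedata->rsp[req_id].ret;
    /* read ret before marking the slot reusable */
    smp_mb();
    WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);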

pvcalls_front_event_handler is in charge of copying responses from the
ring to the appropriate rsp slot. It does so by copying the body of the
response first, then copying req_id atomically. After the copies, it
wakes up anybody waiting on the inflight_req waitqueue.
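
The publication order in the handler looks like this (a sketch; rsp
points into the ring, bedata->rsp[req_id] is the per-request slot):

    /* copy everything but req_id into the rsp slot first... */
    memcpy(dst, src, sizeof(*rsp) - sizeof(rsp->req_id));
    /* ...then publish req_id, making the body visible to waiters */
    smp_wmb();
    WRITE_ONCE(bedata->rsp[req_id].req_id, rsp->req_id);

    /* once the ring has been drained */
    wake_up(&bedata->inflight_req);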

pvcallss_lock protects accesses to the ring.

Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
CC: boris.ostrovsky@xxxxxxxxxx
CC: jgross@xxxxxxxx
---
 drivers/xen/pvcalls-front.c | 94 +++++++++++++++++++++++++++++++++++++++++++++
 drivers/xen/pvcalls-front.h |  8 ++++
 2 files changed, 102 insertions(+)
 create mode 100644 drivers/xen/pvcalls-front.h

diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 5e0b265..d1dbcf1 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -20,6 +20,8 @@
 #include <xen/xenbus.h>
 #include <xen/interface/io/pvcalls.h>
 
+#include "pvcalls-front.h"
+
 #define PVCALLS_INVALID_ID (UINT_MAX)
 #define RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
 #define PVCALLS_NR_REQ_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
@@ -40,9 +42,101 @@ struct pvcalls_bedata {
 
 static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
 {
+       struct xenbus_device *dev = dev_id;
+       struct pvcalls_bedata *bedata;
+       struct xen_pvcalls_response *rsp;
+       uint8_t *src, *dst;
+       int req_id = 0, more = 0, done = 0;
+
+       if (dev == NULL)
+               return IRQ_HANDLED;
+
+       bedata = dev_get_drvdata(&dev->dev);
+       if (bedata == NULL)
+               return IRQ_HANDLED;
+
+again:
+       while (RING_HAS_UNCONSUMED_RESPONSES(&bedata->ring)) {
+               rsp = RING_GET_RESPONSE(&bedata->ring, bedata->ring.rsp_cons);
+
+               req_id = rsp->req_id;
+               src = (uint8_t *)rsp;
+               src += sizeof(rsp->req_id);
+               dst = (uint8_t *)&bedata->rsp[req_id];
+               dst += sizeof(rsp->req_id);
+               memcpy(dst, src, sizeof(*rsp) - sizeof(rsp->req_id));
+               /*
+                * First copy the rest of the data, then req_id. It is
+                * paired with the barrier when accessing bedata->rsp.
+                */
+               smp_wmb();
+               WRITE_ONCE(bedata->rsp[req_id].req_id, rsp->req_id);
+
+               done = 1;
+               bedata->ring.rsp_cons++;
+       }
+
+       RING_FINAL_CHECK_FOR_RESPONSES(&bedata->ring, more);
+       if (more)
+               goto again;
+       if (done)
+               wake_up(&bedata->inflight_req);
        return IRQ_HANDLED;
 }
 
+int pvcalls_front_socket(struct socket *sock)
+{
+       struct pvcalls_bedata *bedata;
+       struct xen_pvcalls_request *req;
+       int notify, req_id, ret;
+
+       if (!pvcalls_front_dev)
+               return -EACCES;
+       /*
+        * PVCalls only supports domain AF_INET,
+        * type SOCK_STREAM and protocol 0 sockets for now.
+        *
+        * Check socket type here, AF_INET and protocol checks are done
+        * by the caller.
+        */
+       if (sock->type != SOCK_STREAM)
+               return -EOPNOTSUPP;
+
+       bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+       spin_lock(&bedata->pvcallss_lock);
+       req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
+       if (RING_FULL(&bedata->ring) ||
+           READ_ONCE(bedata->rsp[req_id].req_id) != PVCALLS_INVALID_ID) {
+               spin_unlock(&bedata->pvcallss_lock);
+               return -EAGAIN;
+       }
+       req = RING_GET_REQUEST(&bedata->ring, req_id);
+       req->req_id = req_id;
+       req->cmd = PVCALLS_SOCKET;
+       req->u.socket.id = (uintptr_t) sock;
+       req->u.socket.domain = AF_INET;
+       req->u.socket.type = SOCK_STREAM;
+       req->u.socket.protocol = 0;
+
+       bedata->ring.req_prod_pvt++;
+       RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+       spin_unlock(&bedata->pvcallss_lock);
+       if (notify)
+               notify_remote_via_irq(bedata->irq);
+
+       if (wait_event_interruptible(bedata->inflight_req,
+               READ_ONCE(bedata->rsp[req_id].req_id) == req_id) != 0)
+               return -EINTR;
+
+       ret = bedata->rsp[req_id].ret;
+       /* read ret, then set this rsp slot to be reused */
+       smp_mb();
+       WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);
+
+       return ret;
+}
+
 static const struct xenbus_device_id pvcalls_front_ids[] = {
        { "pvcalls" },
        { "" }
diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
new file mode 100644
index 0000000..b7dabed
--- /dev/null
+++ b/drivers/xen/pvcalls-front.h
@@ -0,0 +1,8 @@
+#ifndef __PVCALLS_FRONT_H__
+#define __PVCALLS_FRONT_H__
+
+#include <linux/net.h>
+
+int pvcalls_front_socket(struct socket *sock);
+
+#endif
-- 
1.9.1


