
[Minios-devel] [UNIKRAFT PATCH 8/9] plat/xen/drivers/9p: Implement request and recv


  • To: "minios-devel@xxxxxxxxxxxxx" <minios-devel@xxxxxxxxxxxxx>
  • From: Vlad-Andrei BĂDOIU (78692) <vlad_andrei.badoiu@xxxxxxxxxxxxxxx>
  • Date: Sat, 7 Sep 2019 10:21:59 +0000
  • Cc: "costin.lupu@xxxxxxxxx" <costin.lupu@xxxxxxxxx>, Cristian Banu <cristb@xxxxxxxxx>
  • Delivery-date: Sat, 07 Sep 2019 10:22:40 +0000
  • List-id: Mini-os development list <minios-devel.lists.xenproject.org>

From: Cristian Banu <cristb@xxxxxxxxx>

This patch implements the request and receive functions for the Xen 9P
transport driver.

Signed-off-by: Cristian Banu <cristb@xxxxxxxxx>
---
 plat/xen/drivers/9p/9pfront.c | 138 +++++++++++++++++++++++++++++++++-
 1 file changed, 134 insertions(+), 4 deletions(-)

diff --git a/plat/xen/drivers/9p/9pfront.c b/plat/xen/drivers/9p/9pfront.c
index 1a8b49c5..a5321898 100644
--- a/plat/xen/drivers/9p/9pfront.c
+++ b/plat/xen/drivers/9p/9pfront.c
@@ -37,6 +37,7 @@
 #include <uk/alloc.h>
 #include <uk/assert.h>
 #include <uk/essentials.h>
+#include <uk/errptr.h>
 #include <uk/list.h>
 #include <uk/9pdev.h>
 #include <uk/9preq.h>
@@ -54,10 +55,96 @@ static struct uk_alloc *a;
 static UK_LIST_HEAD(p9front_device_list);
 static DEFINE_SPINLOCK(p9front_device_list_lock);
 
-static void p9front_handler(evtchn_port_t evtchn __unused,
+struct p9front_header {
+       uint32_t size;
+       uint8_t type;
+       uint16_t tag;
+} __packed;
+
+static void p9front_recv(struct p9front_dev_ring *ring)
+{
+       struct p9front_dev *p9fdev = ring->dev;
+       evtchn_port_t evtchn = ring->evtchn;
+       RING_IDX cons, prod, masked_cons, masked_prod;
+       int ring_size, rc;
+       struct p9front_header hdr;
+       struct uk_9preq *req;
+       uint32_t buf_cnt, zc_buf_cnt;
+
+       ring_size = XEN_FLEX_RING_SIZE(p9fdev->ring_order);
+
+       while (1) {
+               cons = ring->intf->in_cons;
+               prod = ring->intf->in_prod;
+               xen_rmb();
+
+               if (xen_9pfs_queued(prod, cons, ring_size) < sizeof(hdr)) {
+                       notify_remote_via_evtchn(evtchn);
+                       return;
+               }
+
+               masked_prod = xen_9pfs_mask(prod, ring_size);
+               masked_cons = xen_9pfs_mask(cons, ring_size);
+
+               xen_9pfs_read_packet(&hdr, ring->data.in, sizeof(hdr),
+                               masked_prod, &masked_cons, ring_size);
+
+               req = uk_9pdev_req_lookup(p9fdev->p9dev, hdr.tag);
+               if (PTRISERR(req)) {
+                       uk_pr_warn("Found invalid tag=%u\n", hdr.tag);
+                       cons += hdr.size;
+                       xen_mb();
+                       ring->intf->in_cons = cons;
+                       continue;
+               }
+
+               masked_cons = xen_9pfs_mask(cons, ring_size);
+
+               /*
+                * Compute amount of data to read into request buffer and into
+                * zero-copy buffer.
+                */
+               buf_cnt = hdr.size;
+               if (hdr.type != UK_9P_RERROR && req->recv.zc_buf)
+                       buf_cnt = MIN(buf_cnt, req->recv.zc_offset);
+               zc_buf_cnt = hdr.size - buf_cnt;
+
+               xen_9pfs_read_packet(req->recv.buf, ring->data.in, buf_cnt,
+                               masked_prod, &masked_cons, ring_size);
+               xen_9pfs_read_packet(req->recv.zc_buf, ring->data.in,
+                               zc_buf_cnt, masked_prod, &masked_cons,
+                               ring_size);
+               cons += hdr.size;
+               xen_mb();
+               ring->intf->in_cons = cons;
+
+               rc = uk_9preq_receive_cb(req, hdr.size);
+               if (rc)
+                       uk_pr_warn("Could not receive reply: %d\n", rc);
+
+               /* Release reference held by uk_9pdev_req_lookup(). */
+               uk_9preq_put(req);
+       }
+}
+
+static void p9front_handler(evtchn_port_t evtchn,
                            struct __regs *regs __unused,
-                           void *arg __unused)
+                           void *arg)
 {
+       struct p9front_dev_ring *ring = arg;
+
+       UK_ASSERT(ring);
+       UK_ASSERT(ring->evtchn == evtchn);
+
+       /*
+        * A new interrupt means that there is a response to be received, which
+        * means that a previously sent request has been removed from the out
+        * ring. Thus, the API can be notified of the possibility of retrying to
+        * send requests blocked on ENOSPC errors.
+        */
+       if (ring->dev->p9dev)
+               uk_9pdev_xmit_notify(ring->dev->p9dev);
+       p9front_recv(ring);
 }
 
 static void p9front_free_dev_ring(struct p9front_dev *p9fdev, int idx)
@@ -242,9 +329,52 @@ static int p9front_disconnect(struct uk_9pdev *p9dev __unused)
        return 0;
 }
 
-static int p9front_request(struct uk_9pdev *p9dev __unused,
-                          struct uk_9preq *req __unused)
+static int p9front_request(struct uk_9pdev *p9dev,
+                          struct uk_9preq *req)
 {
+       struct p9front_dev *p9fdev;
+       struct p9front_dev_ring *ring;
+       int ring_idx, ring_size;
+       RING_IDX masked_prod, masked_cons, prod, cons;
+
+       UK_ASSERT(p9dev);
+       UK_ASSERT(req);
+       UK_ASSERT(req->state == UK_9PREQ_READY);
+
+       p9fdev = p9dev->priv;
+
+       ring_size = XEN_FLEX_RING_SIZE(p9fdev->ring_order);
+
+       ring_idx = req->tag % p9fdev->nb_rings;
+       ring = &p9fdev->rings[ring_idx];
+
+       /* Protect against concurrent writes to the out ring. */
+       ukarch_spin_lock(&ring->spinlock);
+       cons = ring->intf->out_cons;
+       prod = ring->intf->out_prod;
+       xen_mb();
+
+       masked_prod = xen_9pfs_mask(prod, ring_size);
+       masked_cons = xen_9pfs_mask(cons, ring_size);
+
+       if (ring_size - xen_9pfs_queued(prod, cons, ring_size) <
+                       req->xmit.size + req->xmit.zc_size) {
+               ukarch_spin_unlock(&ring->spinlock);
+               return -ENOSPC;
+       }
+
+       xen_9pfs_write_packet(ring->data.out, req->xmit.buf, req->xmit.size,
+                             &masked_prod, masked_cons, ring_size);
+       xen_9pfs_write_packet(ring->data.out, req->xmit.zc_buf, req->xmit.zc_size,
+                             &masked_prod, masked_cons, ring_size);
+       req->state = UK_9PREQ_SENT;
+       xen_wmb();
+       prod += req->xmit.size + req->xmit.zc_size;
+       ring->intf->out_prod = prod;
+
+       ukarch_spin_unlock(&ring->spinlock);
+       notify_remote_via_evtchn(ring->evtchn);
+
        return 0;
 }
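
A few notes on the mechanics above, with small standalone sketches; none
of this code is part of the patch.

The new struct p9front_header is compared against the number of queued
bytes via sizeof(hdr), so it must match the 7-byte 9P wire header
(size[4] type[1] tag[2]) exactly. A minimal check of that layout,
assuming GCC/Clang attribute syntax for __packed:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors struct p9front_header from the patch; packing keeps the
 * compiler from padding between type (offset 4) and tag (offset 5),
 * which would otherwise grow the struct to 8 bytes. */
struct toy_header {
        uint32_t size;
        uint8_t type;
        uint16_t tag;
} __attribute__((__packed__));

int main(void)
{
        printf("wire header is %zu bytes\n", sizeof(struct toy_header));
        assert(sizeof(struct toy_header) == 7);
        return 0;
}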
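
On the receive path, a reply is split between the request's regular
buffer and its zero-copy buffer: everything up to recv.zc_offset (the 9P
header and fixed fields) lands in recv.buf and the remainder goes
straight into recv.zc_buf, except that RERROR replies bypass the
zero-copy buffer entirely. A re-statement of just that split, with
hypothetical message values (107 and 117 are 9P's Rerror and Rread
types):

#include <stdint.h>
#include <stdio.h>

#define TOY_RERROR      107     /* 9P Rerror message type */
#define TOY_MIN(a, b)   ((a) < (b) ? (a) : (b))

/* Same computation as buf_cnt/zc_buf_cnt in p9front_recv(). */
static void toy_split(uint32_t size, uint8_t type, int have_zc_buf,
                      uint32_t zc_offset, uint32_t *buf_cnt,
                      uint32_t *zc_buf_cnt)
{
        *buf_cnt = size;
        if (type != TOY_RERROR && have_zc_buf)
                *buf_cnt = TOY_MIN(*buf_cnt, zc_offset);
        *zc_buf_cnt = size - *buf_cnt;  /* remainder is zero-copy */
}

int main(void)
{
        uint32_t buf_cnt, zc_buf_cnt;

        /* Hypothetical Rread reply: 11-byte header
         * (size[4] type[1] tag[2] count[4]) plus 4096 bytes of data. */
        toy_split(11 + 4096, 117 /* Rread */, 1, 11, &buf_cnt, &zc_buf_cnt);
        printf("buf=%u zc=%u\n", buf_cnt, zc_buf_cnt); /* buf=11 zc=4096 */
        return 0;
}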
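
Finally, the -ENOSPC check in p9front_request() leans on Xen's flex-ring
convention: prod and cons are free-running indices, masked into
[0, ring_size) only when the data area is touched, so prod - cons is the
number of queued bytes even across wraparound. A toy model of that
arithmetic, assuming a power-of-two ring size; the real
xen_9pfs_mask()/xen_9pfs_queued() helpers are generated by
DEFINE_XEN_FLEX_RING() and the simplified toy_queued() below matches
them only under the driver's invariant that at most ring_size bytes are
outstanding:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t RING_IDX;

/* Stand-ins for the generated flex-ring helpers. */
static RING_IDX toy_mask(RING_IDX idx, RING_IDX ring_size)
{
        return idx & (ring_size - 1);   /* needs power-of-two ring_size */
}

static RING_IDX toy_queued(RING_IDX prod, RING_IDX cons)
{
        return prod - cons;     /* unsigned wraparound keeps this exact
                                 * while prod - cons <= ring_size */
}

int main(void)
{
        RING_IDX ring_size = 1 << 12;   /* e.g. XEN_FLEX_RING_SIZE(order) */
        RING_IDX cons = 4090, prod = 4100;      /* already wrapped once */
        uint32_t xmit_bytes = 20;

        printf("queued=%u masked_prod=%u masked_cons=%u\n",
               toy_queued(prod, cons),
               toy_mask(prod, ring_size), toy_mask(cons, ring_size));

        /* Same shape as the driver's check before writing a request. */
        if (ring_size - toy_queued(prod, cons) < xmit_bytes)
                printf("would return -ENOSPC\n");
        else
                printf("room for %u bytes\n", xmit_bytes);
        return 0;
}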
 
-- 
2.20.1


_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel

 

