
[Minios-devel] [UNIKRAFT PATCH v2 08/12] plat/xen/drivers/net: Configure netfront rx queue


  • To: minios-devel@xxxxxxxxxxxxx
  • From: Costin Lupu <costin.lupu@xxxxxxxxx>
  • Date: Mon, 1 Apr 2019 16:42:48 +0300
  • Cc: felipe.huici@xxxxxxxxx, Razvan Cojocaru <razvan.cojocaru93@xxxxxxxxx>, Florian.Schmidt@xxxxxxxxx, sharan.santhanam@xxxxxxxxx, simon.kuenzer@xxxxxxxxx, yuri.volchkov@xxxxxxxxx
  • Delivery-date: Mon, 01 Apr 2019 13:43:07 +0000
  • List-id: Mini-os development list <minios-devel.lists.xenproject.org>

Netfront rx queues use Xen shared rings. Incoming packets are stored in
buffers allocated via a user-provided callback. This patch also adds the
event handler for rx queue notifications.
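
For illustration only (not part of the diff below): a minimal sketch of the
user-provided allocation callback that netfront_rx_fillup() invokes. The
callback signature is inferred from its use in the diff
(rxq->alloc_rxpkts(argp, nb, count) returning the number of buffers
provided); the header names, __PAGE_SIZE and the uk_netbuf_alloc_buf() /
uk_alloc_get_default() calls are assumptions about the libuknet/libukalloc
APIs, not something this patch introduces.

#include <uk/alloc.h>
#include <uk/arch/limits.h>
#include <uk/netbuf.h>
#include <uk/netdev.h>

/* Provide up to `count` receive buffers; return how many were actually
 * allocated. netfront_rxq_enqueue() asserts that the buffer data is page
 * aligned, hence the page-sized buffer with page alignment and no headroom.
 */
static uint16_t example_alloc_rxpkts(void *argp, struct uk_netbuf *nb[],
                                     uint16_t count)
{
        struct uk_alloc *a = (struct uk_alloc *) argp;
        uint16_t i;

        for (i = 0; i < count; i++) {
                nb[i] = uk_netbuf_alloc_buf(a, __PAGE_SIZE, __PAGE_SIZE,
                                            0, 0, NULL);
                if (!nb[i])
                        break;
        }
        return i;
}

/* A network stack would then hand the callback to the driver through the
 * rx queue configuration (field names as used by netfront_rxq_setup()):
 */
static void example_rxq_conf_init(struct uk_netdev_rxqueue_conf *conf)
{
        conf->alloc_rxpkts      = example_alloc_rxpkts;
        conf->alloc_rxpkts_argp = uk_alloc_get_default();
}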

Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
Signed-off-by: Razvan Cojocaru <razvan.cojocaru93@xxxxxxxxx>
---
 plat/xen/drivers/net/netfront.c | 211 ++++++++++++++++++++++++++++++++++++++--
 plat/xen/drivers/net/netfront.h |  29 ++++++
 2 files changed, 231 insertions(+), 9 deletions(-)

diff --git a/plat/xen/drivers/net/netfront.c b/plat/xen/drivers/net/netfront.c
index e810aefd..586c0e6a 100644
--- a/plat/xen/drivers/net/netfront.c
+++ b/plat/xen/drivers/net/netfront.c
@@ -52,6 +52,11 @@
 static struct uk_alloc *drv_allocator;
 
 
+static uint16_t xennet_rxidx(RING_IDX idx)
+{
+       return (uint16_t) (idx & (NET_RX_RING_SIZE - 1));
+}
+
 static void add_id_to_freelist(uint16_t id, uint16_t *freelist)
 {
        freelist[id + 1] = freelist[0];
@@ -67,6 +72,82 @@ static uint16_t get_id_from_freelist(uint16_t *freelist)
        return id;
 }
 
+static int netfront_rxq_enqueue(struct uk_netdev_rx_queue *rxq,
+               struct uk_netbuf *netbuf)
+{
+       RING_IDX req_prod;
+       uint16_t id;
+       netif_rx_request_t *rx_req;
+       struct netfront_dev *nfdev;
+       int notify;
+
+       /* buffer must be page aligned */
+       UK_ASSERT(((unsigned long) netbuf->data & ~PAGE_MASK) == 0);
+
+       if (RING_FULL(&rxq->ring)) {
+               uk_pr_debug("rx queue is full\n");
+               return -ENOSPC;
+       }
+
+       /* get request */
+       req_prod = rxq->ring.req_prod_pvt;
+       id = xennet_rxidx(req_prod);
+       rx_req = RING_GET_REQUEST(&rxq->ring, req_prod);
+       rx_req->id = id;
+
+       /* save buffer */
+       rxq->netbuf[id] = netbuf;
+       /* setup grant for buffer data */
+       nfdev = rxq->netfront_dev;
+       rxq->gref[id] = rx_req->gref =
+               gnttab_grant_access(nfdev->xendev->otherend_id,
+                       virt_to_mfn(netbuf->data), 0);
+       UK_ASSERT(rx_req->gref != GRANT_INVALID_REF);
+
+       wmb(); /* Ensure backend sees requests */
+       rxq->ring.req_prod_pvt = req_prod + 1;
+
+       RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify);
+       if (notify)
+               notify_remote_via_evtchn(rxq->evtchn);
+
+       return 0;
+}
+
+static int netfront_rx_fillup(struct uk_netdev_rx_queue *rxq, uint16_t nb_desc)
+{
+       struct uk_netbuf *netbuf[nb_desc];
+       int rc, status = 0;
+       uint16_t cnt;
+
+       cnt = rxq->alloc_rxpkts(rxq->alloc_rxpkts_argp, netbuf, nb_desc);
+
+       for (uint16_t i = 0; i < cnt; i++) {
+               rc = netfront_rxq_enqueue(rxq, netbuf[i]);
+               if (unlikely(rc < 0)) {
+                       uk_pr_err("Failed to add a buffer to rx queue %p: %d\n",
+                               rxq, rc);
+
+                       /*
+                        * Release netbufs that we are not going
+                        * to use anymore
+                        */
+                       for (uint16_t j = i; j < cnt; j++)
+                               uk_netbuf_free(netbuf[j]);
+
+                       status |= UK_NETDEV_STATUS_UNDERRUN;
+
+                       goto out;
+               }
+       }
+
+       if (unlikely(cnt < nb_desc))
+               status |= UK_NETDEV_STATUS_UNDERRUN;
+
+out:
+       return status;
+}
+
 static struct uk_netdev_tx_queue *netfront_txq_setup(struct uk_netdev *n,
                uint16_t queue_id,
                uint16_t nb_desc __unused,
@@ -103,15 +184,19 @@ static struct uk_netdev_tx_queue *netfront_txq_setup(struct uk_netdev *n,
        UK_ASSERT(txq->ring_ref != GRANT_INVALID_REF);
 
        /* Setup event channel */
-       rc = evtchn_alloc_unbound(nfdev->xendev->otherend_id,
-                       NULL, NULL,
-                       &txq->evtchn);
-       if (rc) {
-               uk_pr_err("Error creating event channel: %d\n", rc);
-               gnttab_end_access(txq->ring_ref);
-               uk_free_page(drv_allocator, sring);
-               return ERR2PTR(-rc);
-       }
+       if (nfdev->split_evtchn || !nfdev->rxqs[queue_id].initialized) {
+               rc = evtchn_alloc_unbound(nfdev->xendev->otherend_id,
+                               NULL, NULL,
+                               &txq->evtchn);
+               if (rc) {
+                       uk_pr_err("Error creating event channel: %d\n", rc);
+                       gnttab_end_access(txq->ring_ref);
+                       uk_free_page(drv_allocator, sring);
+                       return ERR2PTR(rc);
+               }
+       } else
+               txq->evtchn = nfdev->rxqs[queue_id].evtchn;
+
        /* Events are always disabled for tx queue */
        mask_evtchn(txq->evtchn);
 
@@ -128,6 +213,86 @@ static struct uk_netdev_tx_queue *netfront_txq_setup(struct uk_netdev *n,
        return txq;
 }
 
+static void netfront_handler(evtchn_port_t port __unused,
+               struct __regs *regs __unused, void *arg)
+{
+       struct uk_netdev_rx_queue *rxq = arg;
+
+       /* Indicate to the network stack about an event */
+       uk_netdev_drv_rx_event(&rxq->netfront_dev->netdev, rxq->lqueue_id);
+}
+
+static struct uk_netdev_rx_queue *netfront_rxq_setup(struct uk_netdev *n,
+               uint16_t queue_id,
+               uint16_t nb_desc __unused,
+               struct uk_netdev_rxqueue_conf *conf)
+{
+       int rc;
+       struct netfront_dev *nfdev;
+       struct uk_netdev_rx_queue *rxq;
+       netif_rx_sring_t *sring;
+
+       UK_ASSERT(n != NULL);
+       UK_ASSERT(conf != NULL);
+
+       nfdev = to_netfront_dev(n);
+       if (queue_id >= nfdev->max_queue_pairs) {
+               uk_pr_err("Invalid queue identifier: %"__PRIu16"\n", queue_id);
+               return ERR2PTR(-EINVAL);
+       }
+
+       rxq = &nfdev->rxqs[queue_id];
+       UK_ASSERT(!rxq->initialized);
+       rxq->netfront_dev = nfdev;
+       rxq->lqueue_id = queue_id;
+
+       /* Setup shared ring */
+       sring = uk_malloc_page(drv_allocator);
+       if (!sring)
+               return ERR2PTR(-ENOMEM);
+       memset(sring, 0, PAGE_SIZE);
+       SHARED_RING_INIT(sring);
+       FRONT_RING_INIT(&rxq->ring, sring, PAGE_SIZE);
+       rxq->ring_size = NET_RX_RING_SIZE;
+       rxq->ring_ref = gnttab_grant_access(nfdev->xendev->otherend_id,
+               virt_to_mfn(sring), 0);
+       UK_ASSERT(rxq->ring_ref != GRANT_INVALID_REF);
+
+       /* Setup event channel */
+       if (nfdev->split_evtchn || !nfdev->txqs[queue_id].initialized) {
+               rc = evtchn_alloc_unbound(nfdev->xendev->otherend_id,
+                               netfront_handler, rxq,
+                               &rxq->evtchn);
+               if (rc) {
+                       uk_pr_err("Error creating event channel: %d\n", rc);
+                       gnttab_end_access(rxq->ring_ref);
+                       uk_free_page(drv_allocator, sring);
+                       return ERR2PTR(rc);
+               }
+       } else {
+               rxq->evtchn = nfdev->txqs[queue_id].evtchn;
+               /* overwriting event handler */
+               bind_evtchn(rxq->evtchn, netfront_handler, rxq);
+       }
+       /*
+        * By default, events are disabled and it is up to the user or
+        * network stack to explicitly enable them.
+        */
+       mask_evtchn(rxq->evtchn);
+       rxq->intr_enabled = 0;
+
+       rxq->alloc_rxpkts = conf->alloc_rxpkts;
+       rxq->alloc_rxpkts_argp = conf->alloc_rxpkts_argp;
+
+       /* Allocate receive buffers for this queue */
+       netfront_rx_fillup(rxq, rxq->ring_size);
+
+       rxq->initialized = true;
+       nfdev->rxqs_num++;
+
+       return rxq;
+}
+
 static int netfront_rxtx_alloc(struct netfront_dev *nfdev,
                const struct uk_netdev_conf *conf)
 {
@@ -195,6 +360,32 @@ exit:
        return rc;
 }
 
+static int netfront_rxq_info_get(struct uk_netdev *n,
+               uint16_t queue_id,
+               struct uk_netdev_queue_info *qinfo)
+{
+       struct netfront_dev *nfdev;
+       struct uk_netdev_rx_queue *rxq;
+       int rc = 0;
+
+       UK_ASSERT(n != NULL);
+       UK_ASSERT(qinfo != NULL);
+
+       nfdev = to_netfront_dev(n);
+       if (unlikely(queue_id >= nfdev->rxqs_num)) {
+               uk_pr_err("Invalid queue id: %"__PRIu16"\n", queue_id);
+               rc = -EINVAL;
+               goto exit;
+       }
+       rxq = &nfdev->rxqs[queue_id];
+       qinfo->nb_min = rxq->ring_size;
+       qinfo->nb_max = rxq->ring_size;
+       qinfo->nb_is_power_of_two = 1;
+
+exit:
+       return rc;
+}
+
 static int netfront_configure(struct uk_netdev *n,
                const struct uk_netdev_conf *conf)
 {
@@ -286,7 +477,9 @@ static unsigned int netfront_promisc_get(struct uk_netdev *n)
 static const struct uk_netdev_ops netfront_ops = {
        .configure = netfront_configure,
        .txq_configure = netfront_txq_setup,
+       .rxq_configure = netfront_rxq_setup,
        .txq_info_get = netfront_txq_info_get,
+       .rxq_info_get = netfront_rxq_info_get,
        .info_get = netfront_info_get,
        .einfo_get = netfront_einfo_get,
        .hwaddr_get = netfront_mac_get,
diff --git a/plat/xen/drivers/net/netfront.h b/plat/xen/drivers/net/netfront.h
index 05514cfc..74f19108 100644
--- a/plat/xen/drivers/net/netfront.h
+++ b/plat/xen/drivers/net/netfront.h
@@ -44,6 +44,7 @@
 
 
 #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE)
+#define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE)
 
 /**
  * internal structure to represent the transmit queue.
@@ -77,6 +78,33 @@ struct uk_netdev_tx_queue {
  * internal structure to represent the receive queue.
  */
 struct uk_netdev_rx_queue {
+       /* The netfront device */
+       struct netfront_dev *netfront_dev;
+       /* The libuknet queue identifier */
+       uint16_t lqueue_id;
+       /* True if initialized */
+       bool initialized;
+
+       /* Shared ring size */
+       uint16_t ring_size;
+       /* Shared ring */
+       netif_rx_front_ring_t ring;
+       /* Shared ring grant ref */
+       grant_ref_t ring_ref;
+       /* Queue event channel */
+       evtchn_port_t evtchn;
+
+       /* The flag to interrupt on the receive queue */
+       uint8_t intr_enabled;
+
+       /* User-provided receive buffer allocation function */
+       uk_netdev_alloc_rxpkts alloc_rxpkts;
+       void *alloc_rxpkts_argp;
+
+       /* Receive buffers for incoming packets */
+       struct uk_netbuf *netbuf[NET_RX_RING_SIZE];
+       /* Grants for receive buffers */
+       grant_ref_t gref[NET_RX_RING_SIZE];
 };
 
 struct xs_econf {
@@ -93,6 +121,7 @@ struct netfront_dev {
 
        /* List of the Rx/Tx queues */
        uint16_t txqs_num;
+       uint16_t rxqs_num;
        struct uk_netdev_tx_queue *txqs;
        struct uk_netdev_rx_queue *rxqs;
        /* Maximum number of queue pairs */
-- 
2.11.0


_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel


