
Re: [UNIKRAFT PATCH v4 07/12] plat/xen/drivers/net: Configure netfront rx queue



Hi Sharan,

Please see inline.

On 10/22/20 4:33 PM, Sharan Santhanam wrote:
> Hello Costin,
> 
> Please find my comments inline:
> 
> Thanks & Regards
> 
> Sharan
> 
> On 8/13/20 10:53 AM, Costin Lupu wrote:
>> Incoming packets are saved in buffers allocated with user-provided
>> callbacks. Whenever such a packet is available, the driver is notified
>> via event channels. This patch introduces the configuration logic for
>> rx queues and the handler used for dealing with these notifications.
>>
>> Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
>> Signed-off-by: Razvan Cojocaru <razvan.cojocaru93@xxxxxxxxx>
>> ---
>>   plat/xen/drivers/net/netfront.c | 212 ++++++++++++++++++++++++++++++--
>>   plat/xen/drivers/net/netfront.h |  29 +++++
>>   2 files changed, 232 insertions(+), 9 deletions(-)
>>
>> diff --git a/plat/xen/drivers/net/netfront.c b/plat/xen/drivers/net/netfront.c
>> index 3c936743..742e05cf 100644
>> --- a/plat/xen/drivers/net/netfront.c
>> +++ b/plat/xen/drivers/net/netfront.c
>> @@ -52,6 +52,11 @@
>>   static struct uk_alloc *drv_allocator;
>>
>> +static uint16_t xennet_rxidx(RING_IDX idx)
>> +{
>> +    return (uint16_t) (idx & (NET_RX_RING_SIZE - 1));
>> +}
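
A note on the masking above: xennet_rxidx() maps a free-running RING_IDX
onto a slot index, which only works because __CONST_RING_SIZE() yields a
power of two. If we want to make that assumption explicit, a
compile-time guard could be added (sketch, assuming UK_CTASSERT from
uk/essentials.h):

    /* fails to build if the ring size ever stops being a power of two */
    UK_CTASSERT(!(NET_RX_RING_SIZE & (NET_RX_RING_SIZE - 1)));
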
>> +
>>   static void add_id_to_freelist(uint16_t id, uint16_t *freelist)
>>   {
>>       freelist[id + 1] = freelist[0];
>> @@ -67,6 +72,82 @@ static uint16_t get_id_from_freelist(uint16_t *freelist)
>>       return id;
>>   }
>>
>> +static int netfront_rxq_enqueue(struct uk_netdev_rx_queue *rxq,
>> +        struct uk_netbuf *netbuf)
>> +{
>> +    RING_IDX req_prod;
>> +    uint16_t id;
>> +    netif_rx_request_t *rx_req;
>> +    struct netfront_dev *nfdev;
>> +    int notify;
>> +
>> +    /* buffer must be page aligned */
>> +    UK_ASSERT(((unsigned long) netbuf->data & ~PAGE_MASK) == 0);
>> +
>> +    if (RING_FULL(&rxq->ring)) {
>> +        uk_pr_debug("rx queue is full\n");
>> +        return -ENOSPC;
>> +    }
>> +
>> +    /* get request */
>> +    req_prod = rxq->ring.req_prod_pvt;
>> +    id = xennet_rxidx(req_prod);
>> +    rx_req = RING_GET_REQUEST(&rxq->ring, req_prod);
>> +    rx_req->id = id;
>> +
>> +    /* save buffer */
>> +    rxq->netbuf[id] = netbuf;
>> +    /* setup grant for buffer data */
>> +    nfdev = rxq->netfront_dev;
>> +    rxq->gref[id] = rx_req->gref =
>> +        gnttab_grant_access(nfdev->xendev->otherend_id,
>> +            virt_to_mfn(netbuf->data), 0);
>> +    UK_ASSERT(rx_req->gref != GRANT_INVALID_REF);
>> +
>> +    wmb(); /* Ensure backend sees requests */
>> +    rxq->ring.req_prod_pvt = req_prod + 1;
>> +
>> +    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify);
>> +    if (notify)
>> +        notify_remote_via_evtchn(rxq->evtchn);
>> +
>> +    return 0;
>> +}
>> +
>> +static int netfront_rx_fillup(struct uk_netdev_rx_queue *rxq, uint16_t nb_desc)
>> +{
>> +    struct uk_netbuf *netbuf[nb_desc];
>> +    int rc, status = 0;
>> +    uint16_t cnt;
>> +
>> +    cnt = rxq->alloc_rxpkts(rxq->alloc_rxpkts_argp, netbuf, nb_desc);
>> +
>> +    for (uint16_t i = 0; i < cnt; i++) {
>> +        rc = netfront_rxq_enqueue(rxq, netbuf[i]);
>> +        if (unlikely(rc < 0)) {
>> +            uk_pr_err("Failed to add a buffer to rx queue %p: %d\n",
>> +                rxq, rc);
>> +
>> +            /*
>> +             * Release netbufs that we are not going
>> +             * to use anymore
>> +             */
>> +            for (uint16_t j = i; j < cnt; j++)
>> +                uk_netbuf_free(netbuf[j]);
>> +
>> +            status |= UK_NETDEV_STATUS_UNDERRUN;
>> +
>> +            goto out;
>> +        }
>> +    }
>> +
>> +    if (unlikely(cnt < nb_desc))
>> +        status |= UK_NETDEV_STATUS_UNDERRUN;
>> +
>> +out:
>> +    return status;
>> +}
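
For readers of the patch: the alloc_rxpkts callback used above must hand
back page-aligned buffers, otherwise the UK_ASSERT in
netfront_rxq_enqueue() fires. A minimal sketch of a conforming callback,
assuming the uk_netbuf_alloc_buf() helper and treating its exact
parameters as illustrative:

    static uint16_t my_alloc_rxpkts(void *argp, struct uk_netbuf *nb[],
                                    uint16_t count)
    {
        struct uk_alloc *a = (struct uk_alloc *) argp;
        uint16_t i;

        for (i = 0; i < count; i++) {
            /* one page per buffer, aligned to PAGE_SIZE, no headroom */
            nb[i] = uk_netbuf_alloc_buf(a, PAGE_SIZE, PAGE_SIZE,
                                        0, 0, NULL);
            if (!nb[i])
                break;
        }
        /* a short count is reported as UK_NETDEV_STATUS_UNDERRUN */
        return i;
    }
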
>> +
>>   static struct uk_netdev_tx_queue *netfront_txq_setup(struct uk_netdev *n,
>>           uint16_t queue_id,
>>           uint16_t nb_desc __unused,
>> @@ -103,15 +184,19 @@ static struct uk_netdev_tx_queue *netfront_txq_setup(struct uk_netdev *n,
>>       UK_ASSERT(txq->ring_ref != GRANT_INVALID_REF);
>>
>>       /* Setup event channel */
>> -    rc = evtchn_alloc_unbound(nfdev->xendev->otherend_id,
>> -            NULL, NULL,
>> -            &txq->evtchn);
>> -    if (rc) {
>> -        uk_pr_err("Error creating event channel: %d\n", rc);
>> -        gnttab_end_access(txq->ring_ref);
>> -        uk_pfree(drv_allocator, sring, 1);
>> -        return ERR2PTR(-rc);
>> -    }
>> +    if (nfdev->split_evtchn || !nfdev->rxqs[queue_id].initialized) {
>> +        rc = evtchn_alloc_unbound(nfdev->xendev->otherend_id,
>> +                NULL, NULL,
>> +                &txq->evtchn);
>> +        if (rc) {
>> +            uk_pr_err("Error creating event channel: %d\n", rc);
>> +            gnttab_end_access(txq->ring_ref);
>> +            uk_pfree(drv_allocator, sring, 1);
>> +            return ERR2PTR(rc);
>> +        }
>> +    } else
>> +        txq->evtchn = nfdev->rxqs[queue_id].evtchn;
> 
> In the next version maybe this can go to the previous patch.
> 

Well, actually this cannot be moved to the previous patch because the rx
queue structure is not defined there yet.
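
For reference, the pairing logic both setup paths now share looks like
this (condensed recap of the code above, not a new API):

    /* Allocate a fresh event channel in split mode or when the sibling
     * queue does not exist yet; otherwise reuse the sibling's channel.
     */
    if (nfdev->split_evtchn || !nfdev->rxqs[queue_id].initialized)
        rc = evtchn_alloc_unbound(nfdev->xendev->otherend_id,
                NULL, NULL, &txq->evtchn);
    else
        txq->evtchn = nfdev->rxqs[queue_id].evtchn;

The rx queue members used in the condition are only introduced by this
patch, hence the ordering constraint.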

> 
>> +
>>       /* Events are always disabled for tx queue */
>>       mask_evtchn(txq->evtchn);
>>
>> @@ -128,6 +213,86 @@ static struct uk_netdev_tx_queue *netfront_txq_setup(struct uk_netdev *n,
>>       return txq;
>>   }
>>
>> +static void netfront_handler(evtchn_port_t port __unused,
>> +        struct __regs *regs __unused, void *arg)
>> +{
>> +    struct uk_netdev_rx_queue *rxq = arg;
>> +
>> +    /* Indicate to the network stack about an event */
>> +    uk_netdev_drv_rx_event(&rxq->netfront_dev->netdev, rxq->lqueue_id);
>> +}
>> +
>> +static struct uk_netdev_rx_queue *netfront_rxq_setup(struct uk_netdev *n,
>> +        uint16_t queue_id,
>> +        uint16_t nb_desc __unused,
>> +        struct uk_netdev_rxqueue_conf *conf)
>> +{
>> +    int rc;
>> +    struct netfront_dev *nfdev;
>> +    struct uk_netdev_rx_queue *rxq;
>> +    netif_rx_sring_t *sring;
>> +
>> +    UK_ASSERT(n != NULL);
>> +    UK_ASSERT(conf != NULL);
>> +
>> +    nfdev = to_netfront_dev(n);
>> +    if (queue_id >= nfdev->max_queue_pairs) {
>> +        uk_pr_err("Invalid queue identifier: %"__PRIu16"\n", queue_id);
>> +        return ERR2PTR(-EINVAL);
>> +    }
>> +
>> +    rxq = &nfdev->rxqs[queue_id];
>> +    UK_ASSERT(!rxq->initialized);
>> +    rxq->netfront_dev = nfdev;
>> +    rxq->lqueue_id = queue_id;
>> +
>> +    /* Setup shared ring */
>> +    sring = uk_palloc(drv_allocator, 1);
> Allocator from the conf should be used here.

Ack.
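
For v5 this would look roughly as follows, assuming the allocator field
of struct uk_netdev_rxqueue_conf is named 'a' as elsewhere in the netdev
API:

    /* allocate the shared ring page from the queue's configured
     * allocator instead of the driver-global one */
    sring = uk_palloc(conf->a, 1);
    if (!sring)
        return ERR2PTR(-ENOMEM);

with the matching uk_pfree(conf->a, sring, 1) in the error path below.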

>> +    if (!sring)
>> +        return ERR2PTR(-ENOMEM);
>> +    memset(sring, 0, PAGE_SIZE);
>> +    SHARED_RING_INIT(sring);
>> +    FRONT_RING_INIT(&rxq->ring, sring, PAGE_SIZE);
>> +    rxq->ring_size = NET_RX_RING_SIZE;
>> +    rxq->ring_ref = gnttab_grant_access(nfdev->xendev->otherend_id,
>> +        virt_to_mfn(sring), 0);
>> +    UK_ASSERT(rxq->ring_ref != GRANT_INVALID_REF);
>> +
>> +    /* Setup event channel */
>> +    if (nfdev->split_evtchn || !nfdev->txqs[queue_id].initialized) {
>> +        rc = evtchn_alloc_unbound(nfdev->xendev->otherend_id,
>> +                netfront_handler, rxq,
>> +                &rxq->evtchn);
>> +        if (rc) {
>> +            uk_pr_err("Error creating event channel: %d\n", rc);
>> +            gnttab_end_access(rxq->ring_ref);
>> +            uk_pfree(drv_allocator, sring, 1);
>> +            return ERR2PTR(rc);
>> +        }
>> +    } else {
>> +        rxq->evtchn = nfdev->txqs[queue_id].evtchn;
>> +        /* overwriting event handler */
>> +        bind_evtchn(rxq->evtchn, netfront_handler, rxq);
>> +    }
>> +    /*
>> +     * By default, events are disabled and it is up to the user or
>> +     * network stack to explicitly enable them.
>> +     */
>> +    mask_evtchn(rxq->evtchn);
>> +    rxq->intr_enabled = 0;
>> +
>> +    rxq->alloc_rxpkts = conf->alloc_rxpkts;
>> +    rxq->alloc_rxpkts_argp = conf->alloc_rxpkts_argp;
>> +
>> +    /* Allocate receive buffers for this queue */
>> +    netfront_rx_fillup(rxq, rxq->ring_size);
>> +
>> +    rxq->initialized = true;
>> +    nfdev->rxqs_num++;
>> +
>> +    return rxq;
>> +}
>> +
>>   static int netfront_rxtx_alloc(struct netfront_dev *nfdev,
>>           const struct uk_netdev_conf *conf)
>>   {
>> @@ -196,6 +361,33 @@ exit:
>>       return rc;
>>   }
>>
>> +static int netfront_rxq_info_get(struct uk_netdev *n,
>> +        uint16_t queue_id,
>> +        struct uk_netdev_queue_info *qinfo)
>> +{
>> +    struct netfront_dev *nfdev;
>> +    struct uk_netdev_rx_queue *rxq;
>> +    int rc = 0;
>> +
>> +    UK_ASSERT(n != NULL);
>> +    UK_ASSERT(qinfo != NULL);
>> +
>> +    nfdev = to_netfront_dev(n);
>> +    if (unlikely(queue_id >= nfdev->rxqs_num)) {
> Same as the txq info get

Ack.
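
Sketch of the change, assuming the txq comment asked for the bound to be
the number of supported queues rather than the number of configured
ones:

    /* max_queue_pairs is valid even before any rx queue is configured */
    if (unlikely(queue_id >= nfdev->max_queue_pairs)) {
        uk_pr_err("Invalid queue id: %"__PRIu16"\n", queue_id);
        rc = -EINVAL;
        goto exit;
    }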

>> +        uk_pr_err("Invalid queue id: %"__PRIu16"\n", queue_id);
>> +        rc = -EINVAL;
>> +        goto exit;
>> +    }
>> +    rxq = &nfdev->rxqs[queue_id];
>> +    qinfo->nb_min = rxq->ring_size;
>> +    qinfo->nb_max = rxq->ring_size;
>> +    qinfo->nb_align = PAGE_SIZE;
>> +    qinfo->nb_is_power_of_two = 1;
>> +
>> +exit:
>> +    return rc;
>> +}
>> +
>>   static int netfront_configure(struct uk_netdev *n,
>>           const struct uk_netdev_conf *conf)
>>   {
>> @@ -286,7 +478,9 @@ static unsigned int netfront_promisc_get(struct uk_netdev *n)
>>   static const struct uk_netdev_ops netfront_ops = {
>>       .configure = netfront_configure,
>>       .txq_configure = netfront_txq_setup,
>> +    .rxq_configure = netfront_rxq_setup,
>>       .txq_info_get = netfront_txq_info_get,
>> +    .rxq_info_get = netfront_rxq_info_get,
>>       .info_get = netfront_info_get,
>>       .einfo_get = netfront_einfo_get,
>>       .hwaddr_get = netfront_mac_get,
>> diff --git a/plat/xen/drivers/net/netfront.h b/plat/xen/drivers/net/netfront.h
>> index d3f603b3..c8364815 100644
>> --- a/plat/xen/drivers/net/netfront.h
>> +++ b/plat/xen/drivers/net/netfront.h
>> @@ -44,6 +44,7 @@
>>
>>   #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE)
>> +#define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE)
>>
>>   /**
>>    * internal structure to represent the transmit queue.
>> @@ -77,6 +78,33 @@ struct uk_netdev_tx_queue {
>>    * internal structure to represent the receive queue.
>>    */
>>   struct uk_netdev_rx_queue {
>> +    /* The netfront device */
>> +    struct netfront_dev *netfront_dev;
>> +    /* The libuknet queue identifier */
>> +    uint16_t lqueue_id;
>> +    /* True if initialized */
>> +    bool initialized;
>> +
>> +    /* Shared ring size */
>> +    uint16_t ring_size;
>> +    /* Shared ring */
>> +    netif_rx_front_ring_t ring;
>> +    /* Shared ring grant ref */
>> +    grant_ref_t ring_ref;
>> +    /* Queue event channel */
>> +    evtchn_port_t evtchn;
>> +
>> +    /* The flag to interrupt on the receive queue */
>> +    uint8_t intr_enabled;
>> +
>> +    /* User-provided receive buffer allocation function */
>> +    uk_netdev_alloc_rxpkts alloc_rxpkts;
>> +    void *alloc_rxpkts_argp;
>> +
>> +    /* Receive buffers for incoming packets */
>> +    struct uk_netbuf *netbuf[NET_RX_RING_SIZE];
>> +    /* Grants for receive buffers */
>> +    grant_ref_t gref[NET_RX_RING_SIZE];
>>   };
>>
>>   struct xs_econf {
>> @@ -93,6 +121,7 @@ struct netfront_dev {
>>
>>       /* List of the Rx/Tx queues */
>>       uint16_t txqs_num;
>> +    uint16_t rxqs_num;
>>       struct uk_netdev_tx_queue *txqs;
>>       struct uk_netdev_rx_queue *rxqs;
>>       /* Maximum number of queue pairs */
> 


