
Re: [Minios-devel] [UNIKRAFT PATCH] plat/virtio: Use padded buffer for tx and rx



Reviewed-by: Simon Kuenzer <simon.kuenzer@xxxxxxxxx>

On 04.12.18, 14:56, "Sharan Santhanam" <Sharan.Santhanam@xxxxxxxxx> wrote:

    We introduce buffer padding on the transmit and receive paths. The
    padding ensures that each network packet occupies exactly two
    descriptors: one for the virtio header and another for the network
    packet itself.
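
    As a standalone sketch of the intended layout: the struct names below
    match the patch, while the virtio_net_hdr field list is the legacy
    10-byte header from the virtio spec, reproduced here only so that the
    example compiles on its own.

        #include <stdint.h>
        #include <stdio.h>

        #define VTNET_RX_HEADER_PAD 4

        /* Legacy virtio-net header (VIRTIO_NET_F_MRG_RXBUF off, so no
         * num_buffers field). */
        struct virtio_net_hdr {
                uint8_t  flags;
                uint8_t  gso_type;
                uint16_t hdr_len;
                uint16_t gso_size;
                uint16_t csum_start;
                uint16_t csum_offset;
        };

        /* Header plus pad, placed at the start of the netbuf data. */
        struct virtio_net_hdr_padded {
                struct virtio_net_hdr vhdr;
                char vrh_pad[VTNET_RX_HEADER_PAD];
        };

        int main(void)
        {
                unsigned char buf[2048];
                struct virtio_net_hdr *vhdr = (struct virtio_net_hdr *)buf;
                unsigned char *payload = buf
                        + sizeof(struct virtio_net_hdr_padded);

                /* The pad leaves a 4-byte gap between the end of the
                 * header region and the start of the payload region, so
                 * the two can never be treated as one contiguous range
                 * and must occupy separate descriptors. */
                printf("header: %zu bytes, payload starts at offset %zu\n",
                       sizeof(*vhdr), (size_t)(payload - buf));
                return 0;
        }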
    
    According to section 5.1.6.6 of the virtio specification, we must use
    two descriptors for each transmitted and received network packet,
    since we do not negotiate VIRTIO_F_ANY_LAYOUT.
    
    In the previous implementation, the sg list merged these two
    descriptors into one because the memory regions were contiguous.
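
    As a toy illustration of that merge (a sketch in the style of such sg
    lists, not Unikraft's actual uk_sglist code): an append that coalesces
    a region starting exactly where the previous one ended collapses a
    back-to-back header and payload into a single descriptor, while the
    4-byte pad keeps them apart.

        #include <stddef.h>
        #include <stdio.h>

        struct sg_seg {
                void   *addr;
                size_t  len;
        };

        /* Toy append: coalesce the new region into the previous segment
         * whenever the two are contiguous in memory. */
        static void sg_append(struct sg_seg *segs, size_t *n,
                              void *addr, size_t len)
        {
                if (*n > 0 &&
                    (char *)segs[*n - 1].addr + segs[*n - 1].len
                            == (char *)addr) {
                        segs[*n - 1].len += len; /* merged: one descriptor */
                        return;
                }
                segs[*n].addr = addr;  /* new segment: a new descriptor */
                segs[*n].len  = len;
                (*n)++;
        }

        int main(void)
        {
                unsigned char buf[64];
                struct sg_seg segs[2];
                size_t n;

                n = 0;         /* old layout: header and payload touch */
                sg_append(segs, &n, buf, 10);
                sg_append(segs, &n, buf + 10, 32);
                printf("without pad: %zu descriptor(s)\n", n); /* prints 1 */

                n = 0;         /* padded layout: 4-byte gap in between */
                sg_append(segs, &n, buf, 10);
                sg_append(segs, &n, buf + 14, 32);
                printf("with pad:    %zu descriptor(s)\n", n); /* prints 2 */
                return 0;
        }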
    
    Signed-off-by: Sharan Santhanam <sharan.santhanam@xxxxxxxxx>
    ---
     plat/drivers/virtio/virtio_net.c | 33 ++++++++++++++++++++++-----------
     1 file changed, 22 insertions(+), 11 deletions(-)
    
    diff --git a/plat/drivers/virtio/virtio_net.c b/plat/drivers/virtio/virtio_net.c
    index a5e6729..47a6161 100644
    --- a/plat/drivers/virtio/virtio_net.c
    +++ b/plat/drivers/virtio/virtio_net.c
    @@ -79,12 +79,12 @@ typedef enum {
     } virtq_type_t;
     
     /**
    - * When mergeable buffers are not negotiated, the vtnet_rx_header structure
    + * When mergeable buffers are not negotiated, the virtio_net_hdr_padded struct
      * below is placed at the beginning of the netbuf data. Use 4 bytes of pad to
      * both keep the VirtIO header and the data non-contiguous and to keep the
      * frame's payload 4 byte aligned.
      */
    -struct virtio_net_rx_hdr {
    +struct virtio_net_hdr_padded {
        struct virtio_net_hdr vhdr;
        char            vrh_pad[VTNET_RX_HEADER_PAD];
     };
    @@ -306,9 +306,12 @@ static int virtio_netdev_xmit(struct uk_netdev *dev,
     {
        struct virtio_net_device *vndev __unused;
        struct virtio_net_hdr *vhdr;
    -   int16_t header_sz = sizeof(*vhdr);
    +   struct virtio_net_hdr_padded *padded_hdr;
    +   int16_t header_sz = sizeof(*padded_hdr);
        int rc = 0;
        size_t total_len = 0;
    +   __u8  *buf_start;
    +   size_t buf_len;
     
        UK_ASSERT(dev);
        UK_ASSERT(pkt && queue);
    @@ -321,6 +324,8 @@ static int virtio_netdev_xmit(struct uk_netdev *dev,
         */
        virtio_netdev_xmit_free(queue);
     
    +   buf_start = pkt->data;
    +   buf_len = pkt->len;
        /**
         * Use the preallocated header space for the virtio header.
         */
    @@ -352,13 +357,12 @@ static int virtio_netdev_xmit(struct uk_netdev *dev,
         * 1 for the virtio header and the other for the actual network packet.
         */
        /* Appending the data to the list. */
    -   rc = uk_sglist_append(&queue->sg, vhdr, header_sz);
    +   rc = uk_sglist_append(&queue->sg, vhdr, sizeof(*vhdr));
        if (unlikely(rc != 0)) {
                uk_pr_err("Failed to append to the sg list\n");
                goto exit;
        }
    -   rc = uk_sglist_append(&queue->sg, pkt->data + header_sz,
    -                   (pkt->len - header_sz));
    +   rc = uk_sglist_append(&queue->sg, buf_start, buf_len);
        if (unlikely(rc != 0)) {
                uk_pr_err("Failed to append to the sg list\n");
                goto exit;
    @@ -375,7 +379,8 @@ static int virtio_netdev_xmit(struct uk_netdev *dev,
        if (unlikely(total_len > VIRTIO_PKT_BUFFER_LEN)) {
                uk_pr_err("Packet size too big: %lu, max:%u\n",
                          total_len, VIRTIO_PKT_BUFFER_LEN);
    -           return -ENOTSUP;
    +           rc = -ENOTSUP;
    +           goto remove_vhdr;
        }
     
        /**
    @@ -396,20 +401,26 @@ static int virtio_netdev_xmit(struct uk_netdev *dev,
        } else if (rc == -ENOSPC) {
                uk_pr_debug("No more descriptor available\n");
                rc = 0;
    +           goto remove_vhdr;
        } else {
                uk_pr_err("Failed to enqueue descriptors into the ring: %d\n",
                          rc);
    +           goto remove_vhdr;
        }
     
     exit:
        return rc;
    +
    +remove_vhdr:
    +   uk_netbuf_header(pkt, -header_sz);
    +   goto exit;
     }
     
     static int virtio_netdev_rxq_enqueue(struct uk_netdev_rx_queue *rxq,
                                     struct uk_netbuf *netbuf)
     {
        int rc = 0;
    -   struct virtio_net_rx_hdr *rxhdr;
    +   struct virtio_net_hdr_padded *rxhdr;
        int16_t header_sz = sizeof(*rxhdr);
        __u8 *buf_start;
        size_t buf_len = 0;
    @@ -478,7 +489,7 @@ static int virtio_netdev_rxq_dequeue(struct uk_netdev_rx_queue *rxq,
         */
        buf->len = len + VTNET_RX_HEADER_PAD;
        rc = uk_netbuf_header(buf,
    -                         -((int16_t)sizeof(struct virtio_net_rx_hdr)));
    +                         -((int16_t)sizeof(struct virtio_net_hdr_padded)));
        UK_ASSERT(rc == 1);
        *netbuf = buf;
     
    @@ -993,8 +1004,8 @@ static void virtio_net_info_get(struct uk_netdev *dev,
     
        dev_info->max_rx_queues = vndev->max_vqueue_pairs;
        dev_info->max_tx_queues = vndev->max_vqueue_pairs;
    -   dev_info->nb_encap_tx = sizeof(struct virtio_net_hdr);
    -   dev_info->nb_encap_rx = sizeof(struct virtio_net_rx_hdr);
    +   dev_info->nb_encap_tx = sizeof(struct virtio_net_hdr_padded);
    +   dev_info->nb_encap_rx = sizeof(struct virtio_net_hdr_padded);
     }
     
     static int virtio_net_start(struct uk_netdev *n)
    -- 
    2.7.4
    
    
