
Re: [Xen-devel] [PATCH RFC 2/4] xen-netback: Add support for multiple queues



On Wed, Jan 15, 2014 at 04:23:22PM +0000, Andrew J. Bennieston wrote:
[...]
> --- a/drivers/net/xen-netback/interface.c
> +++ b/drivers/net/xen-netback/interface.c
> @@ -355,7 +355,13 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
>       char name[IFNAMSIZ] = {};
>  
>       snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
> -     dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup, 1);
> +     /*
> +      * Allocate a netdev with the max. supported number of queues.
> +      * When the guest selects the desired number, it will be updated
> +      * via netif_set_real_num_tx_queues().
> +      */
> +     dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
> +                     xenvif_max_queues);

Indentation is off here; the continuation line should align with the open parenthesis of alloc_netdev_mq().


>       if (dev == NULL) {
>               pr_warn("Could not allocate netdev for %s\n", name);
>               return ERR_PTR(-ENOMEM);
> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
> index 586e741..5d717d7 100644
> --- a/drivers/net/xen-netback/netback.c
> +++ b/drivers/net/xen-netback/netback.c
> @@ -55,6 +55,9 @@
>  bool separate_tx_rx_irq = 1;
>  module_param(separate_tx_rx_irq, bool, 0644);
>  
> +unsigned int xenvif_max_queues = 4;
> +module_param(xenvif_max_queues, uint, 0644);
> +

This default looks a bit arbitrary. I guess it would be better to default
to the number of online CPUs in Dom0?
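
Something along these lines, perhaps (only a sketch, not compile-tested;
it assumes the default becomes 0, meaning "pick automatically", and that
the check goes in netback_init()):

static int __init netback_init(void)
{
        /* Default to one queue per online Dom0 CPU if the module
         * parameter was not set; num_online_cpus() comes from
         * <linux/cpumask.h>.
         */
        if (xenvif_max_queues == 0)
                xenvif_max_queues = num_online_cpus();

        /* ... rest of the existing init ... */
        return 0;
}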

>  /*
>   * This is the maximum slots a skb can have. If a guest sends a skb
>   * which exceeds this limit it is considered malicious.
> diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
> index c3332e2..ce7ca9a 100644
> --- a/drivers/net/xen-netback/xenbus.c
> +++ b/drivers/net/xen-netback/xenbus.c
> @@ -21,6 +21,7 @@
>  
>  #include "common.h"
>  #include <linux/vmalloc.h>
> +#include <linux/rtnetlink.h>
>  
>  struct backend_info {
>       struct xenbus_device *dev;
> @@ -160,6 +161,14 @@ static int netback_probe(struct xenbus_device *dev,
>       if (err)
>               pr_debug("Error writing feature-split-event-channels\n");
>  
> +     /*
> +      * Multi-queue support: This is an optional feature.
> +      */
> +     err = xenbus_printf(XBT_NIL, dev->nodename,
> +                     "multi-queue-max-queues", "%u", xenvif_max_queues);

This key should be prefixed with "feature-", like the other optional
feature keys (e.g. feature-split-event-channels above).
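
I.e. something like this (the exact key name is up for discussion, of
course):

err = xenbus_printf(XBT_NIL, dev->nodename,
                    "feature-multi-queue-max-queues",
                    "%u", xenvif_max_queues);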

> +     if (err)
> +             pr_debug("Error writing multi-queue-max-queues\n");
> +
>       err = xenbus_switch_state(dev, XenbusStateInitWait);
>       if (err)
>               goto fail;
> @@ -491,6 +500,16 @@ static void connect(struct backend_info *be)
>       unsigned long credit_bytes, credit_usec;
>       unsigned int queue_index;
>       struct xenvif_queue *queue;
> +     unsigned int requested_num_queues;
> +
> +     /* Check whether the frontend requested multiple queues
> +      * and read the number requested.
> +      */
> +     err = xenbus_scanf(XBT_NIL, dev->otherend,
> +                     "multi-queue-num-queues",
> +                     "%u", &requested_num_queues);
> +     if (err < 0)
> +             requested_num_queues = 1; /* Fall back to single queue */

You also need to check that the value you read back is not larger than
permitted (i.e. not greater than xenvif_max_queues) -- the guest can be
buggy or malicious.
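
Something like this, perhaps (a sketch only, reusing the names from the
patch and not compile-tested; connect() returns void, so simply
returning after xenbus_dev_fatal() should be fine):

err = xenbus_scanf(XBT_NIL, dev->otherend,
                   "multi-queue-num-queues",
                   "%u", &requested_num_queues);
if (err < 0) {
        requested_num_queues = 1; /* Fall back to single queue */
} else if (requested_num_queues == 0 ||
           requested_num_queues > xenvif_max_queues) {
        /* Buggy or malicious frontend: refuse to connect. */
        xenbus_dev_fatal(dev, -EINVAL,
                         "requested %u queues, maximum is %u",
                         requested_num_queues, xenvif_max_queues);
        return;
}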

>  
>       err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
>       if (err) {
> @@ -501,9 +520,13 @@ static void connect(struct backend_info *be)
>       xen_net_read_rate(dev, &credit_bytes, &credit_usec);
>       read_xenbus_vif_flags(be);
>  
> -     be->vif->num_queues = 1;
> +     /* Use the number of queues requested by the frontend */
> +     be->vif->num_queues = requested_num_queues;
>       be->vif->queues = vzalloc(be->vif->num_queues *
>                       sizeof(struct xenvif_queue));
> +     rtnl_lock();
> +     netif_set_real_num_tx_queues(be->vif->dev, be->vif->num_queues);
> +     rtnl_unlock();
>  
>       for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
>       {
> @@ -549,29 +572,51 @@ static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
>       unsigned long tx_ring_ref, rx_ring_ref;
>       unsigned int tx_evtchn, rx_evtchn;
>       int err;
> +     char *xspath = NULL;
> +     size_t xspathsize;
> +
> +     /* If the frontend requested 1 queue, or we have fallen back
> +      * to single queue due to lack of frontend support for multi-
> +      * queue, expect the remaining XenStore keys in the toplevel
> +      * directory. Otherwise, expect them in a subdirectory called
> +      * queue-N.
> +      */

I think even if the frontend only requests one queue you could still put
the keys under a subdirectory? I don't have a strong preference here,
though.

Once the protocol is settled it needs to be documented in netif.h.
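
For reference, my reading of the XenStore layout this patch implies is
roughly the following (key names follow the existing single-queue
protocol, values are illustrative):

/local/domain/<domid>/device/vif/<handle>/multi-queue-num-queues = "2"
/local/domain/<domid>/device/vif/<handle>/queue-0/tx-ring-ref
/local/domain/<domid>/device/vif/<handle>/queue-0/rx-ring-ref
/local/domain/<domid>/device/vif/<handle>/queue-0/event-channel (or -tx/-rx)
/local/domain/<domid>/device/vif/<handle>/queue-1/tx-ring-ref
(and so on for each queue)

with the single-queue case keeping these keys in the top-level vif
directory, as the patch does now.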

> +     if (queue->vif->num_queues == 1)
> +             xspath = (char *)dev->otherend;
> +     else {
> +             xspathsize = strlen(dev->otherend) + 10;

Please avoid using a magic number here; a named constant (or a size
derived from the format string) would be clearer.
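
Something like this would read better, I think (the local name is just
for illustration):

const size_t queue_xspath_extra = sizeof("/queue-999"); /* incl. NUL */

xspathsize = strlen(dev->otherend) + queue_xspath_extra;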

> +             xspath = kzalloc(xspathsize, GFP_KERNEL);
> +             if (!xspath) {
> +                     xenbus_dev_fatal(dev, -ENOMEM,
> +                                     "reading ring references");

"ring reference"?

> +                     return -ENOMEM;
> +             }
> +             snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
> +                             queue->number);

Indentation here as well; align with the open parenthesis.

Wei.

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

