[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] [NET] back: Added tx queue



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 7bb2e0be9cc2cc5a471fdc8b3e62e2fe830972af
# Parent  2d2ed4d9b1c14aeee29dfdd77acd6017d31290cd
[NET] back: Added tx queue

This patch adds a tx queue to the backend if the frontend supports rx
refill notification.  A queue is needed because SG/TSO greatly reduces
the number of packets that can be stored in the rx ring.  Given an rx
ring with 256 entries, a maximum TSO packet can occupy as many as 18
entries, meaning that the entire ring can only hold 14 packets.  This
is too small at high bandwidths with large TCP RX windows.

Having a tx queue does not present a new security risk as the queue is
a fixed size buffer just like the rx ring.  So each guest can only hold
a fixed amount of memory (proportional to the tx queue length) on the
host.

Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>

Cannot BUG_ON netbk_queue_full yet !netbk_can_queue, as this can be
triggered by a misbehaving client. Set req_event appropriately when
stopping the packet queue, or we will not receive a notification.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/drivers/xen/netback/common.h    |   10 +++++
 linux-2.6-xen-sparse/drivers/xen/netback/interface.c |    9 ++--
 linux-2.6-xen-sparse/drivers/xen/netback/netback.c   |   36 ++++++++++++++++---
 linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c    |   10 +++++
 4 files changed, 57 insertions(+), 8 deletions(-)

diff -r 2d2ed4d9b1c1 -r 7bb2e0be9cc2 
linux-2.6-xen-sparse/drivers/xen/netback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h Mon Jul 31 10:40:21 
2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h Mon Jul 31 17:29:00 
2006 +0100
@@ -76,6 +76,10 @@ typedef struct netif_st {
        struct vm_struct *tx_comms_area;
        struct vm_struct *rx_comms_area;
 
+       /* Set of features that can be turned on in dev->features. */
+       int features;
+       int can_queue;
+
        /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
        RING_IDX rx_req_cons_peek;
 
@@ -119,4 +123,10 @@ struct net_device_stats *netif_be_get_st
 struct net_device_stats *netif_be_get_stats(struct net_device *dev);
 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
 
+static inline int netbk_can_queue(struct net_device *dev)
+{
+       netif_t *netif = netdev_priv(dev);
+       return netif->can_queue;
+}
+
 #endif /* __NETIF__BACKEND__COMMON_H__ */
diff -r 2d2ed4d9b1c1 -r 7bb2e0be9cc2 
linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Mon Jul 31 
10:40:21 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Mon Jul 31 
17:29:00 2006 +0100
@@ -51,14 +51,12 @@ static int net_open(struct net_device *d
        netif_t *netif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                __netif_up(netif);
-       netif_start_queue(dev);
        return 0;
 }
 
 static int net_close(struct net_device *dev)
 {
        netif_t *netif = netdev_priv(dev);
-       netif_stop_queue(dev);
        if (netif_carrier_ok(dev))
                __netif_down(netif);
        return 0;
@@ -107,8 +105,11 @@ netif_t *netif_alloc(domid_t domid, unsi
 
        SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
 
-       /* Disable queuing. */
-       dev->tx_queue_len = 0;
+       /*
+        * Reduce default TX queuelen so that each guest interface only
+        * allows it to eat around 6.4MB of host memory.
+        */
+       dev->tx_queue_len = 100;
 
        for (i = 0; i < ETH_ALEN; i++)
                if (be_mac[i] != 0)
diff -r 2d2ed4d9b1c1 -r 7bb2e0be9cc2 
linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Mon Jul 31 
10:40:21 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Mon Jul 31 
17:29:00 2006 +0100
@@ -136,6 +136,14 @@ static inline int is_xen_skb(struct sk_b
        return (cp == skbuff_cachep);
 }
 
+static inline int netbk_queue_full(netif_t *netif)
+{
+       RING_IDX peek = netif->rx_req_cons_peek;
+
+       return ((netif->rx.sring->req_prod - peek) <= 0) ||
+              ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) <= 0);
+}
+
 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        netif_t *netif = netdev_priv(dev);
@@ -143,11 +151,15 @@ int netif_be_start_xmit(struct sk_buff *
        BUG_ON(skb->dev != dev);
 
        /* Drop the packet if the target domain has no receive buffers. */
-       if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev)) ||
-           (netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
-           ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
-            NET_RX_RING_SIZE))
+       if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev)))
                goto drop;
+
+       if (unlikely(netbk_queue_full(netif))) {
+               /* Not a BUG_ON() -- misbehaving netfront can trigger this. */
+               if (netbk_can_queue(dev))
+                       DPRINTK("Queue full but not stopped!\n");
+               goto drop;
+       }
 
        /*
         * We do not copy the packet unless:
@@ -178,6 +190,13 @@ int netif_be_start_xmit(struct sk_buff *
        netif->rx_req_cons_peek++;
        netif_get(netif);
 
+       if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
+               netif->rx.sring->req_event = netif->rx_req_cons_peek + 1;
+               mb(); /* request notification /then/ check & stop the queue */
+               if (netbk_queue_full(netif))
+                       netif_stop_queue(dev);
+       }
+
        skb_queue_tail(&rx_queue, skb);
        tasklet_schedule(&net_rx_tasklet);
 
@@ -351,6 +370,10 @@ static void net_rx_action(unsigned long 
                        notify_list[notify_nr++] = irq;
                }
 
+               if (netif_queue_stopped(netif->dev) &&
+                   !netbk_queue_full(netif))
+                       netif_wake_queue(netif->dev);
+
                netif_put(netif);
                dev_kfree_skb(skb);
                gop++;
@@ -974,8 +997,13 @@ irqreturn_t netif_be_int(int irq, void *
 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 {
        netif_t *netif = dev_id;
+
        add_to_net_schedule_list_tail(netif);
        maybe_schedule_tx_action();
+
+       if (netif_queue_stopped(netif->dev) && !netbk_queue_full(netif))
+               netif_wake_queue(netif->dev);
+
        return IRQ_HANDLED;
 }
 
diff -r 2d2ed4d9b1c1 -r 7bb2e0be9cc2 
linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Mon Jul 31 10:40:21 
2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Mon Jul 31 17:29:00 
2006 +0100
@@ -353,6 +353,7 @@ static int connect_rings(struct backend_
        unsigned long tx_ring_ref, rx_ring_ref;
        unsigned int evtchn;
        int err;
+       int val;
 
        DPRINTK("");
 
@@ -366,6 +367,15 @@ static int connect_rings(struct backend_
                                 dev->otherend);
                return err;
        }
+
+       if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-rx-notify", "%d",
+                        &val) < 0)
+               val = 0;
+       if (val)
+               be->netif->can_queue = 1;
+       else
+               /* Must be non-zero for pfifo_fast to work. */
+               be->netif->dev->tx_queue_len = 1;
 
        /* Map the shared frame, irq etc. */
        err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.