
[Xen-changelog] If netfront fails to allocate a receive skbuff, push all pending



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 3e0f4fe2281080cda9205e49de0d5f77f0d9d87f
# Parent  013eab60cb78455fb602d99866e0cba8846bc244
If netfront fails to allocate a receive skbuff, push all pending
skbuffs out onto the shared ring. If there are no skbuffs to push,
schedule a timer to try again later. This will avoid interface
lockups in low-memory conditions.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
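
The retry pattern the changeset describes can be summarised outside the driver. The sketch below is illustrative only, not the netfront code itself: it assumes the Linux 2.6 timer API (init_timer/mod_timer) that the patch uses, and the names my_dev and refill_buffers are hypothetical stand-ins for struct net_private and network_alloc_rx_buffers().

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical per-device state, standing in for struct net_private. */
    struct my_dev {
        struct net_device   *netdev;
        struct sk_buff_head  rx_batch;
        struct timer_list    rx_refill_timer;
    };

    /* Timer callback: ask the stack to poll the device again, so the refill
     * path gets another chance once memory pressure has eased. */
    static void rx_refill_timeout(unsigned long data)
    {
        struct net_device *dev = (struct net_device *)data;
        netif_rx_schedule(dev);
    }

    static void refill_buffers(struct my_dev *d, int wanted)
    {
        struct sk_buff *skb;

        while (skb_queue_len(&d->rx_batch) < wanted) {
            skb = alloc_skb(1500, GFP_ATOMIC);   /* stand-in for alloc_xen_skb() */
            if (!skb) {
                if (skb_queue_len(&d->rx_batch))
                    break;                       /* push what we already have */
                /* Nothing allocated at all: retry in ~100ms rather than stall. */
                mod_timer(&d->rx_refill_timer, jiffies + (HZ / 10));
                return;
            }
            __skb_queue_tail(&d->rx_batch, skb);
        }

        /* ... push everything queued on rx_batch out onto the shared ring ... */
    }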

diff -r 013eab60cb78 -r 3e0f4fe22810 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Tue Dec 27 10:18:42 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Wed Dec 28 11:57:18 2005
@@ -135,6 +135,8 @@
     int rx_min_target, rx_max_target, rx_target;
     struct sk_buff_head rx_batch;
 
+    struct timer_list rx_refill_timer;
+
     /*
      * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
      * array is an index into a chain of free entries.
@@ -347,6 +349,13 @@
 }
 
 
+static void rx_refill_timeout(unsigned long data)
+{
+    struct net_device *dev = (struct net_device *)data;
+    netif_rx_schedule(dev);
+}
+
+
 static void network_alloc_rx_buffers(struct net_device *dev)
 {
     unsigned short id;
@@ -366,8 +375,14 @@
      */
     batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
     for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
-        if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
-            break;
+        if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL)) {
+            /* Any skbuffs queued for refill? Force them out. */
+            if (i != 0)
+                goto refill;
+            /* Could not allocate any skbuffs. Try again later. */
+            mod_timer(&np->rx_refill_timer, jiffies + (HZ/10));
+            return;
+        }
         __skb_queue_tail(&np->rx_batch, skb);
     }
 
@@ -375,6 +390,12 @@
     if (i < (np->rx_target/2))
         return;
 
+    /* Adjust our floating fill target if we risked running out of buffers. */
+    if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
+         ((np->rx_target *= 2) > np->rx_max_target))
+        np->rx_target = np->rx_max_target;
+
+ refill:
     for (i = 0; ; i++) {
         if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
             break;
@@ -428,11 +449,6 @@
 
     /* Above is a suitable barrier to ensure backend will see requests. */
     np->rx->req_prod = req_prod + i;
-
-    /* Adjust our floating fill target if we risked running out of buffers. */
-    if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
-         ((np->rx_target *= 2) > np->rx_max_target))
-        np->rx_target = np->rx_max_target;
 }
 
 
@@ -967,6 +983,10 @@
     np->rx_target     = RX_MIN_TARGET;
     np->rx_min_target = RX_MIN_TARGET;
     np->rx_max_target = RX_MAX_TARGET;
+ 
+    init_timer(&np->rx_refill_timer);
+    np->rx_refill_timer.data = (unsigned long)dev;
+    np->rx_refill_timer.function = rx_refill_timeout;
 
     /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
     for (i = 0; i <= NETIF_TX_RING_SIZE; i++)
@@ -1300,6 +1320,7 @@
     /* Avoid having tx/rx stuff happen until we're ready. */
     free_irq(np->irq, np->dev);
     unbind_evtchn_from_irq(np->evtchn);
+    del_timer_sync(&np->rx_refill_timer);
 }
 
 static void vif_resume(struct net_private *np)
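
For completeness, the timer's lifetime matches the last two hunks: it is initialised when the device state is set up and cancelled with del_timer_sync() in the disconnect path, so the callback cannot run against a freed net_device. A minimal sketch of that 2.6-era idiom, again using the hypothetical my_dev from the earlier example:

    /* At device creation (cf. the init_timer hunk above). */
    static void my_dev_setup(struct my_dev *d, struct net_device *netdev)
    {
        d->netdev = netdev;
        skb_queue_head_init(&d->rx_batch);

        init_timer(&d->rx_refill_timer);
        d->rx_refill_timer.data     = (unsigned long)netdev;
        d->rx_refill_timer.function = rx_refill_timeout;
    }

    /* At teardown (cf. the del_timer_sync hunk above): wait for any in-flight
     * callback to finish before the device goes away. */
    static void my_dev_teardown(struct my_dev *d)
    {
        del_timer_sync(&d->rx_refill_timer);
    }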
