
[Xen-changelog] [xen-unstable] [NET] front: Discard packets in tx ring rather than attempting retransmit



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 1e49997c8146b7c12face5ac4ab19c67ae726863
# Parent  8070050cc30f3d969835d7b1d6eda57959d56842
[NET] front: Discard packets in tx ring rather than attempting retransmit
when reconnecting to backend driver (e.g., after save/restore or migrate).

Two main reasons for this:
 1. The retransmit code is broken for fragmented packets. It would need
    a rewrite to cope with the new scatter-gather format.
 2. We will drop packets anyway, in both directions: it takes some time
    for received packets to be redirected to the new virtual interface,
    and further packets handed down by the network stack are dropped
    after we call netif_carrier_off(). So even if we retransmit what is
    already in the ring, it is likely that some subsequent packets will
    already have been lost.

If this causes downtimes that are too long (particularly for live
relocation), then a strategy for buffering packets while the
frontend-backend connection is severed will need to be considered.
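
For reference, a condensed sketch of what the reconnect path looks like
after this change (drawn from the hunks below; the struct fields and
helpers are those of the netfront driver, with locking and the unchanged
RX requeue loop elided):

    static void network_connect(struct net_device *dev)
    {
        struct netfront_info *np = netdev_priv(dev);
        struct sk_buff *skb;
        int i;

        /* Step 1: discard every pending TX packet fragment. */
        for (i = 1; i <= NET_TX_RING_SIZE; i++) {
            /* Freelist index entries are < PAGE_OFFSET; skb pointers are not. */
            if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
                continue;
            skb = np->tx_skbs[i];
            gnttab_end_foreign_access_ref(np->grant_tx_ref[i], GNTMAP_readonly);
            gnttab_release_grant_reference(&np->gref_tx_head, np->grant_tx_ref[i]);
            np->grant_tx_ref[i] = GRANT_INVALID_REF;
            add_id_to_freelist(np->tx_skbs, i);
            dev_kfree_skb_irq(skb);        /* drop it -- no retransmit */
        }

        /* Step 2: requeue the RX buffers we still hold (unchanged, see below). */

        /* Step 3: bring the carrier back up and kick the backend. */
        netif_carrier_on(dev);
        notify_remote_via_irq(np->irq);
        network_tx_buf_gc(dev);
        network_alloc_rx_buffers(dev);
    }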

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c |   68 +++++--------------
 1 files changed, 20 insertions(+), 48 deletions(-)

diff -r 8070050cc30f -r 1e49997c8146 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Wed Jun 14 12:36:06 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Wed Jun 14 13:11:44 2006 +0100
@@ -1072,68 +1072,39 @@ static void xennet_set_features(struct n
 
 static void network_connect(struct net_device *dev)
 {
-       struct netfront_info *np;
+       struct netfront_info *np = netdev_priv(dev);
        int i, requeue_idx;
-       struct netif_tx_request *tx;
        struct sk_buff *skb;
 
        xennet_set_features(dev);
 
-       np = netdev_priv(dev);
        spin_lock_irq(&np->tx_lock);
        spin_lock(&np->rx_lock);
 
-       /* Recovery procedure: */
-
        /*
-        * Step 1: Rebuild the RX and TX ring contents.
-        * NB. We could just free the queued TX packets now but we hope
-        * that sending them out might do some good.  We have to rebuild
-        * the RX ring because some of our pages are currently flipped out
-        * so we can't just free the RX skbs.
-        * NB2. Freelist index entries are always going to be less than
+         * Recovery procedure:
+        *  NB. Freelist index entries are always going to be less than
         *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
-        * greater than PAGE_OFFSET: we use this property to distinguish
-        * them.
-        */
-
-       /*
-        * Rebuild the TX buffer freelist and the TX ring itself.
-        * NB. This reorders packets.  We could keep more private state
-        * to avoid this but maybe it doesn't matter so much given the
-        * interface has been down.
-        */
+        *  greater than PAGE_OFFSET: we use this property to distinguish
+        *  them.
+         */
+
+       /* Step 1: Discard all pending TX packet fragments. */
        for (requeue_idx = 0, i = 1; i <= NET_TX_RING_SIZE; i++) {
                if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
                        continue;
 
                skb = np->tx_skbs[i];
-
-               tx = RING_GET_REQUEST(&np->tx, requeue_idx);
-               requeue_idx++;
-
-               tx->id = i;
-               gnttab_grant_foreign_access_ref(
-                       np->grant_tx_ref[i], np->xbdev->otherend_id,
-                       virt_to_mfn(np->tx_skbs[i]->data),
-                       GNTMAP_readonly);
-               tx->gref = np->grant_tx_ref[i];
-               tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
-               tx->size = skb->len;
-               tx->flags = 0;
-               if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
-                       tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
-               if (skb->proto_data_valid) /* remote but checksummed? */
-                       tx->flags |= NETTXF_data_validated;
-
-               np->stats.tx_bytes += skb->len;
-               np->stats.tx_packets++;
-       }
-
-       np->tx.req_prod_pvt = requeue_idx;
-       RING_PUSH_REQUESTS(&np->tx);
-
-       /* Rebuild the RX buffer freelist and the RX ring itself. */
+               gnttab_end_foreign_access_ref(
+                       np->grant_tx_ref[i], GNTMAP_readonly);
+               gnttab_release_grant_reference(
+                       &np->gref_tx_head, np->grant_tx_ref[i]);
+               np->grant_tx_ref[i] = GRANT_INVALID_REF;
+               add_id_to_freelist(np->tx_skbs, i);
+               dev_kfree_skb_irq(skb);
+       }
+
+       /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
        for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) {
                if ((unsigned long)np->rx_skbs[i] < PAGE_OFFSET)
                        continue;
@@ -1150,7 +1121,7 @@ static void network_connect(struct net_d
        RING_PUSH_REQUESTS(&np->rx);
 
        /*
-        * Step 2: All public and private state should now be sane.  Get
+        * Step 3: All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
         * domain a kick because we've probably just requeued some
         * packets.
@@ -1158,6 +1129,7 @@ static void network_connect(struct net_d
        netif_carrier_on(dev);
        notify_remote_via_irq(np->irq);
        network_tx_buf_gc(dev);
+       network_alloc_rx_buffers(dev);
 
        spin_unlock(&np->rx_lock);
        spin_unlock_irq(&np->tx_lock);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog