
[Xen-changelog] [xen-unstable] [NET] back: Transmit TSO packets if supported



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 637fa5352fad1490153638e34c4ff0705fb9b8e8
# Parent  485616ab73e380e11791fd43d1d90133fff8c8a0
[NET] back: Transmit TSO packets if supported

This patch adds TSO transmission support to the backend.

Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>

Disabled for now, as with the domU->dom0 direction.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/drivers/xen/netback/interface.c |   14 +++
 linux-2.6-xen-sparse/drivers/xen/netback/netback.c   |   75 +++++++++++++------
 linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c    |   10 ++
 3 files changed, 78 insertions(+), 21 deletions(-)
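
The core of the change is slot accounting on the shared RX ring: a GSO
packet consumes one extra request slot for the netif_extra_info segment
that carries the GSO metadata, so the per-packet slot count moves from
nr_frags + 1 to nr_frags + 1 + !!gso_size, and the queue-full threshold
from MAX_SKB_FRAGS to MAX_SKB_FRAGS + 1.  A minimal user-space sketch of
that accounting follows (struct skb_info and the constant values are
illustrative stand-ins, not the kernel's definitions):

/* Sketch of the RX-ring slot accounting after this patch.  Simplified,
 * self-contained stand-ins for the kernel types and constants. */
#include <stdio.h>

#define MAX_SKB_FRAGS    18    /* illustrative value */
#define NET_RX_RING_SIZE 256   /* illustrative ring size */

struct skb_info {              /* stand-in for skb_shinfo(skb) */
	int nr_frags;
	int gso_size;          /* non-zero => TSO/GSO packet */
};

/* Head slot + one per frag, plus one extra-info slot if this is GSO. */
static int rx_slots_needed(const struct skb_info *s)
{
	return s->nr_frags + 1 + !!s->gso_size;
}

/* Mirrors netbk_queue_full(): the worst case per packet is now
 * MAX_SKB_FRAGS + 1 slots rather than MAX_SKB_FRAGS. */
static int queue_full(unsigned req_prod, unsigned rsp_prod_pvt,
		      unsigned peek)
{
	return ((req_prod - peek) <= (MAX_SKB_FRAGS + 1)) ||
	       ((rsp_prod_pvt + NET_RX_RING_SIZE - peek) <=
		(MAX_SKB_FRAGS + 1));
}

int main(void)
{
	struct skb_info tso   = { .nr_frags = 3, .gso_size = 1448 };
	struct skb_info plain = { .nr_frags = 3, .gso_size = 0 };

	printf("TSO skb: %d slots, non-TSO skb: %d slots\n",
	       rx_slots_needed(&tso), rx_slots_needed(&plain));
	printf("queue full? %d\n", queue_full(300, 60, 290));
	return 0;
}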

diff -r 485616ab73e3 -r 637fa5352fad linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Mon Jul 31 17:45:22 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Mon Jul 31 17:49:36 2006 +0100
@@ -84,12 +84,26 @@ static int netbk_set_sg(struct net_devic
        return ethtool_op_set_sg(dev, data);
 }
 
+static int netbk_set_tso(struct net_device *dev, u32 data)
+{
+       if (data) {
+               netif_t *netif = netdev_priv(dev);
+
+               if (!(netif->features & NETIF_F_TSO))
+                       return -ENOSYS;
+       }
+
+       return ethtool_op_set_tso(dev, data);
+}
+
 static struct ethtool_ops network_ethtool_ops =
 {
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = netbk_set_sg,
+       .get_tso = ethtool_op_get_tso,
+       .set_tso = netbk_set_tso,
        .get_link = ethtool_op_get_link,
 };
 
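The new ethtool hook only gates enabling: netbk_set_tso() returns -ENOSYS
when the frontend has not negotiated NETIF_F_TSO and otherwise defers to
ethtool_op_set_tso(), mirroring netbk_set_sg() above.  From dom0 the knob
is then reachable through the standard ethtool offload interface, e.g.
something like "ethtool -K vif1.0 tso on" (vif1.0 being an example
interface name).
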
diff -r 485616ab73e3 -r 637fa5352fad linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Mon Jul 31 17:45:22 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Mon Jul 31 17:49:36 2006 +0100
@@ -50,12 +50,12 @@ static void make_tx_response(netif_t *ne
 static void make_tx_response(netif_t *netif, 
                             netif_tx_request_t *txp,
                             s8       st);
-static int  make_rx_response(netif_t *netif, 
-                            u16      id, 
-                            s8       st,
-                            u16      offset,
-                            u16      size,
-                            u16      flags);
+static netif_rx_response_t *make_rx_response(netif_t *netif, 
+                                            u16      id, 
+                                            s8       st,
+                                            u16      offset,
+                                            u16      size,
+                                            u16      flags);
 
 static void net_tx_action(unsigned long unused);
 static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
@@ -225,9 +225,9 @@ static inline int netbk_queue_full(netif
 {
        RING_IDX peek = netif->rx_req_cons_peek;
 
-       return ((netif->rx.sring->req_prod - peek) <= MAX_SKB_FRAGS) ||
+       return ((netif->rx.sring->req_prod - peek) <= (MAX_SKB_FRAGS + 1)) ||
               ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) <=
-               MAX_SKB_FRAGS);
+               (MAX_SKB_FRAGS + 1));
 }
 
 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -265,12 +265,13 @@ int netif_be_start_xmit(struct sk_buff *
                skb = nskb;
        }
 
-       netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1;
+       netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
+                                  !!skb_shinfo(skb)->gso_size;
        netif_get(netif);
 
        if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
                netif->rx.sring->req_event = netif->rx_req_cons_peek +
-                       MAX_SKB_FRAGS + 1;
+                       MAX_SKB_FRAGS + 2;
                mb(); /* request notification /then/ check & stop the queue */
                if (netbk_queue_full(netif))
                        netif_stop_queue(dev);
@@ -347,11 +348,16 @@ static void netbk_gop_skb(struct sk_buff
        netif_t *netif = netdev_priv(skb->dev);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int i;
+       int extra;
+
+       meta[count].frag.page_offset = skb_shinfo(skb)->gso_type;
+       meta[count].frag.size = skb_shinfo(skb)->gso_size;
+       extra = !!meta[count].frag.size + 1;
 
        for (i = 0; i < nr_frags; i++) {
                meta[++count].frag = skb_shinfo(skb)->frags[i];
                meta[count].id = netbk_gop_frag(netif, meta[count].frag.page,
-                                               count, i + 1);
+                                               count, i + extra);
        }
 
        /*
@@ -361,7 +367,7 @@ static void netbk_gop_skb(struct sk_buff
        meta[count - nr_frags].id = netbk_gop_frag(netif,
                                                   virt_to_page(skb->data),
                                                   count - nr_frags, 0);
-       netif->rx.req_cons += nr_frags + 1;
+       netif->rx.req_cons += nr_frags + extra;
 }
 
 static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
@@ -422,6 +428,8 @@ static void net_rx_action(unsigned long 
        netif_t *netif = NULL; 
        s8 status;
        u16 id, irq, flags;
+       netif_rx_response_t *resp;
+       struct netif_extra_info *extra;
        multicall_entry_t *mcl;
        struct sk_buff_head rxq;
        struct sk_buff *skb;
@@ -511,8 +519,33 @@ static void net_rx_action(unsigned long 
                else if (skb->proto_data_valid) /* remote but checksummed? */
                        flags |= NETRXF_data_validated;
 
-               make_rx_response(netif, id, status, offset_in_page(skb->data),
-                                skb_headlen(skb), flags);
+               resp = make_rx_response(netif, id, status,
+                                       offset_in_page(skb->data),
+                                       skb_headlen(skb), flags);
+
+               extra = NULL;
+
+               if (meta[count].frag.size) {
+                       struct netif_extra_info *gso =
+                               (struct netif_extra_info *)
+                               RING_GET_RESPONSE(&netif->rx,
+                                                 netif->rx.rsp_prod_pvt++);
+
+                       if (extra)
+                               extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+                       else
+                               resp->flags |= NETRXF_extra_info;
+
+                       gso->u.gso.size = meta[count].frag.size;
+                       gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+                       gso->u.gso.pad = 0;
+                       gso->u.gso.features = 0;
+
+                       gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
+                       gso->flags = 0;
+                       extra = gso;
+               }
+
                netbk_add_frag_responses(netif, status, meta + count + 1,
                                         nr_frags);
 
@@ -1190,12 +1223,12 @@ static void make_tx_response(netif_t *ne
 #endif
 }
 
-static int make_rx_response(netif_t *netif, 
-                           u16      id, 
-                           s8       st,
-                           u16      offset,
-                           u16      size,
-                           u16      flags)
+static netif_rx_response_t *make_rx_response(netif_t *netif, 
+                                            u16      id, 
+                                            s8       st,
+                                            u16      offset,
+                                            u16      size,
+                                            u16      flags)
 {
        RING_IDX i = netif->rx.rsp_prod_pvt;
        netif_rx_response_t *resp;
@@ -1210,7 +1243,7 @@ static int make_rx_response(netif_t *net
 
        netif->rx.rsp_prod_pvt = ++i;
 
-       return 0;
+       return resp;
 }
 
 #ifdef NETBE_DEBUG_INTERRUPT
diff -r 485616ab73e3 -r 637fa5352fad linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Mon Jul 31 17:45:22 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Mon Jul 31 17:49:36 2006 +0100
@@ -384,6 +384,16 @@ static int connect_rings(struct backend_
                be->netif->dev->features |= NETIF_F_SG;
        }
 
+#if 0 /* KAF: After the protocol is finalised. */
+       if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
+                        &val) < 0)
+               val = 0;
+       if (val) {
+               be->netif->features |= NETIF_F_TSO;
+               be->netif->dev->features |= NETIF_F_TSO;
+       }
+#endif
+
        /* Map the shared frame, irq etc. */
        err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
        if (err) {
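
The xenbus.c hunk above is the backend half of the feature negotiation:
once the #if 0 is lifted, it reads feature-gso-tcpv4 from the frontend's
xenstore directory (dev->otherend) and, if set, enables NETIF_F_TSO on
the vif.  The matching frontend advertisement is not part of this
changeset; it would presumably be a single xenstore write along these
lines (sketch only; xbt and dev stand for the frontend's transaction and
xenbus device):

	/* Frontend side (sketch, not in this changeset): advertise that
	 * GSO/TSO packets can be received from the backend.  xbt and dev
	 * are assumed to be the frontend's transaction and device. */
	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err)
		xenbus_dev_fatal(dev, err, "writing feature-gso-tcpv4");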

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

