
[Xen-devel] [PATCH 13/17] TSO support.

This adds both TSO-transmit and TSO-receive support.

Signed-off-by: Steven Smith <steven.smith@xxxxxxxxxx>
---
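For reviewers, the overall flow in this patch is: advertise_offloads() now sets
tcpv4_segmentation_offload in the set_offload message; when the peer advertises
it, nc2_set_tso() allows NETIF_F_TSO to be enabled, but only if the ring also
permits at least MAX_SKB_FRAGS fragments per TX packet.  On transmit,
skb_shinfo(skb)->gso_size is carried in the new mss field together with
segmentation_type NC2_PACKET_SEGMENTATION_TYPE_tcpv4; on receive, those fields
are turned back into gso_size/gso_type (SKB_GSO_TCPV4 | SKB_GSO_DODGY).

The small standalone program below is only an illustrative model of the
enable-TSO decision made in nc2_set_tso(); the names tso_allowed and
MODEL_MAX_SKB_FRAGS are invented for the example and are not part of the
patch itself.

	#include <stdio.h>

	/* Illustrative stand-in for the kernel's MAX_SKB_FRAGS; the real
	   value depends on PAGE_SIZE. */
	#define MODEL_MAX_SKB_FRAGS 18

	/* Mirrors the check in nc2_set_tso(): TSO may only be enabled if the
	   peer has advertised tcpv4_segmentation_offload and the ring allows
	   enough fragments per TX packet to carry a full-sized GSO skb. */
	static int tso_allowed(int peer_allows_tso, int max_frags_per_tx_packet)
	{
		return peer_allows_tso &&
		       max_frags_per_tx_packet >= MODEL_MAX_SKB_FRAGS;
	}

	int main(void)
	{
		printf("peer=1 frags=18 -> %d\n", tso_allowed(1, 18)); /* enabled */
		printf("peer=1 frags=1  -> %d\n", tso_allowed(1, 1));  /* refused: too few fragments */
		printf("peer=0 frags=18 -> %d\n", tso_allowed(0, 18)); /* refused: peer did not allow TSO */
		return 0;
	}
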
 drivers/net/xen-netchannel2/chan.c             |    3 +-
 drivers/net/xen-netchannel2/netchannel2_core.h |    4 +++
 drivers/net/xen-netchannel2/offload.c          |   33 ++++++++++++++++++++++-
 drivers/net/xen-netchannel2/recv_packet.c      |   19 +++++++++++++
 drivers/net/xen-netchannel2/xmit_packet.c      |    8 ++++++
 include/xen/interface/io/netchannel2.h         |   19 ++++++++++---
 6 files changed, 78 insertions(+), 8 deletions(-)

diff --git a/drivers/net/xen-netchannel2/chan.c b/drivers/net/xen-netchannel2/chan.c
index 238e7fe..d5eb26e 100644
--- a/drivers/net/xen-netchannel2/chan.c
+++ b/drivers/net/xen-netchannel2/chan.c
@@ -568,9 +568,10 @@ void nc2_detach_rings(struct netchannel2 *nc)
        nc->rings.irq = -1;
 
        /* Disable all offloads */
-       nc->net_device->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG);
+       nc->net_device->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
        nc->allow_tx_csum_offload = 0;
        nc->rings.max_fragments_per_tx_packet = 1;
+       nc->allow_tso = 0;
 }
 
 #if defined(CONFIG_XEN_NETDEV2_BACKEND)
diff --git a/drivers/net/xen-netchannel2/netchannel2_core.h b/drivers/net/xen-netchannel2/netchannel2_core.h
index 6197550..b5aa584 100644
--- a/drivers/net/xen-netchannel2/netchannel2_core.h
+++ b/drivers/net/xen-netchannel2/netchannel2_core.h
@@ -260,6 +260,10 @@ struct netchannel2 {
           Whether we actually use TX checksumming is controlled by
           the net device feature bits. */
        uint8_t allow_tx_csum_offload;
+       /* The remote endpoint allows us to use TSO for TCPv4.  As for
+          checksumming, we only actually use the feature if the net
+          device says to. */
+       uint8_t allow_tso;
        /* At some point in the past, we tried to tell the other end
           what our current offload policy is and failed.  Try again
           as soon as possible. */
diff --git a/drivers/net/xen-netchannel2/offload.c b/drivers/net/xen-netchannel2/offload.c
index 552b0ad..5e9c8d0 100644
--- a/drivers/net/xen-netchannel2/offload.c
+++ b/drivers/net/xen-netchannel2/offload.c
@@ -6,6 +6,7 @@
 
 static int nc2_set_tx_csum(struct net_device *nd, u32 val);
 static int nc2_set_sg(struct net_device *nd, u32 val);
+static int nc2_set_tso(struct net_device *nd, u32 val);
 
 /* ---------------- Interface to the other domain ----------------------- */
 void nc2_handle_set_offload(struct netchannel2 *nc,
@@ -35,6 +36,11 @@ void nc2_handle_set_offload(struct netchannel2 *nc,
                   manually enable it every time. */
                nc2_set_sg(nc->net_device, msg.csum);
        }
+
+       if (msg.tcpv4_segmentation_offload != nc->allow_tso) {
+               nc->allow_tso = msg.tcpv4_segmentation_offload;
+               nc2_set_tso(nc->net_device, msg.tcpv4_segmentation_offload);
+       }
 }
 
 /* Tell the other end what sort of offloads it's allowed to use. */
@@ -46,6 +52,14 @@ void advertise_offloads(struct netchannel2 *nc)
 
        if (nc2_can_send_payload_bytes(&nc->rings.prod_ring, sizeof(msg))) {
                msg.csum = nc->use_rx_csum;
+               /* We always claim to be able to accept TSO packets,
+                  and don't provide any way of turning it off through
+                  ethtool.  We used to use the LRO flag, but that's
+                  not quite right: receiving an LRO packet and
+                  receiving a TSO one are subtly different, due to
+                  the way they get packed into the skbuff
+                  structure. */
+               msg.tcpv4_segmentation_offload = 1;
                nc2_send_message(&nc->rings.prod_ring,
                                 NETCHANNEL2_MSG_SET_OFFLOAD,
                                 0, &msg, sizeof(msg));
@@ -142,11 +156,26 @@ static int nc2_set_sg(struct net_device *nd, u32 val)
        return 0;
 }
 
+static int nc2_set_tso(struct net_device *nd, u32 val)
+{
+       struct netchannel2 *nc = netdev_priv(nd);
+       /* We only allow ourselves to use TSO if the other end's
+          allowed us to use sufficiently many fragments per
+          packet. */
+       if (val != 0 &&
+           (!nc->allow_tso ||
+            nc->rings.max_fragments_per_tx_packet < MAX_SKB_FRAGS))
+               return -EOPNOTSUPP;
+       return ethtool_op_set_tso(nd, val);
+}
+
 struct ethtool_ops nc2_ethtool_ops = {
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = nc2_set_tx_csum,
        .get_rx_csum = nc2_get_rx_csum,
        .set_rx_csum = nc2_set_rx_csum,
-       .get_sg      = ethtool_op_get_sg,
-       .set_sg      = nc2_set_sg,
+       .get_sg      = ethtool_op_get_sg,
+       .set_sg      = nc2_set_sg,
+       .get_tso     = ethtool_op_get_tso,
+       .set_tso     = nc2_set_tso
 };
diff --git a/drivers/net/xen-netchannel2/recv_packet.c b/drivers/net/xen-netchannel2/recv_packet.c
index 958a3a6..80c5d5d 100644
--- a/drivers/net/xen-netchannel2/recv_packet.c
+++ b/drivers/net/xen-netchannel2/recv_packet.c
@@ -169,6 +169,25 @@ void nc2_handle_packet_msg(struct netchannel2 *nc,
                        break;
                }
 
+               switch (msg.segmentation_type) {
+               case NC2_PACKET_SEGMENTATION_TYPE_none:
+                       break;
+               case NC2_PACKET_SEGMENTATION_TYPE_tcpv4:
+                       if (msg.mss == 0) {
+                               pr_debug("TSO request with mss == 0?\n");
+                               goto err;
+                       }
+                       skb_shinfo(skb)->gso_type =
+                               SKB_GSO_TCPV4 | SKB_GSO_DODGY;
+                       skb_shinfo(skb)->gso_size = msg.mss;
+                       skb_shinfo(skb)->gso_segs = 0;
+                       break;
+               default:
+                       pr_debug("Unknown segmentation offload type %d!\n",
+                                msg.segmentation_type);
+                       goto err;
+               }
+
                __skb_queue_tail(pending_rx_queue, skb);
 
                if (ncrp->pending_rx_hypercalls.nr_pending_gops >=
diff --git a/drivers/net/xen-netchannel2/xmit_packet.c b/drivers/net/xen-netchannel2/xmit_packet.c
index a3304f2..4c9e0b5 100644
--- a/drivers/net/xen-netchannel2/xmit_packet.c
+++ b/drivers/net/xen-netchannel2/xmit_packet.c
@@ -102,6 +102,14 @@ static void set_offload_flags(struct sk_buff *skb,
                msg->csum_offset = msg->csum_start + skb->csum_offset;
        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                msg->flags |= NC2_PACKET_FLAG_data_validated;
+
+       if (skb_shinfo(skb)->gso_size != 0) {
+               msg->mss = skb_shinfo(skb)->gso_size;
+               msg->segmentation_type = NC2_PACKET_SEGMENTATION_TYPE_tcpv4;
+       } else {
+               msg->mss = 0;
+               msg->segmentation_type = NC2_PACKET_SEGMENTATION_TYPE_none;
+       }
 }
 
 /* Transmit a packet which has previously been prepared with
diff --git a/include/xen/interface/io/netchannel2.h b/include/xen/interface/io/netchannel2.h
index 11bb469..1cca607 100644
--- a/include/xen/interface/io/netchannel2.h
+++ b/include/xen/interface/io/netchannel2.h
@@ -54,13 +54,13 @@ struct netchannel2_msg_packet {
                        packet message. */
        uint8_t type;
        uint8_t flags;
-       uint8_t pad0;
-       uint8_t pad1;
+       uint8_t segmentation_type;
+       uint8_t pad;
        uint16_t prefix_size;
-       uint16_t pad2;
+       uint16_t mss;
        uint16_t csum_start;
        uint16_t csum_offset;
-       /* Variable-size array.  The number of elements is determined
+       /* Variable-size array.  The number of elements is determined
           by the size of the message. */
        struct netchannel2_fragment frags[0];
 };
@@ -112,6 +112,9 @@ struct netchannel2_msg_packet {
 #define NC2_PACKET_TYPE_receiver_copy 1
 #define NC2_PACKET_TYPE_small 4
 
+#define NC2_PACKET_SEGMENTATION_TYPE_none  0
+#define NC2_PACKET_SEGMENTATION_TYPE_tcpv4 1
+
 /* Tell the other end that we're finished with a message it sent us,
    and it can release the transmit buffers etc.         This must be sent in
    response to receiver_copy and receiver_map packets. It must not be
@@ -140,7 +143,13 @@ struct netchannel2_msg_set_offload {
         * the other end does not have to perform the calculation.
         */
        uint8_t csum;
-       uint8_t pad;
+       /* Segmentation offload.  If this is 0, the other end must not
+        * generate any packet messages with a segmentation type other
+        * than NC2_PACKET_SEGMENTATION_TYPE_none.  If it is 1, the
+        * other end may also generate packets with a type of
+        * NC2_PACKET_SEGMENTATION_TYPE_tcpv4.
+        */
+       uint8_t tcpv4_segmentation_offload;
        uint16_t reserved;
 };
 
-- 
1.6.3.1
