[Xen-devel] [11/11] [NET] back: Transmit TSO packets if supported
Hi:

[NET] back: Transmit TSO packets if supported

This patch adds TSO transmission support to the backend.

Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>

Cheers,
-- 
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <herbert@xxxxxxxxxxxxxxxxxxx>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--

diff -r 5861968091dd -r 51252bc644da linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Jul 07 23:38:54 2006 +1000
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Jul 07 23:38:57 2006 +1000
@@ -90,12 +90,26 @@ static int netbk_set_sg(struct net_devic
 	return ethtool_op_set_sg(dev, data);
 }
 
+static int netbk_set_tso(struct net_device *dev, u32 data)
+{
+	if (data) {
+		netif_t *netif = netdev_priv(dev);
+
+		if (!(netif->features & NETIF_F_TSO))
+			return -ENOSYS;
+	}
+
+	return ethtool_op_set_tso(dev, data);
+}
+
 static struct ethtool_ops network_ethtool_ops =
 {
 	.get_tx_csum = ethtool_op_get_tx_csum,
 	.set_tx_csum = ethtool_op_set_tx_csum,
 	.get_sg = ethtool_op_get_sg,
 	.set_sg = netbk_set_sg,
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = netbk_set_tso,
 };
 
 netif_t *netif_alloc(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN])
diff -r 5861968091dd -r 51252bc644da linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Jul 07 23:38:54 2006 +1000
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Jul 07 23:38:57 2006 +1000
@@ -50,12 +50,12 @@ static void make_tx_response(netif_t *ne
 static void make_tx_response(netif_t *netif,
 			     netif_tx_request_t *txp,
 			     s8       st);
-static int make_rx_response(netif_t *netif,
-			    u16      id,
-			    s8       st,
-			    u16      offset,
-			    u16      size,
-			    u16      flags);
+static netif_rx_response_t *make_rx_response(netif_t *netif,
+					     u16      id,
+					     s8       st,
+					     u16      offset,
+					     u16      size,
+					     u16      flags);
 
 static void net_tx_action(unsigned long unused);
 static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
@@ -225,9 +225,9 @@ static inline int netbk_queue_full(netif
 {
 	RING_IDX peek = netif->rx_req_cons_peek;
 
-	return netif->rx.sring->req_prod - peek <= MAX_SKB_FRAGS ||
+	return netif->rx.sring->req_prod - peek <= MAX_SKB_FRAGS + 1 ||
 	       netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek <=
-	       MAX_SKB_FRAGS;
+	       MAX_SKB_FRAGS + 1;
 }
 
 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -263,7 +263,8 @@ int netif_be_start_xmit(struct sk_buff *
 		skb = nskb;
 	}
 
-	netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1;
+	netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
+				   !!skb_shinfo(skb)->gso_size;
 	netif_get(netif);
 
 	if (netbk_can_queue(dev) && netbk_queue_full(netif))
@@ -340,11 +341,16 @@ static void netbk_gop_skb(struct sk_buff
 	netif_t *netif = netdev_priv(skb->dev);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int i;
+	int extra;
+
+	meta[count].frag.page_offset = skb_shinfo(skb)->gso_type;
+	meta[count].frag.size = skb_shinfo(skb)->gso_size;
+	extra = !!meta[count].frag.size + 1;
 
 	for (i = 0; i < nr_frags; i++) {
 		meta[++count].frag = skb_shinfo(skb)->frags[i];
 		meta[count].id = netbk_gop_frag(netif, meta[count].frag.page,
-						count, i + 1);
+						count, i + extra);
 	}
 
 	/*
@@ -354,7 +360,7 @@ static void netbk_gop_skb(struct sk_buff
 	meta[count - nr_frags].id = netbk_gop_frag(netif,
 						   virt_to_page(skb->data),
 						   count - nr_frags, 0);
-	netif->rx.req_cons += nr_frags + 1;
+	netif->rx.req_cons += nr_frags + extra;
 }
 
 static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
@@ -415,6 +421,8 @@ static void net_rx_action(unsigned long
 	netif_t *netif = NULL;
 	s8 status;
 	u16 id, irq, flags;
+	netif_rx_response_t *resp;
+	struct netif_extra_info *extra;
 	multicall_entry_t *mcl;
 	struct sk_buff_head rxq;
 	struct sk_buff *skb;
@@ -504,8 +512,33 @@ static void net_rx_action(unsigned long
 		else if (skb->proto_data_valid) /* remote but checksummed? */
 			flags |= NETRXF_data_validated;
 
-		make_rx_response(netif, id, status, offset_in_page(skb->data),
-				 skb_headlen(skb), flags);
+		resp = make_rx_response(netif, id, status,
+					offset_in_page(skb->data),
+					skb_headlen(skb), flags);
+
+		extra = NULL;
+
+		if (meta[count].frag.size) {
+			struct netif_extra_info *gso =
+				(struct netif_extra_info *)
+				RING_GET_RESPONSE(&netif->rx,
+						  netif->rx.rsp_prod_pvt++);
+
+			if (extra)
+				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+			else
+				resp->flags |= NETRXF_extra_info;
+
+			gso->u.gso.size = meta[count].frag.size;
+			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+			gso->u.gso.pad = 0;
+			gso->u.gso.features = 0;
+
+			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
+			gso->flags = 0;
+			extra = gso;
+		}
+
 		netbk_add_frag_responses(netif, status,
 					 meta + count + 1,
 					 nr_frags);
@@ -1182,12 +1215,12 @@ static void make_tx_response(netif_t *ne
 #endif
 }
 
-static int make_rx_response(netif_t *netif,
-			    u16      id,
-			    s8       st,
-			    u16      offset,
-			    u16      size,
-			    u16      flags)
+static netif_rx_response_t *make_rx_response(netif_t *netif,
+					     u16      id,
+					     s8       st,
+					     u16      offset,
+					     u16      size,
+					     u16      flags)
 {
 	RING_IDX i = netif->rx.rsp_prod_pvt;
 	netif_rx_response_t *resp;
@@ -1202,7 +1235,7 @@ static int make_rx_response(netif_t *net
 
 	netif->rx.rsp_prod_pvt = ++i;
 
-	return 0;
+	return resp;
 }
 
 #ifdef NETBE_DEBUG_INTERRUPT
diff -r 5861968091dd -r 51252bc644da linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Fri Jul 07 23:38:54 2006 +1000
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Fri Jul 07 23:38:57 2006 +1000
@@ -384,6 +384,14 @@ static int connect_rings(struct backend_
 		be->netif->dev->features |= NETIF_F_SG;
 	}
 
+	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
+			 &val) < 0)
+		val = 0;
+	if (val) {
+		be->netif->features |= NETIF_F_TSO;
+		be->netif->dev->features |= NETIF_F_TSO;
+	}
+
 	/* Map the shared frame, irq etc. */
 	err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
 	if (err) {

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
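For context: the xenbus.c hunk above only enables NETIF_F_TSO on the vif when the frontend has advertised "feature-gso-tcpv4" in its own xenstore node (the backend reads it via dev->otherend). A minimal sketch of the frontend side of that handshake is shown below. It is not part of this patch; the function name advertise_gso_tcpv4 and the use of XBT_NIL instead of the frontend's usual xenbus transaction are simplifying assumptions for illustration.

#include <xen/xenbus.h>

/*
 * Illustrative sketch only -- not part of the patch above.  A frontend
 * writes feature-gso-tcpv4 = 1 under its own xenstore node; the
 * backend's connect_rings() then reads the key from dev->otherend and
 * turns on NETIF_F_TSO for the corresponding vif.
 */
static int advertise_gso_tcpv4(struct xenbus_device *dev)
{
	int err;

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-gso-tcpv4", "%d", 1);
	if (err)
		xenbus_dev_fatal(dev, err, "writing feature-gso-tcpv4");

	return err;
}

Once the key is present and the backend has set NETIF_F_TSO, the new get_tso/set_tso ethtool hooks in interface.c let dom0 toggle the offload on the vif from userspace.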