drivers/xen/*net*: use skb_is_gso() Rather than open-coding tests of skb_shinfo(skb)->gso_size, use the skb_is_gso() helper throughout the Xen network drivers; no functional change intended. Suggested-by: Paul Durrant Signed-off-by: Jan Beulich --- a/drivers/xen/netback/netback.c +++ b/drivers/xen/netback/netback.c @@ -322,7 +322,7 @@ int netif_be_start_xmit(struct sk_buff * } netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 + - !!skb_shinfo(skb)->gso_size; + !!skb_is_gso(skb); netif_get(netif); if (netbk_can_queue(dev) && netbk_queue_full(netif)) { --- a/drivers/xen/netfront/netfront.c +++ b/drivers/xen/netfront/netfront.c @@ -1019,7 +1019,7 @@ static int network_start_xmit(struct sk_ #endif #if HAVE_TSO - if (skb_shinfo(skb)->gso_size) { + if (skb_is_gso(skb)) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); --- a/drivers/xen/sfc_netfront/accel_vi.c +++ b/drivers/xen/sfc_netfront/accel_vi.c @@ -676,13 +676,11 @@ netfront_accel_vi_tx_post(netfront_accel VPRINTK("%s: %d bytes, gso %d\n", __FUNCTION__, skb->len, skb_shinfo(skb)->gso_size); - if (skb_shinfo(skb)->gso_size) { + if (skb_is_gso(skb)) return netfront_accel_enqueue_skb_tso(vnic, skb); - } - if (skb->len <= NETFRONT_ACCEL_TX_BUF_LENGTH) { + if (skb->len <= NETFRONT_ACCEL_TX_BUF_LENGTH) return netfront_accel_enqueue_skb_single(vnic, skb); - } return netfront_accel_enqueue_skb_multi(vnic, skb); }