
Re: [Xen-devel] [PATCH net-next v4 6/9] xen-netback: Handle guests with too many frags



On Tue, Jan 14, 2014 at 08:39:52PM +0000, Zoltan Kiss wrote:
[...]
>       /* Skip first skb fragment if it is on same page as header fragment. */
> @@ -832,6 +851,29 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
>  
>       BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
>  
> +     if (frag_overflow) {
> +             struct sk_buff *nskb = xenvif_alloc_skb(0);
> +             if (unlikely(nskb == NULL)) {
> +                     netdev_err(vif->dev,
> +                                "Can't allocate the frag_list skb.\n");

This, and the other occurrences of netdev_* logging in this patch, need to be
rate limited. Otherwise you risk flooding the kernel log when the system is
under memory pressure.

> +                     return NULL;
> +             }
> +
> +             shinfo = skb_shinfo(nskb);
> +             frags = shinfo->frags;
> +
> +             for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
> +                  shinfo->nr_frags++, txp++, gop++) {
> +                     index = pending_index(vif->pending_cons++);
> +                     pending_idx = vif->pending_ring[index];
> +                     xenvif_tx_create_gop(vif, pending_idx, txp, gop);
> +                     frag_set_pending_idx(&frags[shinfo->nr_frags],
> +                                          pending_idx);
> +             }
> +
> +             skb_shinfo(skb)->frag_list = nskb;
> +     }
> +
>       return gop;
>  }
>  
[...]
> @@ -1537,6 +1613,32 @@ static int xenvif_tx_submit(struct xenvif *vif)
>                                 pending_idx :
>                                 INVALID_PENDING_IDX);
>  
> +             if (skb_shinfo(skb)->frag_list) {
> +                     nskb = skb_shinfo(skb)->frag_list;
> +                     xenvif_fill_frags(vif, nskb, INVALID_PENDING_IDX);
> +                     skb->len += nskb->len;
> +                     skb->data_len += nskb->len;
> +                     skb->truesize += nskb->truesize;
> +                     skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
> +                     skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
> +                     vif->tx_zerocopy_sent += 2;
> +                     nskb = skb;
> +
> +                     skb = skb_copy_expand(skb,
> +                                           0,
> +                                           0,
> +                                           GFP_ATOMIC | __GFP_NOWARN);
> +                     if (!skb) {
> +                             netdev_dbg(vif->dev,
> +                                        "Can't consolidate skb with too many fragments\n");

Rate limit this one as well.

> +                             if (skb_shinfo(nskb)->destructor_arg)
> +                                     skb_shinfo(nskb)->tx_flags |=
> +                                             SKBTX_DEV_ZEROCOPY;

Why is this needed? nskb is the saved pointer to the original skb, which
already has SKBTX_DEV_ZEROCOPY set in tx_flags. Did I miss something?

Wei.

> +                             kfree_skb(nskb);
> +                             continue;
> +                     }
> +                     skb_shinfo(skb)->destructor_arg = NULL;
> +             }
>               if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
>                       int target = min_t(int, skb->len, PKT_PROT_LEN);
>                       __pskb_pull_tail(skb, target - skb_headlen(skb));
> @@ -1590,6 +1692,9 @@ static int xenvif_tx_submit(struct xenvif *vif)
>               }
>  
>               netif_receive_skb(skb);
> +
> +             if (nskb)
> +                     kfree_skb(nskb);
>       }
>  
>       return work_done;
