diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3f27707..1a6f816 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -776,8 +776,7 @@ static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
 
 	vif = list_first_entry(&netbk->net_schedule_list,
 			       struct xenvif, schedule_list);
-	if (!vif)
-		goto out;
+	BUG_ON(!vif);
 
 	xenvif_get(vif);
 
@@ -857,7 +856,9 @@ static void tx_credit_callback(unsigned long data)
 }
 
 static void netbk_tx_err(struct xenvif *vif,
-			 struct xen_netif_tx_request *txp, RING_IDX end)
+			 struct xen_netif_tx_request *txp,
+			 RING_IDX end,
+			 int fatal)
 {
 	RING_IDX cons = vif->tx.req_cons;
 
@@ -868,8 +869,15 @@ static void netbk_tx_err(struct xenvif *vif,
 		txp = RING_GET_REQUEST(&vif->tx, cons++);
 	} while (1);
 	vif->tx.req_cons = cons;
-	xen_netbk_check_rx_xenvif(vif);
+	if (!fatal) {
+		/* If this is a fatal error then we don't need to poll
+		   the device again (and doing so might lead to some
+		   quite bad behaviour if the ring structure has
+		   become corrupted in some way). */
+		xen_netbk_check_rx_xenvif(vif);
+	}
 	xenvif_put(vif);
+
 }
 
 static int netbk_count_requests(struct xenvif *vif,
@@ -1232,7 +1240,9 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 	struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
 	struct sk_buff *skb;
 	int ret;
+	int cntr;
 
+	cntr = 0;
 	while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
 		!list_empty(&netbk->net_schedule_list)) {
 		struct xenvif *vif;
@@ -1245,15 +1255,43 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		int work_to_do;
 		unsigned int data_len;
 		pending_ring_idx_t index;
+		int loud;
+
+		cntr++;
+		loud = 0;
+		if (cntr % 1000 == 0) {
+			printk("<0>In xen_netbk_tx_build_gops; done %d iterations so far\n",
+			       cntr);
+			printk("<0>nr_pending_reqs %d\n",
+			       nr_pending_reqs(netbk));
+			loud = 1;
+		}
 
 		/* Get a netif from the list with work to do. */
 		vif = poll_net_schedule_list(netbk);
-		if (!vif)
+		if (!vif) {
+			/* This can sometimes happen because the test
+			   of list_empty(net_schedule_list) at the top
+			   of the loop is unlocked.  Just go back and
+			   have another look. */
+			printk("<0>No vif, keep going\n");
 			continue;
+		}
+
+		if (loud) {
+			printk("<0>Ring status: rsp_prod_pvt %x, req_cons %x\n",
+			       vif->tx.rsp_prod_pvt, vif->tx.req_cons);
+			printk("<0>Shared: req_prod %x, req_event %x, rsp_prod %x, rsp_event %x\n",
+			       vif->tx.sring->req_prod,
+			       vif->tx.sring->req_event,
+			       vif->tx.sring->rsp_prod,
+			       vif->tx.sring->rsp_event);
+		}
 
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
 		if (!work_to_do) {
 			xenvif_put(vif);
+			printk("<0>No work on %p\n", vif);
 			continue;
 		}
 
@@ -1264,6 +1302,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		/* Credit-based scheduling. */
 		if (txreq.size > vif->remaining_credit &&
 		    tx_credit_exceeded(vif, txreq.size)) {
+			printk("<0>Stopped by credit scheduler\n");
 			xenvif_put(vif);
 			continue;
 		}
@@ -1279,14 +1318,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 							   work_to_do);
 			idx = vif->tx.req_cons;
 			if (unlikely(work_to_do < 0)) {
-				netbk_tx_err(vif, &txreq, idx);
+				netbk_tx_err(vif, &txreq, idx, 1);
 				continue;
 			}
 		}
 
 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0)) {
-			netbk_tx_err(vif, &txreq, idx - ret);
+			netbk_tx_err(vif, &txreq, idx - ret, 1);
 			continue;
 		}
 		idx += ret;
@@ -1294,7 +1333,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		if (unlikely(txreq.size < ETH_HLEN)) {
 			netdev_dbg(vif->dev,
 				   "Bad packet size: %d\n", txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_tx_err(vif, &txreq, idx, 1);
 			continue;
 		}
 
@@ -1304,7 +1343,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (txreq.offset&~PAGE_MASK) + txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_tx_err(vif, &txreq, idx, 1);
 			continue;
 		}
 
@@ -1320,7 +1359,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		if (unlikely(skb == NULL)) {
 			netdev_dbg(vif->dev,
 				   "Can't allocate a skb in start_xmit.\n");
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_tx_err(vif, &txreq, idx, 0);
 			break;
 		}
 
@@ -1333,7 +1372,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 			if (netbk_set_skb_gso(vif, skb, gso)) {
 				kfree_skb(skb);
-				netbk_tx_err(vif, &txreq, idx);
+				netbk_tx_err(vif, &txreq, idx, 1);
+				printk("<0>Killed by netbk_set_skb_gso\n");
 				continue;
 			}
 		}
@@ -1342,7 +1382,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
 		if (!page) {
 			kfree_skb(skb);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_tx_err(vif, &txreq, idx, 0);
+			printk("<0>No pages for payload?\n");
 			continue;
 		}
 
@@ -1382,7 +1423,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 						     skb, txfrags, gop);
 		if (request_gop == NULL) {
 			kfree_skb(skb);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_tx_err(vif, &txreq, idx, 0);
 			continue;
 		}
 		gop = request_gop;
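
For reference, the signature change above gives netbk_tx_err a fatal flag: error responses are still pushed back to the frontend in every case, but only non-fatal errors (failed skb or page allocations, in the hunks passing 0) re-poll the ring via xen_netbk_check_rx_xenvif, since after a fatal error (bad request sizes, over-long fragment chains, GSO setup failure, passing 1) the ring may no longer be trustworthy. Below is a minimal stand-alone C sketch of that pattern; tx_err() and repoll_ring() are hypothetical stubs used only for illustration, not the kernel functions.

#include <stdio.h>

/* Hypothetical stand-in for the backend's "check the ring for more
 * work" step (xen_netbk_check_rx_xenvif in the patch above). */
static void repoll_ring(void)
{
	printf("re-polling the shared ring\n");
}

/* Sketch of the error path the patch introduces: error responses are
 * pushed in all cases, but the ring is only re-polled when the error
 * was non-fatal, because a fatal error may mean the ring itself is
 * corrupted. */
static void tx_err(int fatal)
{
	/* ... push error responses back to the frontend here ... */
	if (!fatal)
		repoll_ring();
}

int main(void)
{
	tx_err(0);	/* non-fatal, e.g. a transient allocation failure */
	tx_err(1);	/* fatal, e.g. a malformed request from the guest */
	return 0;
}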