[Xen-changelog] [linux-2.6.18-xen] netback: support multiple extra info fragments passed from frontend
# HG changeset patch
# User Paul Durrant <paul.durrant@xxxxxxxxxx>
# Date 1464185023 -7200
#      Wed May 25 16:03:43 2016 +0200
# Node ID 4437d405cfd7099ea45ac2028338990730c24be0
# Parent  d98fd9aa81eaf7b0108fc1b2bd1d7bbd248b0e81
netback: support multiple extra info fragments passed from frontend

The code does not currently support a frontend passing multiple extra
info fragments to the backend in a tx request. The xenvif_get_extras()
function handles multiple extra_info fragments but make_tx_response()
assumes there is only ever a single extra info fragment.

This patch modifies xenvif_get_extras() to pass back a count of extra
info fragments, which is then passed to make_tx_response() (after
possibly being stashed in pending_tx_info for deferred responses).

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>

Adjust for legacy netback. Clear extra_count at the end of
netbk_tx_err()'s loop body.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---

diff -r d98fd9aa81ea -r 4437d405cfd7 drivers/xen/netback/netback.c
--- a/drivers/xen/netback/netback.c     Wed May 25 16:01:00 2016 +0200
+++ b/drivers/xen/netback/netback.c     Wed May 25 16:03:43 2016 +0200
@@ -58,7 +58,8 @@ struct netbk_tx_pending_inuse {
 static void netif_idx_release(u16 pending_idx);
 static void make_tx_response(netif_t *netif,
                              netif_tx_request_t *txp,
-                             s8       st);
+                             s8       st,
+                             unsigned int extra_count);
 static netif_rx_response_t *make_rx_response(netif_t *netif,
                                              u16      id,
                                              s8       st,
@@ -120,6 +121,7 @@ static inline int netif_page_index(struc
 
 static struct pending_tx_info {
         netif_tx_request_t req;
+        unsigned int extra_count;
         netif_t *netif;
 } pending_tx_info[MAX_PENDING_REQS];
 static u16 pending_ring[MAX_PENDING_REQS];
@@ -991,7 +993,8 @@ inline static void net_tx_action_dealloc
                 netif = pending_tx_info[pending_idx].netif;
 
                 make_tx_response(netif, &pending_tx_info[pending_idx].req,
-                                 NETIF_RSP_OKAY);
+                                 NETIF_RSP_OKAY,
+                                 pending_tx_info[pending_idx].extra_count);
 
                 /* Ready for next use. */
                 gnttab_reset_grant_page(mmap_pages[pending_idx]);
@@ -1004,15 +1007,17 @@ inline static void net_tx_action_dealloc
         }
 }
 
-static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
+static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end,
+                         unsigned int extra_count)
 {
         RING_IDX cons = netif->tx.req_cons;
 
         do {
-                make_tx_response(netif, txp, NETIF_RSP_ERROR);
+                make_tx_response(netif, txp, NETIF_RSP_ERROR, extra_count);
                 if (cons == end)
                         break;
                 txp = RING_GET_REQUEST(&netif->tx, cons++);
+                extra_count = 0;
         } while (1);
         netif->tx.req_cons = cons;
         netif_schedule_work(netif);
@@ -1030,7 +1035,8 @@ static void netbk_fatal_tx_err(netif_t *
 }
 
 static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
-                                netif_tx_request_t *txp, int work_to_do)
+                                netif_tx_request_t *txp,
+                                unsigned int extra_count, int work_to_do)
 {
         RING_IDX cons = netif->tx.req_cons;
         int frags = 0, drop_err = 0;
@@ -1086,7 +1092,7 @@ static int netbk_count_requests(netif_t
         } while ((txp++)->flags & NETTXF_more_data);
 
         if (drop_err) {
-                netbk_tx_err(netif, first, cons + frags);
+                netbk_tx_err(netif, first, cons + frags, extra_count);
                 return drop_err;
         }
 
@@ -1116,6 +1122,7 @@ static gnttab_map_grant_ref_t *netbk_get
                 memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
                 netif_get(netif);
                 pending_tx_info[pending_idx].netif = netif;
+                pending_tx_info[pending_idx].extra_count = 0;
                 frags[i].page = (void *)pending_idx;
         }
 
@@ -1129,6 +1136,7 @@ static int netbk_tx_check_mop(struct sk_
         int pending_idx = *((u16 *)skb->data);
         netif_t *netif = pending_tx_info[pending_idx].netif;
         netif_tx_request_t *txp;
+        unsigned int extra_count;
         struct skb_shared_info *shinfo = skb_shinfo(skb);
         int nr_frags = shinfo->nr_frags;
         int i, err, start;
@@ -1137,7 +1145,8 @@ static int netbk_tx_check_mop(struct sk_
         err = mop->status;
         if (unlikely(err != GNTST_okay)) {
                 txp = &pending_tx_info[pending_idx].req;
-                make_tx_response(netif, txp, NETIF_RSP_ERROR);
+                extra_count = pending_tx_info[pending_idx].extra_count;
+                make_tx_response(netif, txp, NETIF_RSP_ERROR, extra_count);
                 pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
                 netif_put(netif);
         } else {
@@ -1168,7 +1177,7 @@ static int netbk_tx_check_mop(struct sk_
 
                 /* Error on this fragment: respond to client with an error. */
                 txp = &pending_tx_info[pending_idx].req;
-                make_tx_response(netif, txp, NETIF_RSP_ERROR);
+                make_tx_response(netif, txp, NETIF_RSP_ERROR, 0);
                 pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
                 netif_put(netif);
 
@@ -1220,8 +1229,8 @@ static void netbk_fill_frags(struct sk_b
         }
 }
 
-int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
-                     int work_to_do)
+static int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
+                            unsigned int *extra_count, int work_to_do)
 {
         struct netif_extra_info extra;
         RING_IDX cons = netif->tx.req_cons;
@@ -1237,9 +1246,12 @@ int netbk_get_extras(netif_t *netif, str
                 memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
                        sizeof(extra));
                 barrier();
+
+                netif->tx.req_cons = ++cons;
+                ++*extra_count;
+
                 if (unlikely(!extra.type ||
                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-                        netif->tx.req_cons = ++cons;
                         printk(KERN_ERR "%s: Invalid extra type: %d\n",
                                netif->dev->name, extra.type);
                         netbk_fatal_tx_err(netif);
@@ -1247,7 +1259,6 @@ int netbk_get_extras(netif_t *netif, str
                 }
 
                 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
-                netif->tx.req_cons = ++cons;
         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
         return work_to_do;
@@ -1300,6 +1311,8 @@ static void net_tx_action(unsigned long
         mop = tx_map_ops;
         while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
                 !list_empty(&net_schedule_list)) {
+                unsigned int extra_count = 0;
+
                 /* Get a netif from the list with work to do. */
                 netif = poll_net_schedule_list();
                 /*
@@ -1375,14 +1388,15 @@ static void net_tx_action(unsigned long
 
                 memset(extras, 0, sizeof(extras));
                 if (txreq.flags & NETTXF_extra_info) {
-                        work_to_do = netbk_get_extras(netif, extras,
+                        work_to_do = netbk_get_extras(netif, extras, &extra_count,
                                                       work_to_do);
                         i = netif->tx.req_cons;
                         if (unlikely(work_to_do < 0))
                                 continue;
                 }
 
-                ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
+                ret = netbk_count_requests(netif, &txreq, txfrags, extra_count,
+                                           work_to_do);
                 if (unlikely(ret < 0))
                         continue;
 
@@ -1390,7 +1404,7 @@ static void net_tx_action(unsigned long
 
                 if (unlikely(txreq.size < ETH_HLEN)) {
                         DPRINTK("Bad packet size: %d\n", txreq.size);
-                        netbk_tx_err(netif, &txreq, i);
+                        netbk_tx_err(netif, &txreq, i, extra_count);
                         continue;
                 }
 
@@ -1413,7 +1427,7 @@ static void net_tx_action(unsigned long
                                 GFP_ATOMIC | __GFP_NOWARN);
                 if (unlikely(skb == NULL)) {
                         DPRINTK("Can't allocate a skb in start_xmit.\n");
-                        netbk_tx_err(netif, &txreq, i);
+                        netbk_tx_err(netif, &txreq, i, extra_count);
                         break;
                 }
 
@@ -1439,6 +1453,7 @@ static void net_tx_action(unsigned long
                 memcpy(&pending_tx_info[pending_idx].req,
                        &txreq, sizeof(txreq));
                 pending_tx_info[pending_idx].netif = netif;
+                pending_tx_info[pending_idx].extra_count = extra_count;
                 *((u16 *)skb->data) = pending_idx;
                 __skb_put(skb, data_len);
 
@@ -1598,7 +1613,8 @@ irqreturn_t netif_be_int(int irq, void *
 
 static void make_tx_response(netif_t *netif,
                              netif_tx_request_t *txp,
-                             s8       st)
+                             s8       st,
+                             unsigned int extra_count)
 {
         RING_IDX i = netif->tx.rsp_prod_pvt;
         netif_tx_response_t *resp;
@@ -1608,7 +1624,7 @@ static void make_tx_response(netif_t *ne
         resp->id     = txp->id;
         resp->status = st;
 
-        if (txp->flags & NETTXF_extra_info)
+        while (extra_count--)
                 RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
 
         netif->tx.rsp_prod_pvt = ++i;
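The behavioural core of the patch is the last hunk: make_tx_response() used
to test NETTXF_extra_info and pad the response ring with at most one
NETIF_RSP_NULL slot, whereas it now emits one null slot per extra info
fragment recorded in extra_count. The standalone sketch below models just
that padding step, using a plain array in place of the real Xen shared-ring
macros; the tx_response struct, ring[], and rsp_prod names are illustrative
stand-ins, not netback's actual types.

/*
 * Minimal model of the new make_tx_response() behaviour. A plain array
 * stands in for the Xen shared response ring; names are illustrative.
 * Build with: cc -std=c99 -Wall sketch.c
 */
#include <stdio.h>

#define RING_SIZE 8
#define RSP_OKAY  0   /* NETIF_RSP_OKAY */
#define RSP_NULL  1   /* NETIF_RSP_NULL: slot consumed, carries no payload */

struct tx_response {
        unsigned int id;
        int status;
};

static struct tx_response ring[RING_SIZE];
static unsigned int rsp_prod;   /* response-ring producer index */

static void make_tx_response(unsigned int id, int status,
                             unsigned int extra_count)
{
        /* The real response for the tx request itself. */
        ring[rsp_prod++ % RING_SIZE] = (struct tx_response){ id, status };

        /*
         * The change the patch makes: previously at most one null slot
         * was written, gated on NETTXF_extra_info; now one null slot is
         * written per extra info fragment the frontend sent.
         */
        while (extra_count--)
                ring[rsp_prod++ % RING_SIZE] =
                        (struct tx_response){ 0, RSP_NULL };
}

int main(void)
{
        /* A request whose frontend passed two extra info fragments. */
        make_tx_response(42, RSP_OKAY, 2);

        for (unsigned int i = 0; i < rsp_prod; i++)
                printf("slot %u: id=%u status=%d\n",
                       i, ring[i].id, ring[i].status);
        return 0;
}

The point of the padding is that each extra info fragment occupies a slot
on the request ring, so the backend must produce a matching number of
response slots to keep the two rings' producer/consumer indexes in step.
Stashing the count in pending_tx_info lets deferred responses, issued from
net_tx_action_dealloc(), perform the same padding long after the extras
were parsed.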
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog