[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v2 2/4] xen/netfront: don't read data from request on the ring page



In order to avoid a malicious backend being able to influence the local
processing of a request, build the request locally first and then copy
it to the ring page. Any reading from the request influencing the
processing in the frontend needs to be done on the local instance.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V2:
- drop local tx variable (Jan Beulich)
- fix oversight of reading value from ring page (Jan Beulich)
---
 drivers/net/xen-netfront.c | 86 +++++++++++++++++++-------------------
 1 file changed, 42 insertions(+), 44 deletions(-)

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 003cdf2ffc92..714fe9d2c534 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -435,7 +435,8 @@ struct xennet_gnttab_make_txreq {
        struct netfront_queue *queue;
        struct sk_buff *skb;
        struct page *page;
-       struct xen_netif_tx_request *tx; /* Last request */
+       struct xen_netif_tx_request *tx;      /* Last request on ring page */
+       struct xen_netif_tx_request tx_local; /* Last request local copy */
        unsigned int size;
 };
 
@@ -463,30 +464,27 @@ static void xennet_tx_setup_grant(unsigned long gfn, 
unsigned int offset,
        queue->grant_tx_page[id] = page;
        queue->grant_tx_ref[id] = ref;
 
-       tx->id = id;
-       tx->gref = ref;
-       tx->offset = offset;
-       tx->size = len;
-       tx->flags = 0;
+       info->tx_local.id = id;
+       info->tx_local.gref = ref;
+       info->tx_local.offset = offset;
+       info->tx_local.size = len;
+       info->tx_local.flags = 0;
+
+       *tx = info->tx_local;
 
        info->tx = tx;
-       info->size += tx->size;
+       info->size += info->tx_local.size;
 }
 
 static struct xen_netif_tx_request *xennet_make_first_txreq(
-       struct netfront_queue *queue, struct sk_buff *skb,
-       struct page *page, unsigned int offset, unsigned int len)
+       struct xennet_gnttab_make_txreq *info,
+       unsigned int offset, unsigned int len)
 {
-       struct xennet_gnttab_make_txreq info = {
-               .queue = queue,
-               .skb = skb,
-               .page = page,
-               .size = 0,
-       };
+       info->size = 0;
 
-       gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
+       gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, 
info);
 
-       return info.tx;
+       return info->tx;
 }
 
 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
@@ -499,35 +497,27 @@ static void xennet_make_one_txreq(unsigned long gfn, 
unsigned int offset,
        xennet_tx_setup_grant(gfn, offset, len, data);
 }
 
-static struct xen_netif_tx_request *xennet_make_txreqs(
-       struct netfront_queue *queue, struct xen_netif_tx_request *tx,
-       struct sk_buff *skb, struct page *page,
+static void xennet_make_txreqs(
+       struct xennet_gnttab_make_txreq *info,
+       struct page *page,
        unsigned int offset, unsigned int len)
 {
-       struct xennet_gnttab_make_txreq info = {
-               .queue = queue,
-               .skb = skb,
-               .tx = tx,
-       };
-
        /* Skip unused frames from start of page */
        page += offset >> PAGE_SHIFT;
        offset &= ~PAGE_MASK;
 
        while (len) {
-               info.page = page;
-               info.size = 0;
+               info->page = page;
+               info->size = 0;
 
                gnttab_foreach_grant_in_range(page, offset, len,
                                              xennet_make_one_txreq,
-                                             &info);
+                                             info);
 
                page++;
                offset = 0;
-               len -= info.size;
+               len -= info->size;
        }
-
-       return info.tx;
 }
 
 /*
@@ -580,10 +570,14 @@ static int xennet_xdp_xmit_one(struct net_device *dev,
 {
        struct netfront_info *np = netdev_priv(dev);
        struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+       struct xennet_gnttab_make_txreq info = {
+               .queue = queue,
+               .skb = NULL,
+               .page = virt_to_page(xdpf->data),
+       };
        int notify;
 
-       xennet_make_first_txreq(queue, NULL,
-                               virt_to_page(xdpf->data),
+       xennet_make_first_txreq(&info,
                                offset_in_page(xdpf->data),
                                xdpf->len);
 
@@ -638,7 +632,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, 
struct net_device *dev
 {
        struct netfront_info *np = netdev_priv(dev);
        struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
-       struct xen_netif_tx_request *tx, *first_tx;
+       struct xen_netif_tx_request *first_tx;
        unsigned int i;
        int notify;
        int slots;
@@ -647,6 +641,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, 
struct net_device *dev
        unsigned int len;
        unsigned long flags;
        struct netfront_queue *queue = NULL;
+       struct xennet_gnttab_make_txreq info = { };
        unsigned int num_queues = dev->real_num_tx_queues;
        u16 queue_index;
        struct sk_buff *nskb;
@@ -704,21 +699,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, 
struct net_device *dev
        }
 
        /* First request for the linear area. */
-       first_tx = tx = xennet_make_first_txreq(queue, skb,
-                                               page, offset, len);
-       offset += tx->size;
+       info.queue = queue;
+       info.skb = skb;
+       info.page = page;
+       first_tx = xennet_make_first_txreq(&info, offset, len);
+       offset += info.tx_local.size;
        if (offset == PAGE_SIZE) {
                page++;
                offset = 0;
        }
-       len -= tx->size;
+       len -= info.tx_local.size;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                /* local packet? */
-               tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
+               first_tx->flags |= XEN_NETTXF_csum_blank |
+                                  XEN_NETTXF_data_validated;
        else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                /* remote but checksummed. */
-               tx->flags |= XEN_NETTXF_data_validated;
+               first_tx->flags |= XEN_NETTXF_data_validated;
 
        /* Optional extra info after the first request. */
        if (skb_shinfo(skb)->gso_size) {
@@ -727,7 +725,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, 
struct net_device *dev
                gso = (struct xen_netif_extra_info *)
                        RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 
-               tx->flags |= XEN_NETTXF_extra_info;
+               first_tx->flags |= XEN_NETTXF_extra_info;
 
                gso->u.gso.size = skb_shinfo(skb)->gso_size;
                gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
@@ -741,12 +739,12 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, 
struct net_device *dev
        }
 
        /* Requests for the rest of the linear area. */
-       tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+       xennet_make_txreqs(&info, page, offset, len);
 
        /* Requests for all the frags. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
+               xennet_make_txreqs(&info, skb_frag_page(frag),
                                        skb_frag_off(frag),
                                        skb_frag_size(frag));
        }
-- 
2.26.2




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.