
[Xen-changelog] [linux-2.6.18-xen] netback: shutdown the ring if it contains garbage


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-linux-2.6.18-xen <patchbot@xxxxxxx>
  • Date: Tue, 05 Feb 2013 13:33:03 +0000
  • Delivery-date: Tue, 05 Feb 2013 13:33:11 +0000
  • List-id: "Change log for Mercurial (receive only)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1360056009 -3600
# Node ID 5108c6901b3057c52c180b589469c68ea9c2121f
# Parent  008c65d4caed95fb356741e20c20b1d63bca5333
netback: shutdown the ring if it contains garbage

A buggy or malicious frontend should not be able to confuse netback.
If we spot anything which is not as it should be then shut down the
device and don't try to continue with the ring in a potentially
hostile state. Well behaved and non-hostile frontends will not be
penalised.

As well as making the existing checks for such errors fatal, also add a
new check that ensures that there isn't an insane number of requests
on the ring (i.e. more than would fit in the ring). If the ring
contains garbage then previously it was possible to loop over this
insane number, getting an error each time and therefore not generating
any more pending requests, and therefore not exiting the loop in
xen_netbk_tx_build_gops (net_tx_action in this tree) for an extended
period.
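
For illustration only, a minimal userspace sketch of the unsigned
index arithmetic behind the new check; RING_SIZE is a stand-in
constant (not the tree's real NET_TX_RING_SIZE) and ring_is_sane() is
a hypothetical helper, not a function from this patch:

#include <stdio.h>

/* Stand-in value; the real NET_TX_RING_SIZE is derived from the
 * size of the shared ring page. */
#define RING_SIZE 256u

/*
 * On a sane ring the producer index is at most one full ring ahead
 * of the consumer index.  Both indices are free-running unsigned
 * counters, so unsigned subtraction yields the correct distance even
 * across wraparound, and a single comparison suffices.
 */
static int ring_is_sane(unsigned int req_prod, unsigned int req_cons)
{
        return req_prod - req_cons <= RING_SIZE;
}

int main(void)
{
        printf("%d\n", ring_is_sane(10u, 2u));         /* 1: 8 slots in flight */
        printf("%d\n", ring_is_sane(5u, 0xfffffff0u)); /* 1: wrapped, 21 slots */
        printf("%d\n", ring_is_sane(100000u, 0u));     /* 0: garbage; shut down */
        return 0;
}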

Also turn the various netdev_dbg calls which now precipitate a fatal
error into netdev_err (in this 2.6.18 tree, DPRINTK calls become
printk(KERN_ERR ...)); they are rate limited in effect because the
device is shut down afterwards.

This fixes at least one known DoS/softlockup of the backend domain.

This is CVE-2013-0216 / XSA-39.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---


diff -r 008c65d4caed -r 5108c6901b30 drivers/xen/netback/common.h
--- a/drivers/xen/netback/common.h      Wed Jan 30 10:22:04 2013 +0100
+++ b/drivers/xen/netback/common.h      Tue Feb 05 10:20:09 2013 +0100
@@ -204,6 +204,9 @@ int netif_be_start_xmit(struct sk_buff *
 struct net_device_stats *netif_be_get_stats(struct net_device *dev);
 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(netif_t *netif);
+
 static inline int netbk_can_queue(struct net_device *dev)
 {
        netif_t *netif = netdev_priv(dev);
diff -r 008c65d4caed -r 5108c6901b30 drivers/xen/netback/interface.c
--- a/drivers/xen/netback/interface.c   Wed Jan 30 10:22:04 2013 +0100
+++ b/drivers/xen/netback/interface.c   Tue Feb 05 10:20:09 2013 +0100
@@ -347,19 +347,23 @@ err_rx:
        return err;
 }
 
+void xenvif_carrier_off(netif_t *netif)
+{
+       rtnl_lock();
+       netback_carrier_off(netif);
+       netif_carrier_off(netif->dev); /* discard queued packets */
+       if (netif_running(netif->dev))
+               __netif_down(netif);
+       rtnl_unlock();
+       netif_put(netif);
+}
+
 void netif_disconnect(struct backend_info *be)
 {
        netif_t *netif = be->netif;
 
-       if (netback_carrier_ok(netif)) {
-               rtnl_lock();
-               netback_carrier_off(netif);
-               netif_carrier_off(netif->dev); /* discard queued packets */
-               if (netif_running(netif->dev))
-                       __netif_down(netif);
-               rtnl_unlock();
-               netif_put(netif);
-       }
+       if (netback_carrier_ok(netif))
+               xenvif_carrier_off(netif);
 
        atomic_dec(&netif->refcnt);
        wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
diff -r 008c65d4caed -r 5108c6901b30 drivers/xen/netback/netback.c
--- a/drivers/xen/netback/netback.c     Wed Jan 30 10:22:04 2013 +0100
+++ b/drivers/xen/netback/netback.c     Tue Feb 05 10:20:09 2013 +0100
@@ -1020,6 +1020,14 @@ static void netbk_tx_err(netif_t *netif,
        netif_put(netif);
 }
 
+static void netbk_fatal_tx_err(netif_t *netif)
+{
+       printk(KERN_ERR "%s: fatal error; disabling device\n",
+              netif->dev->name);
+       xenvif_carrier_off(netif);
+       netif_put(netif);
+}
+
 static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
                                netif_tx_request_t *txp, int work_to_do)
 {
@@ -1031,19 +1039,25 @@ static int netbk_count_requests(netif_t 
 
        do {
                if (frags >= work_to_do) {
-                       DPRINTK("Need more frags\n");
+                       printk(KERN_ERR "%s: Need more frags\n",
+                              netif->dev->name);
+                       netbk_fatal_tx_err(netif);
                        return -frags;
                }
 
                if (unlikely(frags >= MAX_SKB_FRAGS)) {
-                       DPRINTK("Too many frags\n");
+                       printk(KERN_ERR "%s: Too many frags\n",
+                              netif->dev->name);
+                       netbk_fatal_tx_err(netif);
                        return -frags;
                }
 
                memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
                       sizeof(*txp));
                if (txp->size > first->size) {
-                       DPRINTK("Frags galore\n");
+                       printk(KERN_ERR "%s: Frag is bigger than frame.\n",
+                              netif->dev->name);
+                       netbk_fatal_tx_err(netif);
                        return -frags;
                }
 
@@ -1051,8 +1065,9 @@ static int netbk_count_requests(netif_t 
                frags++;
 
                if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-                       DPRINTK("txp->offset: %x, size: %u\n",
-                               txp->offset, txp->size);
+                       printk(KERN_ERR "%s: txp->offset: %x, size: %u\n",
+                              netif->dev->name, txp->offset, txp->size);
+                       netbk_fatal_tx_err(netif);
                        return -frags;
                }
        } while ((txp++)->flags & NETTXF_more_data);
@@ -1195,7 +1210,9 @@ int netbk_get_extras(netif_t *netif, str
 
        do {
                if (unlikely(work_to_do-- <= 0)) {
-                       DPRINTK("Missing extra info\n");
+                       printk(KERN_ERR "%s: Missing extra info\n",
+                              netif->dev->name);
+                       netbk_fatal_tx_err(netif);
                        return -EBADR;
                }
 
@@ -1204,7 +1221,9 @@ int netbk_get_extras(netif_t *netif, str
                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        netif->tx.req_cons = ++cons;
-                       DPRINTK("Invalid extra type: %d\n", extra.type);
+                       printk(KERN_ERR "%s: Invalid extra type: %d\n",
+                              netif->dev->name, extra.type);
+                       netbk_fatal_tx_err(netif);
                        return -EINVAL;
                }
 
@@ -1215,16 +1234,21 @@ int netbk_get_extras(netif_t *netif, str
        return work_to_do;
 }
 
-static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
+static int netbk_set_skb_gso(netif_t *netif, struct sk_buff *skb,
+                            struct netif_extra_info *gso)
 {
        if (!gso->u.gso.size) {
-               DPRINTK("GSO size must not be zero.\n");
+               printk(KERN_ERR "%s: GSO size must not be zero.\n",
+                      netif->dev->name);
+               netbk_fatal_tx_err(netif);
                return -EINVAL;
        }
 
        /* Currently only TCPv4 S.O. is supported. */
        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-               DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
+               printk(KERN_ERR "%s: Bad GSO type %d.\n",
+                      netif->dev->name, gso->u.gso.type);
+               netbk_fatal_tx_err(netif);
                return -EINVAL;
        }
 
@@ -1259,9 +1283,25 @@ static void net_tx_action(unsigned long 
                !list_empty(&net_schedule_list)) {
                /* Get a netif from the list with work to do. */
                netif = poll_net_schedule_list();
+               /*
+                * This can sometimes happen because the test of
+                * list_empty(net_schedule_list) at the top of the
+                * loop is unlocked.  Just go back and have another
+                * look.
+                */
                if (!netif)
                        continue;
 
+               if (netif->tx.sring->req_prod - netif->tx.req_cons >
+                   NET_TX_RING_SIZE) {
+                       printk(KERN_ERR "%s: Impossible number of requests. "
+                              "req_prod %u, req_cons %u, size %lu\n",
+                              netif->dev->name, netif->tx.sring->req_prod,
+                              netif->tx.req_cons, NET_TX_RING_SIZE);
+                       netbk_fatal_tx_err(netif);
+                       continue;
+               }
+
                RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
                if (!work_to_do) {
                        netif_put(netif);
@@ -1313,17 +1353,14 @@ static void net_tx_action(unsigned long 
                        work_to_do = netbk_get_extras(netif, extras,
                                                      work_to_do);
                        i = netif->tx.req_cons;
-                       if (unlikely(work_to_do < 0)) {
-                               netbk_tx_err(netif, &txreq, i);
+                       if (unlikely(work_to_do < 0))
                                continue;
-                       }
                }
 
                ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
-               if (unlikely(ret < 0)) {
-                       netbk_tx_err(netif, &txreq, i - ret);
+               if (unlikely(ret < 0))
                        continue;
-               }
+
                i += ret;
 
                if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1334,10 +1371,10 @@ static void net_tx_action(unsigned long 
 
                /* No crossing a page as the payload mustn't fragment. */
                if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-                       DPRINTK("txreq.offset: %x, size: %u, end: %lu\n", 
-                               txreq.offset, txreq.size, 
-                               (txreq.offset &~PAGE_MASK) + txreq.size);
-                       netbk_tx_err(netif, &txreq, i);
+                       printk(KERN_ERR "%s: txreq.offset: %x, size: %u, end: 
%lu\n",
+                              netif->dev->name, txreq.offset, txreq.size,
+                              (txreq.offset & ~PAGE_MASK) + txreq.size);
+                       netbk_fatal_tx_err(netif);
                        continue;
                }
 
@@ -1362,9 +1399,9 @@ static void net_tx_action(unsigned long 
                        struct netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
-                       if (netbk_set_skb_gso(skb, gso)) {
+                       if (netbk_set_skb_gso(netif, skb, gso)) {
+                               /* Failure in netbk_set_skb_gso is fatal. */
                                kfree_skb(skb);
-                               netbk_tx_err(netif, &txreq, i);
                                continue;
                        }
                }
