
[Xen-changelog] [xen stable-4.5] xen/mm: make sure node is less than MAX_NUMNODES



commit e3f076893c48dfb63218f24d0977e43903bc48e1
Author:     George Dunlap <george.dunlap@xxxxxxxxxx>
AuthorDate: Tue Sep 12 15:16:48 2017 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Sep 12 15:16:48 2017 +0200

    xen/mm: make sure node is less than MAX_NUMNODES
    
    The output of MEMF_get_node(memflags) can be as large as nodeid_t can
    hold (currently 255).  This value is then used as an index into arrays
    of size MAX_NUMNODES (64 on x86, 1 on ARM), can be passed in by an
    untrusted guest (via memory_exchange and increase_reservation), and is
    not currently bounds-checked.  (The mem_flags node arithmetic involved
    is sketched just below the commit message.)
    
    Check the value in page_alloc.c before using it, and also check the
    value at the hypercall call sites, returning -EINVAL where appropriate.
    Don't permit domains other than the hardware or control domain to
    allocate node-constrained memory.  (The resulting decision logic is
    sketched after the diffstat below.)
    
    This is CVE-2017-14316 / XSA-231.
    
    Reported-by: Matthew Daley <mattd@xxxxxxxxxxx>
    Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    master commit: 2fece35303529395bfea6b03d2268380ef682c93
    master date: 2017-09-12 14:43:16 +0200
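
As context for the first paragraph of the commit message, here is a minimal
standalone sketch of the mem_flags node arithmetic.  The XENMEMF_node() /
XENMEMF_get_node() macros are reproduced approximately as defined in
xen/include/public/memory.h, and the MAX_NUMNODES / NUMA_NO_NODE values are
assumptions matching the x86 figures quoted above; this is an illustration,
not Xen source.

#include <stdio.h>

/* Approximate copies of the public-header macros (illustrative only). */
#define XENMEMF_node(x)      (((x) + 1) << 8)            /* node biased by 1 in bits 8-15 */
#define XENMEMF_get_node(x)  ((((x) >> 8) - 1) & 0xffu)  /* decode; 0xff means "no node" */

#define MAX_NUMNODES  64     /* x86 value quoted in the commit message (assumed) */
#define NUMA_NO_NODE  0xff   /* consistent with the BUILD_BUG_ON in the patch (assumed) */

int main(void)
{
    /* mem_flags is fully guest-controlled in memory_exchange and
     * increase_reservation, so any 8-bit node value can be encoded. */
    unsigned int mem_flags = XENMEMF_node(200);
    unsigned int node = XENMEMF_get_node(mem_flags);

    printf("decoded node = %u (MAX_NUMNODES = %u)\n", node, MAX_NUMNODES);

    if ( node != NUMA_NO_NODE && node >= MAX_NUMNODES )
        printf("out of range: would index per-node heap arrays past the end\n");

    return 0;
}
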
---
 xen/common/memory.c     | 36 ++++++++++++++++++++++++++++++++----
 xen/common/page_alloc.c |  6 +++++-
 2 files changed, 37 insertions(+), 5 deletions(-)
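
The centre of the fix is the new propagate_node() helper added to
xen/common/memory.c in the diff below.  Its decision logic can be summarised
with the following standalone sketch; accept_node_request() and the
"privileged" flag are invented for illustration (they stand in for the
hardware-/control-domain checks), and the MEMF_* propagation is reduced to a
plain accept/reject result.

#include <stdbool.h>
#include <stdio.h>

/* Approximate copies of the public-header flag macros (illustrative only). */
#define XENMEMF_node(x)             (((x) + 1) << 8)
#define XENMEMF_get_node(x)         ((((x) >> 8) - 1) & 0xffu)
#define XENMEMF_exact_node_request  (1u << 17)

#define MAX_NUMNODES  64     /* x86 value quoted in the commit message (assumed) */
#define NUMA_NO_NODE  0xff

/* Hypothetical stand-in for propagate_node(): returns true if the request
 * would be accepted, false where the patched call sites return -EINVAL. */
static bool accept_node_request(unsigned int xmf, bool privileged)
{
    unsigned int node = XENMEMF_get_node(xmf);

    if ( node == NUMA_NO_NODE )        /* no node requested: always fine */
        return true;

    if ( privileged )                  /* hardware or control domain */
        return node < MAX_NUMNODES;    /* reject out-of-range node IDs */

    /* Other domains may not pin allocations to a node: an exact-node
     * request is rejected, a plain node hint is silently ignored. */
    return !(xmf & XENMEMF_exact_node_request);
}

int main(void)
{
    printf("dom0, node 3       -> %d\n", accept_node_request(XENMEMF_node(3), true));
    printf("dom0, node 200     -> %d\n", accept_node_request(XENMEMF_node(200), true));
    printf("domU, exact node 3 -> %d\n",
           accept_node_request(XENMEMF_node(3) | XENMEMF_exact_node_request, false));
    printf("domU, no node      -> %d\n", accept_node_request(0, false));
    return 0;
}

The second and third cases correspond to the two new -EINVAL paths in the
diff: an out-of-range node from a privileged domain, and an exact-node
request from an unprivileged one.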

diff --git a/xen/common/memory.c b/xen/common/memory.c
index a925f4d..3132cd3 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -383,6 +383,30 @@ static void decrease_reservation(struct memop_args *a)
     a->nr_done = i;
 }
 
+static bool_t propagate_node(unsigned int xmf, unsigned int *memflags)
+{
+    const struct domain *currd = current->domain;
+
+    BUILD_BUG_ON(XENMEMF_get_node(0) != NUMA_NO_NODE);
+
+    if ( XENMEMF_get_node(xmf) == NUMA_NO_NODE )
+        return 1;
+
+    if ( is_hardware_domain(currd) || is_control_domain(currd) )
+    {
+        if ( XENMEMF_get_node(xmf) >= MAX_NUMNODES )
+            return 0;
+
+        *memflags |= MEMF_node(XENMEMF_get_node(xmf));
+        if ( xmf & XENMEMF_exact_node_request )
+            *memflags |= MEMF_exact_node;
+    }
+    else if ( xmf & XENMEMF_exact_node_request )
+        return 0;
+
+    return 1;
+}
+
 static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
 {
     struct xen_memory_exchange exch;
@@ -455,6 +479,12 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
         }
     }
 
+    if ( unlikely(!propagate_node(exch.out.mem_flags, &memflags)) )
+    {
+        rc = -EINVAL;
+        goto fail_early;
+    }
+
     d = rcu_lock_domain_by_any_id(exch.in.domid);
     if ( d == NULL )
     {
@@ -473,7 +503,6 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
         d,
         XENMEMF_get_address_bits(exch.out.mem_flags) ? :
         (BITS_PER_LONG+PAGE_SHIFT)));
-    memflags |= MEMF_node(XENMEMF_get_node(exch.out.mem_flags));
 
     for ( i = (exch.nr_exchanged >> in_chunk_order);
           i < (exch.in.nr_extents >> in_chunk_order);
@@ -814,9 +843,8 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             args.memflags = MEMF_bits(address_bits);
         }
 
-        args.memflags |= MEMF_node(XENMEMF_get_node(reservation.mem_flags));
-        if ( reservation.mem_flags & XENMEMF_exact_node_request )
-            args.memflags |= MEMF_exact_node;
+        if ( unlikely(!propagate_node(reservation.mem_flags, &args.memflags)) )
+            return -EINVAL;
 
         if ( op == XENMEM_populate_physmap
              && (reservation.mem_flags & XENMEMF_populate_on_demand) )
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 926a8b2..7d6d20c 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -610,9 +610,13 @@ static struct page_info *alloc_heap_pages(
         if ( node >= MAX_NUMNODES )
             node = cpu_to_node(smp_processor_id());
     }
+    else if ( unlikely(node >= MAX_NUMNODES) )
+    {
+        ASSERT_UNREACHABLE();
+        return NULL;
+    }
     first_node = node;
 
-    ASSERT(node >= 0);
     ASSERT(zone_lo <= zone_hi);
     ASSERT(zone_hi < NR_ZONES);
 
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.5
