
[Xen-devel] [PATCH v2] timers: limit heap size



First and foremost, make timer_softirq_action() avoid growing the heap
if its new size can't be stored without truncation. 64k entries is a
lot, and I don't think we're at risk of actually running into the issue,
but it's better not to allow hard-to-debug problems to occur in the
first place.
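
For illustration, a minimal standalone sketch of the truncation being
guarded against, assuming the metadata fields are 16 bits wide (an
assumption made here only because of the 64k figure above; this sketch
is not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t limit = 0xffff;             /* (2^16)-1, widest storable limit */
    unsigned int new_limit = ((limit + 1u) << 4) - 1;  /* (2^20)-1 */
    uint16_t stored = new_limit;         /* a blind store back truncates */

    printf("wanted %#x, field would hold %#x\n", new_limit, stored);

    /* The patch's guard: grow only if the new limit survives a round trip. */
    if ( new_limit != (uint16_t)new_limit )
        printf("growth refused: new limit not representable\n");

    return 0;
}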

Furthermore, adjust the code such that the size/limit fields becoming
unsigned int would at least work from a mere sizing point of view. For
this, also switch various uses of plain int to unsigned int.
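
This is also why the check in the patch additionally tests
"new_limit + 1": with unsigned int fields, the shift could wrap
new_limit to UINT_MAX, making the element count passed to
xmalloc_array() wrap to zero. A hedged sketch of that corner case
(32-bit unsigned int assumed; the starting value is hypothetical):

#include <stdio.h>

int main(void)
{
    unsigned int old_limit = (1u << 28) - 1;  /* hypothetical (2^28)-1 limit */
    unsigned int new_limit = ((old_limit + 1) << 4) - 1;  /* wraps to UINT_MAX */

    /*
     * new_limit itself is representable, but new_limit + 1 wraps to 0,
     * so allocating new_limit + 1 array elements would request nothing.
     */
    if ( new_limit + 1 == 0 )
        printf("growth refused: element count would wrap to zero\n");

    return 0;
}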

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Log (once) when heap limit would have been exceeded.

--- a/xen/common/timer.c
+++ b/xen/common/timer.c
@@ -63,9 +63,9 @@ static struct heap_metadata *heap_metada
 }
 
 /* Sink down element @pos of @heap. */
-static void down_heap(struct timer **heap, int pos)
+static void down_heap(struct timer **heap, unsigned int pos)
 {
-    int sz = heap_metadata(heap)->size, nxt;
+    unsigned int sz = heap_metadata(heap)->size, nxt;
     struct timer *t = heap[pos];
 
     while ( (nxt = (pos << 1)) <= sz )
@@ -84,7 +84,7 @@ static void down_heap(struct timer **hea
 }
 
 /* Float element @pos up @heap. */
-static void up_heap(struct timer **heap, int pos)
+static void up_heap(struct timer **heap, unsigned int pos)
 {
     struct timer *t = heap[pos];
 
@@ -103,8 +103,8 @@ static void up_heap(struct timer **heap,
 /* Delete @t from @heap. Return TRUE if new top of heap. */
 static int remove_from_heap(struct timer **heap, struct timer *t)
 {
-    int sz = heap_metadata(heap)->size;
-    int pos = t->heap_offset;
+    unsigned int sz = heap_metadata(heap)->size;
+    unsigned int pos = t->heap_offset;
 
     if ( unlikely(pos == sz) )
     {
@@ -130,7 +130,7 @@ static int remove_from_heap(struct timer
 /* Add new entry @t to @heap. Return TRUE if new top of heap. */
 static int add_to_heap(struct timer **heap, struct timer *t)
 {
-    int sz = heap_metadata(heap)->size;
+    unsigned int sz = heap_metadata(heap)->size;
 
     /* Fail if the heap is full. */
     if ( unlikely(sz == heap_metadata(heap)->limit) )
@@ -463,9 +463,17 @@ static void timer_softirq_action(void)
     if ( unlikely(ts->list != NULL) )
     {
         /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
-        int old_limit = heap_metadata(heap)->limit;
-        int new_limit = ((old_limit + 1) << 4) - 1;
-        struct timer **newheap = xmalloc_array(struct timer *, new_limit + 1);
+        unsigned int old_limit = heap_metadata(heap)->limit;
+        unsigned int new_limit = ((old_limit + 1) << 4) - 1;
+        struct timer **newheap = NULL;
+
+        /* Don't grow the heap beyond what is representable in its metadata. */
+        if ( new_limit == (typeof(heap_metadata(heap)->limit))new_limit &&
+             new_limit + 1 )
+            newheap = xmalloc_array(struct timer *, new_limit + 1);
+        else
+            printk_once(XENLOG_WARNING "CPU%u: timer heap limit reached\n",
+                        smp_processor_id());
         if ( newheap != NULL )
         {
             spin_lock_irq(&ts->lock);
@@ -544,7 +552,7 @@ static void dump_timerq(unsigned char ke
     struct timers *ts;
     unsigned long  flags;
     s_time_t       now = NOW();
-    int            i, j;
+    unsigned int   i, j;
 
     printk("Dumping timer queues:\n");
 
@@ -556,7 +564,7 @@ static void dump_timerq(unsigned char ke
         spin_lock_irqsave(&ts->lock, flags);
         for ( j = 1; j <= heap_metadata(ts->heap)->size; j++ )
             dump_timer(ts->heap[j], now);
-        for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ )
+        for ( t = ts->list; t != NULL; t = t->list_next )
             dump_timer(t, now);
         spin_unlock_irqrestore(&ts->lock, flags);
     }
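
For concreteness, a standalone walk of the (2^n)-1 -> (2^(n+4))-1 growth
sequence the new guard permits, again assuming 16-bit metadata fields and
an initial limit of 15 (both assumptions of this sketch, not taken from
the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned int limit = (1u << 4) - 1;   /* assumed initial limit: 15 */

    for ( ; ; )
    {
        unsigned int next = ((limit + 1) << 4) - 1;

        /* Same condition as the patch: representable, and next + 1 != 0. */
        if ( next != (uint16_t)next || !(next + 1) )
            break;
        limit = next;
        printf("grown to %u entries\n", limit);
    }
    printf("final limit %u; one step further is where printk_once fires\n",
           limit);

    return 0;
}

Under those assumptions the heap tops out at 65535 entries, matching the
"64k entries" remark in the description.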
