
[UNIKRAFT PATCH 15/18] lib/ukalloc: Per-library allocation statistics



Per-library allocation statistics are implemented by replacing each
library's default allocator with a wrapper. This wrapper forwards
operations to the actual default allocator while recording the
statistics changes caused by them.
Some statistics counters are changed to signed types (e.g., current
memory usage or current number of allocations). This is needed to cover
cases where, for instance, library A allocates memory while library B
frees it.

Signed-off-by: Simon Kuenzer <simon.kuenzer@xxxxxxxxx>
---
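
(Reviewer note, kept below the '---' so that git-am drops it: the
following is a minimal stand-alone sketch, using simplified stand-in
types, of why the signed counters are needed. A library that only frees
memory allocated elsewhere ends up with negative per-library counters;
with unsigned types these would wrap around.)

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the per-library part of struct uk_alloc_stats */
struct lib_stats {
	int64_t cur_nb_allocs; /* can go negative for a freeing-only library */
	int64_t cur_mem_use;   /* likewise */
};

int main(void)
{
	struct lib_stats lib_a = { 0, 0 }, lib_b = { 0, 0 };

	/* Library A allocates 128 bytes: accounted to A's statistics */
	lib_a.cur_nb_allocs += 1;
	lib_a.cur_mem_use   += 128;

	/* Library B frees that buffer: accounted to B's statistics */
	lib_b.cur_nb_allocs -= 1;
	lib_b.cur_mem_use   -= 128;

	/* B's counters are now negative; unsigned types would wrap */
	printf("A: %" PRId64 " allocs, %" PRId64 " bytes\n",
	       lib_a.cur_nb_allocs, lib_a.cur_mem_use);
	printf("B: %" PRId64 " allocs, %" PRId64 " bytes\n",
	       lib_b.cur_nb_allocs, lib_b.cur_mem_use);
	return 0;
}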
 lib/ukalloc/Config.uk             |  14 ++
 lib/ukalloc/Makefile.uk           |   4 +
 lib/ukalloc/include/uk/alloc.h    |  19 +-
 lib/ukalloc/libstats.c            | 310 ++++++++++++++++++++++++++++++
 lib/ukalloc/libstats.ld           |   7 +
 lib/ukalloc/libstats.localsyms.uk |   2 +
 6 files changed, 352 insertions(+), 4 deletions(-)
 create mode 100644 lib/ukalloc/libstats.c
 create mode 100644 lib/ukalloc/libstats.ld
 create mode 100644 lib/ukalloc/libstats.localsyms.uk
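
(Usage illustration, not part of this patch: the linker script below only
creates the `.uk_alloc_libstats` section. Assuming additional boundary
symbols around it - hypothetically named `uk_alloc_libstats_start` and
`uk_alloc_libstats_end` here - a monitoring component could walk all
per-library entries roughly as follows.)

#include <uk/alloc.h>
#include <uk/print.h>

/* Hypothetical section boundary symbols; an extended linker script (or an
 * equivalent iteration mechanism) would have to provide them.
 */
extern struct uk_alloc_libstats_entry uk_alloc_libstats_start[];
extern struct uk_alloc_libstats_entry uk_alloc_libstats_end[];

static void dump_perlib_stats(void)
{
	struct uk_alloc_libstats_entry *e;
	struct uk_alloc_stats s;

	for (e = uk_alloc_libstats_start; e < uk_alloc_libstats_end; e++) {
		uk_alloc_stats(e->a, &s);
		uk_pr_info("%s: %ld bytes in use\n",
			   e->libname, (long)s.cur_mem_use);
	}
}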

diff --git a/lib/ukalloc/Config.uk b/lib/ukalloc/Config.uk
index ffeec00e..838dd50d 100644
--- a/lib/ukalloc/Config.uk
+++ b/lib/ukalloc/Config.uk
@@ -26,4 +26,18 @@ if LIBUKALLOC
                        Please note that this option may slow down allocation
                        performance due to contention on the single global
                        counters.
+
+       config LIBUKALLOC_IFSTATS_PERLIB
+               bool "Per-library statistics"
+               default n
+               depends on LIBUKALLOC_IFSTATS
+               help
+                       Additionally compute per-library statistics. This is
+                       achieved by returning a per-library uk_alloc wrapper
+                       from the uk_alloc_get_default() call. This wrapper
+                       forwards operations to the actual default allocator
+                       but records statistics changes for each library.
+                       Please note that memory usage numbers can become
+                       negative, for example when library A allocates
+                       memory and library B frees it.
 endif
diff --git a/lib/ukalloc/Makefile.uk b/lib/ukalloc/Makefile.uk
index d56aabb6..1a693bdb 100644
--- a/lib/ukalloc/Makefile.uk
+++ b/lib/ukalloc/Makefile.uk
@@ -5,3 +5,7 @@ CXXINCLUDES-$(CONFIG_LIBUKALLOC)        += -I$(LIBUKALLOC_BASE)/include
 
 LIBUKALLOC_SRCS-y += $(LIBUKALLOC_BASE)/alloc.c
 LIBUKALLOC_SRCS-$(CONFIG_LIBUKALLOC_IFSTATS) += $(LIBUKALLOC_BASE)/stats.c
+
+EACHOLIB_SRCS-$(CONFIG_LIBUKALLOC_IFSTATS_PERLIB)   += $(LIBUKALLOC_BASE)/libstats.c|libukalloc
+LIBUKALLOC_SRCS-$(CONFIG_LIBUKALLOC_IFSTATS_PERLIB) += $(LIBUKALLOC_BASE)/libstats.ld
+EACHOLIB_LOCALS-$(CONFIG_LIBUKALLOC_IFSTATS_PERLIB) += $(LIBUKALLOC_BASE)/libstats.localsyms.uk
diff --git a/lib/ukalloc/include/uk/alloc.h b/lib/ukalloc/include/uk/alloc.h
index 28137e4e..e7166dfa 100644
--- a/lib/ukalloc/include/uk/alloc.h
+++ b/lib/ukalloc/include/uk/alloc.h
@@ -79,11 +79,11 @@ struct uk_alloc_stats {
 
        uint64_t tot_nb_allocs; /* total number of satisfied allocations */
        uint64_t tot_nb_frees;  /* total number of satisfied free operations */
-       uint64_t cur_nb_allocs; /* current number of active allocations */
-       uint64_t max_nb_allocs; /* maximum number of active allocations */
+       int64_t cur_nb_allocs; /* current number of active allocations */
+       int64_t max_nb_allocs; /* maximum number of active allocations */
 
-       size_t cur_mem_use; /* current used memory by allocations */
-       size_t max_mem_use; /* maximum amount of memory used by allocations */
+       ssize_t cur_mem_use; /* current used memory by allocations */
+       ssize_t max_mem_use; /* maximum amount of memory used by allocations */
 
        uint64_t nb_enomem; /* number of failed allocation requests */
 };
@@ -131,10 +131,14 @@ extern struct uk_alloc *_uk_alloc_head;
             iter != NULL;                      \
             iter = iter->next)
 
+#if CONFIG_LIBUKALLOC_IFSTATS_PERLIB
+struct uk_alloc *uk_alloc_get_default(void);
+#else /* !CONFIG_LIBUKALLOC_IFSTATS_PERLIB */
 static inline struct uk_alloc *uk_alloc_get_default(void)
 {
        return _uk_alloc_head;
 }
+#endif /* !CONFIG_LIBUKALLOC_IFSTATS_PERLIB */
 
 /* wrapper functions */
 static inline void *uk_do_malloc(struct uk_alloc *a, size_t size)
@@ -314,6 +318,13 @@ void uk_alloc_stats(struct uk_alloc *a, struct uk_alloc_stats *dst);
 #if CONFIG_LIBUKALLOC_IFSTATS_GLOBAL
 void uk_alloc_stats_global(struct uk_alloc_stats *dst);
 #endif /* CONFIG_LIBUKALLOC_IFSTATS_GLOBAL */
+
+#if CONFIG_LIBUKALLOC_IFSTATS_PERLIB
+struct uk_alloc_libstats_entry {
+       const char *libname;
+       struct uk_alloc *a; /* default allocator wrapper for the library */
+};
+#endif /* CONFIG_LIBUKALLOC_IFSTATS_PERLIB */
 #endif /* CONFIG_LIBUKALLOC_IFSTATS */
 
 #ifdef __cplusplus
diff --git a/lib/ukalloc/libstats.c b/lib/ukalloc/libstats.c
new file mode 100644
index 00000000..0424e8f5
--- /dev/null
+++ b/lib/ukalloc/libstats.c
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Authors: Simon Kuenzer <simon.kuenzer@xxxxxxxxx>
+ *
+ * Copyright (c) 2020, NEC Laboratories Europe GmbH, NEC Corporation.
+ *                     All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Per-library statistics
+ * ----------------------
+ * This file is compiled together with each library. The provided symbols
+ * are kept private to each library.
+ * The idea is that this file hooks into `uk_alloc_get_default()`: instead of
+ * the actual default allocator, a per-library wrapper is returned that keeps
+ * the statistics. Allocation requests are forwarded to the actual default
+ * allocator while the resulting changes in memory usage are observed and
+ * accounted to the library-local statistics.
+ */
+
+#include <uk/print.h>
+#include <uk/alloc_impl.h>
+#include <uk/preempt.h>
+
+static inline struct uk_alloc *_uk_alloc_get_actual_default(void)
+{
+       return _uk_alloc_head;
+}
+
+#define WATCH_STATS_START(p)                                           \
+       ssize_t _before_mem_use;                                        \
+       size_t _before_nb_allocs;                                       \
+       size_t _before_tot_nb_allocs;                                   \
+       size_t _before_nb_enomem;                                       \
+                                                                       \
+       uk_preempt_disable();                                           \
+       _before_mem_use       = (p)->_stats.cur_mem_use;                \
+       _before_nb_allocs     = (p)->_stats.cur_nb_allocs;              \
+       _before_tot_nb_allocs = (p)->_stats.tot_nb_allocs;              \
+       _before_nb_enomem     = (p)->_stats.nb_enomem;
+
+#define WATCH_STATS_END(p, nb_allocs_diff, nb_enomem_diff,             \
+                       mem_use_diff, alloc_size)                       \
+       size_t _nb_allocs = (p)->_stats.tot_nb_allocs                   \
+                           - _before_tot_nb_allocs;                    \
+                                                                       \
+       /* NOTE: We assume that an allocator call does at
+        * most one allocation. Otherwise we cannot currently
+        * keep track of `last_alloc_size` properly
+        */                                                             \
+       UK_ASSERT(_nb_allocs <= 1);                                     \
+                                                                       \
+       *(mem_use_diff)   = (p)->_stats.cur_mem_use                     \
+                           - _before_mem_use;                          \
+       *(nb_allocs_diff) = (ssize_t) (p)->_stats.cur_nb_allocs         \
+                           - _before_nb_allocs;                        \
+       *(nb_enomem_diff) = (ssize_t) (p)->_stats.nb_enomem             \
+                           - _before_nb_enomem;                        \
+       if (_nb_allocs > 0)                                             \
+               *(alloc_size) = (p)->_stats.last_alloc_size;            \
+       else                                                            \
+               *(alloc_size) = 0; /* there was no new allocation */    \
+       uk_preempt_enable();
+
+static inline void update_stats(struct uk_alloc_stats *stats,
+                               ssize_t nb_allocs_diff,
+                               ssize_t nb_enomem_diff,
+                               ssize_t mem_use_diff,
+                               size_t last_alloc_size)
+{
+       uk_preempt_disable();
+       if (nb_allocs_diff >= 0)
+               stats->tot_nb_allocs += nb_allocs_diff;
+       else
+               stats->tot_nb_frees  += -nb_allocs_diff;
+       stats->cur_nb_allocs += nb_allocs_diff;
+       stats->nb_enomem     += nb_enomem_diff;
+       stats->cur_mem_use   += mem_use_diff;
+       if (last_alloc_size)
+               stats->last_alloc_size = last_alloc_size;
+       __uk_alloc_stats_refresh_minmax(stats);
+       uk_preempt_enable();
+}
+
+static void *wrapper_malloc(struct uk_alloc *a, size_t size)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+       ssize_t nb_allocs, mem_use, nb_enomem;
+       size_t alloc_size;
+       void *ret;
+
+       UK_ASSERT(p);
+
+       WATCH_STATS_START(p);
+       ret = uk_do_malloc(p, size);
+       WATCH_STATS_END(p, &nb_allocs, &nb_enomem, &mem_use, &alloc_size);
+
+       /* NOTE: We account to our library-local stats only */
+       update_stats(&a->_stats, nb_allocs, nb_enomem, mem_use, alloc_size);
+       return ret;
+}
+
+static void *wrapper_calloc(struct uk_alloc *a, size_t nmemb, size_t size)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+       ssize_t nb_allocs, mem_use, nb_enomem;
+       size_t alloc_size;
+       void *ret;
+
+       UK_ASSERT(p);
+
+       WATCH_STATS_START(p);
+       ret = uk_do_calloc(p, nmemb, size);
+       WATCH_STATS_END(p, &nb_allocs, &nb_enomem, &mem_use, &alloc_size);
+
+       update_stats(&a->_stats, nb_allocs, nb_enomem, mem_use, alloc_size);
+       return ret;
+}
+
+static int wrapper_posix_memalign(struct uk_alloc *a, void **memptr,
+                                 size_t align, size_t size)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+       ssize_t nb_allocs, mem_use, nb_enomem;
+       size_t alloc_size;
+       int ret;
+
+       UK_ASSERT(p);
+
+       WATCH_STATS_START(p);
+       ret = uk_do_posix_memalign(p, memptr, align, size);
+       WATCH_STATS_END(p, &nb_allocs, &nb_enomem, &mem_use, &alloc_size);
+
+       update_stats(&a->_stats, nb_allocs, nb_enomem, mem_use, alloc_size);
+       return ret;
+}
+
+static void *wrapper_memalign(struct uk_alloc *a, size_t align, size_t size)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+       ssize_t nb_allocs, mem_use, nb_enomem;
+       size_t alloc_size;
+       void *ret;
+
+       UK_ASSERT(p);
+
+       WATCH_STATS_START(p);
+       ret = uk_do_memalign(p, align, size);
+       WATCH_STATS_END(p, &nb_allocs, &nb_enomem, &mem_use, &alloc_size);
+
+       update_stats(&a->_stats, nb_allocs, nb_enomem, mem_use, alloc_size);
+       return ret;
+}
+
+static void *wrapper_realloc(struct uk_alloc *a, void *ptr, size_t size)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+       ssize_t nb_allocs, mem_use, nb_enomem;
+       size_t alloc_size;
+       void *ret;
+
+       UK_ASSERT(p);
+
+       WATCH_STATS_START(p);
+       ret = uk_do_realloc(p, ptr, size);
+       WATCH_STATS_END(p, &nb_allocs, &nb_enomem, &mem_use, &alloc_size);
+
+       update_stats(&a->_stats, nb_allocs, nb_enomem, mem_use, alloc_size);
+       return ret;
+}
+
+static void wrapper_free(struct uk_alloc *a, void *ptr)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+       ssize_t nb_allocs, mem_use, nb_enomem;
+       size_t alloc_size;
+
+       UK_ASSERT(p);
+
+       WATCH_STATS_START(p);
+       uk_do_free(p, ptr);
+       WATCH_STATS_END(p, &nb_allocs, &nb_enomem, &mem_use, &alloc_size);
+
+       update_stats(&a->_stats, nb_allocs, nb_enomem, mem_use, alloc_size);
+}
+
+static void *wrapper_palloc(struct uk_alloc *a, unsigned long num_pages)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+       ssize_t nb_allocs, mem_use, nb_enomem;
+       size_t alloc_size;
+       void *ret;
+
+       UK_ASSERT(p);
+
+       WATCH_STATS_START(p);
+       ret = uk_do_palloc(p, num_pages);
+       WATCH_STATS_END(p, &nb_allocs, &nb_enomem, &mem_use, &alloc_size);
+
+       update_stats(&a->_stats, nb_allocs, nb_enomem, mem_use, alloc_size);
+       return ret;
+}
+
+static void wrapper_pfree(struct uk_alloc *a, void *ptr,
+                         unsigned long num_pages)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+       ssize_t nb_allocs, mem_use, nb_enomem;
+       size_t alloc_size;
+
+       UK_ASSERT(p);
+
+       WATCH_STATS_START(p);
+       uk_do_pfree(p, ptr, num_pages);
+       WATCH_STATS_END(p, &nb_allocs, &nb_enomem, &mem_use, &alloc_size);
+
+       update_stats(&a->_stats, nb_allocs, nb_enomem, mem_use, alloc_size);
+}
+
+/* The following interfaces do not change allocation statistics,
+ * so we simply forward the calls
+ */
+static int wrapper_addmem(struct uk_alloc *a __unused, void *base, size_t size)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+
+       UK_ASSERT(p);
+       return uk_alloc_addmem(p, base, size);
+}
+
+static size_t wrapper_maxalloc(struct uk_alloc *a __unused)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+
+       UK_ASSERT(p);
+       return uk_alloc_maxalloc(p);
+}
+
+static size_t wrapper_availmem(struct uk_alloc *a __unused)
+{
+       struct uk_alloc *p = _uk_alloc_get_actual_default();
+
+       UK_ASSERT(p);
+       return uk_alloc_availmem(p);
+}
+
+/*
+ * Allocator layer that hooks in between the actual default allocator and the
+ * library. It is not registered to the list of allocators but it replaces
+ * the default allocator - for each library another wrapper instance.
+ * It forwards requests to the actual default allocator but keeps the
+ * statistics for the library.
+ */
+static struct uk_alloc _uk_alloc_lib_default = {
+       .malloc         = wrapper_malloc,
+       .calloc         = wrapper_calloc,
+       .realloc        = wrapper_realloc,
+       .posix_memalign = wrapper_posix_memalign,
+       .memalign       = wrapper_memalign,
+       .free           = wrapper_free,
+       .palloc         = wrapper_palloc,
+       .pfree          = wrapper_pfree,
+       .maxalloc       = wrapper_maxalloc,
+       .pmaxalloc      = uk_alloc_pmaxalloc_compat,
+       .availmem       = wrapper_availmem,
+       .pavail         = uk_alloc_pavail_compat,
+       .addmem         = wrapper_addmem,
+
+       ._stats         = { 0 },
+};
+
+static __used __section(".uk_alloc_libstats") __align(8)
+struct uk_alloc_libstats_entry _uk_alloc_libstats_entry = {
+       .libname = STRINGIFY(__LIBNAME__),
+       .a       = &_uk_alloc_lib_default,
+};
+
+/* Return this wrapper allocator instead of the actual default allocator */
+struct uk_alloc *uk_alloc_get_default(void)
+{
+       uk_pr_debug("Wrap default allocator %p with allocator %p\n",
+                   _uk_alloc_get_actual_default(),
+                   &_uk_alloc_lib_default);
+       return &_uk_alloc_lib_default;
+}
diff --git a/lib/ukalloc/libstats.ld b/lib/ukalloc/libstats.ld
new file mode 100644
index 00000000..79a0a837
--- /dev/null
+++ b/lib/ukalloc/libstats.ld
@@ -0,0 +1,7 @@
+SECTIONS
+{
+       .uk_alloc_libstats : {
+               KEEP (*(.uk_alloc_libstats))
+       }
+}
+INSERT BEFORE .data;
diff --git a/lib/ukalloc/libstats.localsyms.uk b/lib/ukalloc/libstats.localsyms.uk
new file mode 100644
index 00000000..59f4aad4
--- /dev/null
+++ b/lib/ukalloc/libstats.localsyms.uk
@@ -0,0 +1,2 @@
+_uk_alloc_lib_default
+uk_alloc_get_default
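
(For completeness: the snapshot/diff idea behind WATCH_STATS_START()/
WATCH_STATS_END() reproduced as a stand-alone user-space sketch with
stand-in types - no preemption control, one allocation per call. It
illustrates the technique only and is not the kernel code above.)

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for an allocator's statistics */
struct stats {
	int64_t cur_nb_allocs;
	int64_t cur_mem_use;
};

static struct stats actual; /* stats of the actual default allocator */
static struct stats lib;    /* per-library stats kept by the wrapper */

/* Pretend backend that updates the actual allocator's statistics */
static void *backend_malloc(size_t size)
{
	actual.cur_nb_allocs += 1;
	actual.cur_mem_use   += (int64_t)size;
	return malloc(size);
}

/* Wrapper: snapshot before, forward, account the difference locally */
static void *wrapper_malloc(size_t size)
{
	int64_t before_allocs = actual.cur_nb_allocs;
	int64_t before_mem    = actual.cur_mem_use;
	void *ret             = backend_malloc(size);

	lib.cur_nb_allocs += actual.cur_nb_allocs - before_allocs;
	lib.cur_mem_use   += actual.cur_mem_use   - before_mem;
	return ret;
}

int main(void)
{
	void *p = wrapper_malloc(64);

	printf("lib: %lld allocs, %lld bytes\n",
	       (long long)lib.cur_nb_allocs, (long long)lib.cur_mem_use);
	free(p);
	return 0;
}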
-- 
2.20.1