
Re: [UNIKRAFT PATCH] lib/ukalloc: add ifmalloc compatibility interface



Hi Hugo,

I think this patch looks pretty good. I have just a few slight suggestions, none of which is critical. I once experienced on a microcontroller architecture that accessing a mis-aligned struct allocation can lead to opcode crashes. Because of this I would prefer using `METADATA_IFMALLOC_SIZE_POW2` instead of `sizeof()` for computing the metadata offsets (see the inline comments below). The rest of the comments is just beautification.

Let me know what you think.

Thanks,

Simon

On 26.06.20 10:50, Hugo Lefeuvre wrote:
Add ifmalloc, the malloc compatibility interface. This interface implements
a POSIX compliant allocation interface on top of a simple malloc() and
free() interface. This interface is similar to ifpages, which does the same
for palloc() and pfree().

ifmalloc will be used for the port of TLSF and tinyalloc which do not
support (posix_)memalign().

Signed-off-by: Hugo Lefeuvre <hugo.lefeuvre@xxxxxxxxx>
---
  lib/ukalloc/Config.uk               |   5 ++
  lib/ukalloc/alloc.c                 | 144 ++++++++++++++++++++++++++++++++++++
  lib/ukalloc/exportsyms.uk           |   4 +
  lib/ukalloc/include/uk/alloc.h      |   5 ++
  lib/ukalloc/include/uk/alloc_impl.h |  27 +++++++
  5 files changed, 185 insertions(+)

diff --git a/lib/ukalloc/Config.uk b/lib/ukalloc/Config.uk
index 0de7464..c965d08 100644
--- a/lib/ukalloc/Config.uk
+++ b/lib/ukalloc/Config.uk
@@ -5,6 +5,11 @@ menuconfig LIBUKALLOC
        select LIBUKDEBUG
if LIBUKALLOC
+       config LIBUKALLOC_IFMALLOC
+               bool "Malloc compatibility interface"
+               default n
+               help
+                       Provide helpers for allocators defining exclusively malloc and free
        config LIBUKALLOC_IFSTATS
                bool "Statistics interface"
                default n
diff --git a/lib/ukalloc/alloc.c b/lib/ukalloc/alloc.c
index 437bc4b..3b8d8d8 100644
--- a/lib/ukalloc/alloc.c
+++ b/lib/ukalloc/alloc.c
@@ -304,6 +304,150 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a,
        return 0;
  }
+#if CONFIG_LIBUKALLOC_IFMALLOC
+
+struct metadata_ifmalloc {
+       size_t  size;
+       void    *base;
+};
+
+#define METADATA_IFMALLOC_SIZE_POW2 16
+UK_CTASSERT(!(sizeof(struct metadata_ifmalloc) > METADATA_IFMALLOC_SIZE_POW2));
+
+static struct metadata_ifmalloc *uk_get_metadata_ifmalloc(const void *ptr)
+{
+       return (struct metadata_ifmalloc *)((uintptr_t) ptr -
+               sizeof(struct metadata_ifmalloc));

Maybe subtract METADATA_IFMALLOC_SIZE_POW2 instead of sizeof. This is to make sure that we do not end up with alignment problems.
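
Something along these lines (just a sketch of what I mean):

        static struct metadata_ifmalloc *uk_get_metadata_ifmalloc(const void *ptr)
        {
                return (struct metadata_ifmalloc *)((uintptr_t) ptr -
                        METADATA_IFMALLOC_SIZE_POW2);
        }

The UK_CTASSERT above already guarantees that the metadata fits into that size.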

+}
+
+static size_t uk_getmallocsize_ifmalloc(const void *ptr)
+{
+       struct metadata_ifmalloc *metadata = uk_get_metadata_ifmalloc(ptr);
+       return (size_t) ((uintptr_t) metadata->base + metadata->size -
+                        (uintptr_t) ptr);
+}
+
+void uk_free_ifmalloc(struct uk_alloc *a, void *ptr)
+{
+       struct metadata_ifmalloc *metadata;
+
+       UK_ASSERT(a);
+       UK_ASSERT(a->free_backend);
+       if (!ptr)
+               return;
+
+       metadata = uk_get_metadata_ifmalloc(ptr);
+       a->free_backend(a, metadata->base);
+}
+
+void *uk_malloc_ifmalloc(struct uk_alloc *a, size_t size)
+{
+       struct metadata_ifmalloc *metadata;
+       size_t realsize = size + sizeof(*metadata);

METADATA_IFMALLOC_SIZE_POW2 instead of sizeof - just to make sure.
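
I.e., something like:

        size_t realsize = size + METADATA_IFMALLOC_SIZE_POW2;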

+       void *ptr;
+
+       UK_ASSERT(a);
+       UK_ASSERT(a->malloc_backend);
+
+       /* check for overflow */
+       if (realsize < size)
+               return NULL;

This is quite unlikely. You can put an `unlikely` there to let the compiler know.
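
For example, assuming `unlikely` as provided by `<uk/essentials.h>`:

        if (unlikely(realsize < size))
                return NULL;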

+
+       ptr = a->malloc_backend(a, realsize);
+       if (!ptr)
+               return NULL;
+
+       metadata = ptr;
+       metadata->size = realsize;
+       metadata->base = ptr;
+
+       return (void *) ((uintptr_t) ptr + sizeof(*metadata));

sizeof -> METADATA_IFMALLOC_SIZE_POW2
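
I.e.:

        return (void *) ((uintptr_t) ptr + METADATA_IFMALLOC_SIZE_POW2);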

+}
+
+void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size)
+{
+       void *retptr;
+       size_t mallocsize;
+
+       UK_ASSERT(a);
+       if (!ptr)
+               return uk_malloc_ifmalloc(a, size);
+
+       if (ptr && !size) {
+               uk_free_ifmalloc(a, ptr);
+               return NULL;
+       }
+
+       retptr = uk_malloc_ifmalloc(a, size);
+       if (!retptr)
+               return NULL;
+
+       mallocsize = uk_getmallocsize_ifmalloc(ptr);
+
+       if (size < mallocsize)
+               memcpy(retptr, ptr, size);
+       else
+               memcpy(retptr, ptr, mallocsize);

You could also use MIN instead of the if/else (but this is just for beauty):

memcpy(retptr, ptr, MIN(size, mallocsize));
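
(`MIN` should come with `<uk/essentials.h>`, if I remember correctly.)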

+
+       uk_free_ifmalloc(a, ptr);
+       return retptr;
+}
+
+int uk_posix_memalign_ifmalloc(struct uk_alloc *a,
+                                    void **memptr, size_t align, size_t size)
+{
+       struct metadata_ifmalloc *metadata;
+       size_t realsize, padding;
+       uintptr_t intptr;
+
+       UK_ASSERT(a);
+       if (((align - 1) & align) != 0
+           || (align % sizeof(void *)) != 0)

Maybe use a different check than the slow `%` operation. The previous check already tests for a power of two, right? So we should be fine doing the check with:
        align < sizeof(void *)
right?
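
The whole condition would then become (keeping the power-of-two check as it is):

        if (((align - 1) & align) != 0
            || align < sizeof(void *))
                return EINVAL;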

+               return EINVAL;
+
+       if (!size) {
+               *memptr = NULL;
+               return EINVAL;
+       }
+
+       /* Store size information preceeding the memory block. Since we return
+        * pointers aligned at `align` we need to reserve at least that much
+        * space for the size information.
+        */
+       if (align < METADATA_IFMALLOC_SIZE_POW2) {
+               align = METADATA_IFMALLOC_SIZE_POW2;
+               padding = 0;
+       } else {
+               padding = sizeof(*metadata);

sizeof -> METADATA_IFMALLOC_SIZE_POW2
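
I.e.:

        padding = METADATA_IFMALLOC_SIZE_POW2;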

+       }
+
+       realsize = size + padding + align;
+
+       /* check for overflow */
+       if (realsize < size)
+               return NULL;

`unlikely` here as well. Also note that this returns NULL from a function that returns int; ENOMEM would probably fit better.
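
Something like:

        if (unlikely(realsize < size))
                return ENOMEM;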

+
+       intptr = (uintptr_t) a->malloc_backend(a, realsize);
+
+       if (!intptr)
+               return ENOMEM;
+
+       *memptr = (void *) ALIGN_UP(intptr + sizeof(*metadata),
+                                   (uintptr_t) align);

sizeof -> METADATA_IFMALLOC_SIZE_POW2
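
I.e.:

        *memptr = (void *) ALIGN_UP(intptr + METADATA_IFMALLOC_SIZE_POW2,
                                    (uintptr_t) align);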

+
+       metadata = uk_get_metadata_ifmalloc(*memptr);
+
+       /* check for underflow */
+       UK_ASSERT(intptr <= (uintptr_t) metadata);
+
+       metadata->size = realsize;
+       metadata->base = (void *) intptr;
+
+       return 0;
+}
+
+#endif
+
  void uk_pfree_compat(struct uk_alloc *a, void *ptr,
                     unsigned long num_pages __unused)
  {
diff --git a/lib/ukalloc/exportsyms.uk b/lib/ukalloc/exportsyms.uk
index 2357501..594999f 100644
--- a/lib/ukalloc/exportsyms.uk
+++ b/lib/ukalloc/exportsyms.uk
@@ -5,6 +5,10 @@ uk_malloc_ifpages
  uk_free_ifpages
  uk_realloc_ifpages
  uk_posix_memalign_ifpages
+uk_malloc_ifmalloc
+uk_realloc_ifmalloc
+uk_posix_memalign_ifmalloc
+uk_free_ifmalloc
  uk_calloc_compat
  uk_memalign_compat
  uk_realloc_compat
diff --git a/lib/ukalloc/include/uk/alloc.h b/lib/ukalloc/include/uk/alloc.h
index 8ab41d6..73b3a45 100644
--- a/lib/ukalloc/include/uk/alloc.h
+++ b/lib/ukalloc/include/uk/alloc.h
@@ -88,6 +88,11 @@ struct uk_alloc {
        uk_alloc_memalign_func_t memalign;
        uk_alloc_free_func_t free;
+#if CONFIG_LIBUKALLOC_IFMALLOC
+       uk_alloc_free_func_t free_backend;
+       uk_alloc_malloc_func_t malloc_backend;
+#endif
+
        /* page allocation interface */
        uk_alloc_palloc_func_t palloc;
        uk_alloc_pfree_func_t pfree;
diff --git a/lib/ukalloc/include/uk/alloc_impl.h b/lib/ukalloc/include/uk/alloc_impl.h
index 8bcca94..b15f49e 100644
--- a/lib/ukalloc/include/uk/alloc_impl.h
+++ b/lib/ukalloc/include/uk/alloc_impl.h
@@ -62,6 +62,14 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a, void **memptr,
                                size_t align, size_t size);
  void uk_free_ifpages(struct uk_alloc *a, void *ptr);
+#if CONFIG_LIBUKALLOC_IFMALLOC
+void *uk_malloc_ifmalloc(struct uk_alloc *a, size_t size);
+void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size);
+int uk_posix_memalign_ifmalloc(struct uk_alloc *a, void **memptr,
+                                    size_t align, size_t size);
+void uk_free_ifmalloc(struct uk_alloc *a, void *ptr);
+#endif
+
  /* Functionality that is provided based on malloc() and posix_memalign() */
  void *uk_calloc_compat(struct uk_alloc *a, size_t num, size_t len);
  void *uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size);
@@ -88,6 +96,25 @@ void uk_pfree_compat(struct uk_alloc *a, void *ptr, unsigned long num_pages);
                uk_alloc_register((a));                                 \
        } while (0)
+#if CONFIG_LIBUKALLOC_IFMALLOC
+#define uk_alloc_init_malloc_ifmalloc(a, malloc_f, free_f, addmem_f)   \
+       do {                                                            \
+               (a)->malloc         = uk_malloc_ifmalloc;               \
+               (a)->calloc         = uk_calloc_compat;                 \
+               (a)->realloc        = uk_realloc_ifmalloc;              \
+               (a)->posix_memalign = uk_posix_memalign_ifmalloc;       \
+               (a)->memalign       = uk_memalign_compat;               \
+               (a)->malloc_backend = (malloc_f);                       \
+               (a)->free_backend   = (free_f);                         \
+               (a)->free           = uk_free_ifmalloc;                 \
+               (a)->palloc         = uk_palloc_compat;                 \
+               (a)->pfree          = uk_pfree_compat;                  \
+               (a)->addmem         = (addmem_f);                       \
+                                                                       \
+               uk_alloc_register((a));                                 \
+       } while (0)
+#endif
+
  /* Shortcut for doing a registration of an allocator that only
   * implements palloc(), pfree(), addmem()
   */




 

