[UNIKRAFT PATCH v2 4/5] lib/ukallocpool: `lib/ukalloc` compatible interface



Provide a ukalloc-compatible interface. This enables using pools
through the common allocator API, e.g. `uk_malloc()`, `uk_free()`,
and `uk_memalign()`.
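
For illustration, a minimal usage sketch (it assumes a pool `p` was
already set up with `uk_allocpool_init()`; the request size of 64
bytes is a made-up placeholder that must not exceed the pool's
object size):

    struct uk_alloc *a;
    void *obj;

    /* Retrieve the ukalloc-compatible interface of the pool */
    a = uk_allocpool2ukalloc(p);

    /* Requests now go through the common allocator API; anything
     * larger than the pool's object size fails with NULL.
     */
    obj = uk_malloc(a, 64);
    if (obj)
        uk_free(a, obj);

On hot paths, `uk_allocpool_take()` and `uk_allocpool_return()` remain
preferable because they avoid the indirection through the uk_alloc
function pointers.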

Signed-off-by: Simon Kuenzer <simon.kuenzer@xxxxxxxxx>
---
 lib/ukallocpool/exportsyms.uk          |  1 +
 lib/ukallocpool/include/uk/allocpool.h | 16 ++++++
 lib/ukallocpool/pool.c                 | 70 ++++++++++++++++++++++++++
 3 files changed, 87 insertions(+)

diff --git a/lib/ukallocpool/exportsyms.uk b/lib/ukallocpool/exportsyms.uk
index 9e19e8b2..d119db32 100644
--- a/lib/ukallocpool/exportsyms.uk
+++ b/lib/ukallocpool/exportsyms.uk
@@ -5,3 +5,4 @@ uk_allocpool_reqmem
 uk_allocpool_availcount
 uk_allocpool_take
 uk_allocpool_return
+uk_allocpool2ukalloc
diff --git a/lib/ukallocpool/include/uk/allocpool.h b/lib/ukallocpool/include/uk/allocpool.h
index 13fbd187..a60acb35 100644
--- a/lib/ukallocpool/include/uk/allocpool.h
+++ b/lib/ukallocpool/include/uk/allocpool.h
@@ -122,6 +122,18 @@ struct uk_allocpool *uk_allocpool_init(void *base, size_t len,
                                       uk_allocpool_obj_init_t obj_init,
                                       void *obj_init_cookie);
 
+/**
+ * Return uk_alloc compatible interface for allocpool.
+ * With this interface, uk_malloc(), uk_free(), etc. can
+ * be used with the pool.
+ *
+ * @param p
+ *  Pointer to memory pool.
+ * @return
+ *  Pointer to uk_alloc interface of given pool.
+ */
+struct uk_alloc *uk_allocpool2ukalloc(struct uk_allocpool *p);
+
 /**
  * Return the number of current available (free) objects
  *
@@ -134,6 +146,8 @@ unsigned int uk_allocpool_availcount(struct uk_allocpool *p);
 
 /**
  * Get one object from a pool
+ * HINT: It is recommended to use this call instead of uk_malloc() whenever
+ *       feasible. This call avoids indirections.
  *
  * @param p
  *  Pointer to memory pool.
@@ -145,6 +159,8 @@ void *uk_allocpool_take(struct uk_allocpool *p);
 
 /**
  * Return one object back to a pool
+ * HINT: It is recommended to use this call instead of uk_free() whenever
+ *       feasible. This call avoids indirections.
  *
  * @param p
  *  Pointer to memory pool.
diff --git a/lib/ukallocpool/pool.c b/lib/ukallocpool/pool.c
index a6d868b9..f063ed74 100644
--- a/lib/ukallocpool/pool.c
+++ b/lib/ukallocpool/pool.c
@@ -64,6 +64,8 @@
 #define MIN_OBJ_LEN   sizeof(struct uk_list_head)
 
 struct uk_allocpool {
+       struct uk_alloc self;
+
        struct uk_list_head free_obj;
        unsigned int free_obj_count;
 
@@ -81,6 +83,18 @@ struct free_obj {
        struct uk_list_head list;
 };
 
+static inline struct uk_allocpool *ukalloc2pool(struct uk_alloc *a)
+{
+       UK_ASSERT(a);
+       return __containerof(a, struct uk_allocpool, self);
+}
+
+struct uk_alloc *uk_allocpool2ukalloc(struct uk_allocpool *p)
+{
+       UK_ASSERT(p);
+       return &p->self;
+}
+
 static inline void _prepend_free_obj(struct uk_allocpool *p, void *obj)
 {
        struct uk_list_head *entry;
@@ -110,6 +124,42 @@ static inline void *_take_free_obj(struct uk_allocpool *p)
        return (void *) obj;
 }
 
+static void pool_free(struct uk_alloc *a, void *ptr)
+{
+       struct uk_allocpool *p = ukalloc2pool(a);
+
+       if (likely(ptr))
+               _prepend_free_obj(p, ptr);
+}
+
+static void *pool_malloc(struct uk_alloc *a, size_t size)
+{
+       struct uk_allocpool *p = ukalloc2pool(a);
+
+       if (unlikely((size > p->obj_len)
+                    || uk_list_empty(&p->free_obj))) {
+               errno = ENOMEM;
+               return NULL;
+       }
+
+       return _take_free_obj(p);
+}
+
+static int pool_posix_memalign(struct uk_alloc *a, void **memptr, size_t align,
+                               size_t size)
+{
+       struct uk_allocpool *p = ukalloc2pool(a);
+
+       if (unlikely((size > p->obj_len)
+                    || (align > p->obj_align)
+                    || uk_list_empty(&p->free_obj))) {
+               return ENOMEM;
+       }
+
+       *memptr = _take_free_obj(p);
+       return 0;
+}
+
 void *uk_allocpool_take(struct uk_allocpool *p)
 {
        UK_ASSERT(p);
@@ -127,6 +177,14 @@ void uk_allocpool_return(struct uk_allocpool *p, void *obj)
        _prepend_free_obj(p, obj);
 }
 
+#if CONFIG_LIBUKALLOC_IFSTATS
+static size_t pool_availmem(struct uk_alloc *a)
+{
+       struct uk_allocpool *p = ukalloc2pool(a);
+       return (size_t) p->free_obj_count * p->obj_len;
+}
+#endif
+
 size_t uk_allocpool_reqmem(unsigned int obj_count, size_t obj_len,
                           size_t obj_align)
 {
@@ -202,6 +260,18 @@ out:
        p->base            = base;
        p->parent          = NULL;
 
+       uk_alloc_init_malloc(&p->self,
+                            pool_malloc,
+                            uk_calloc_compat,
+                            uk_realloc_compat,
+                            pool_free,
+                            pool_posix_memalign,
+                            uk_memalign_compat,
+                            NULL);
+#if CONFIG_LIBUKALLOC_IFSTATS
+       p->self.availmem = pool_availmem;
+#endif
+
        uk_pr_debug("%p: Pool created (%"__PRIsz" B): %u objs of %"__PRIsz" B, aligned to %"__PRIsz" B\n",
                    p, len, p->obj_count, p->obj_len, p->obj_align);
        return p;
-- 
2.20.1
