[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[UNIKRAFT PATCH v4 4/5] lib/ukallocpool: `lib/ukalloc` compatible interface



Provide a ukalloc-compatible interface. This enables using pools
through the common allocator interface, i.e. `uk_malloc()`, `uk_free()`,
and `uk_memalign()`.

Signed-off-by: Simon Kuenzer <simon.kuenzer@xxxxxxxxx>
---
 lib/ukallocpool/exportsyms.uk          |  1 +
 lib/ukallocpool/include/uk/allocpool.h | 16 ++++++
 lib/ukallocpool/pool.c                 | 76 +++++++++++++++++++++++++-
 3 files changed, 92 insertions(+), 1 deletion(-)

diff --git a/lib/ukallocpool/exportsyms.uk b/lib/ukallocpool/exportsyms.uk
index 0bd38595..68c16ebe 100644
--- a/lib/ukallocpool/exportsyms.uk
+++ b/lib/ukallocpool/exportsyms.uk
@@ -6,3 +6,4 @@ uk_allocpool_availcount
 uk_allocpool_objlen
 uk_allocpool_take
 uk_allocpool_return
+uk_allocpool2ukalloc
diff --git a/lib/ukallocpool/include/uk/allocpool.h b/lib/ukallocpool/include/uk/allocpool.h
index 7299286b..6974ec0c 100644
--- a/lib/ukallocpool/include/uk/allocpool.h
+++ b/lib/ukallocpool/include/uk/allocpool.h
@@ -118,6 +118,18 @@ void uk_allocpool_free(struct uk_allocpool *p);
 struct uk_allocpool *uk_allocpool_init(void *base, size_t len,
                                       size_t obj_len, size_t obj_align);
 
+/**
+ * Return uk_alloc compatible interface for allocpool.
+ * With this interface, uk_malloc(), uk_free(), etc. can
+ * be used with the pool.
+ *
+ * @param p
+ *  Pointer to memory pool.
+ * @return
+ *  Pointer to uk_alloc interface of given pool.
+ */
+struct uk_alloc *uk_allocpool2ukalloc(struct uk_allocpool *p);
+
 /**
  * Return the number of current available (free) objects
  *
@@ -140,6 +152,8 @@ size_t uk_allocpool_objlen(struct uk_allocpool *p);
 
 /**
  * Get one object from a pool
+ * HINT: It is recommended to use this call instead of uk_malloc() whenever
+ *       feasible. This call avoids indirections.
  *
  * @param p
  *  Pointer to memory pool.
@@ -151,6 +165,8 @@ void *uk_allocpool_take(struct uk_allocpool *p);
 
 /**
  * Return one object back to a pool
+ * HINT: It is recommended to use this call instead of uk_free() whenever
+ *       feasible. This call avoids indirections.
  *
  * @param p
  *  Pointer to memory pool.
diff --git a/lib/ukallocpool/pool.c b/lib/ukallocpool/pool.c
index 31fc8b2d..f9e5c389 100644
--- a/lib/ukallocpool/pool.c
+++ b/lib/ukallocpool/pool.c
@@ -67,6 +67,8 @@
 #define MIN_OBJ_LEN   sizeof(struct uk_list_head)
 
 struct uk_allocpool {
+       struct uk_alloc self;
+
        struct uk_list_head free_obj;
        unsigned int free_obj_count;
 
@@ -82,6 +84,21 @@ struct free_obj {
        struct uk_list_head list;
 };
 
+static inline struct uk_allocpool *ukalloc2pool(struct uk_alloc *a)
+{
+       UK_ASSERT(a);
+       return __containerof(a, struct uk_allocpool, self);
+}
+
+#define allocpool2ukalloc(p) \
+       (&(p)->self)
+
+struct uk_alloc *uk_allocpool2ukalloc(struct uk_allocpool *p)
+{
+       UK_ASSERT(p);
+       return allocpool2ukalloc(p);
+}
+
 static inline void _prepend_free_obj(struct uk_allocpool *p, void *obj)
 {
        struct uk_list_head *entry;
@@ -109,6 +126,42 @@ static inline void *_take_free_obj(struct uk_allocpool *p)
        return (void *) obj;
 }
 
+static void pool_free(struct uk_alloc *a, void *ptr)
+{
+       struct uk_allocpool *p = ukalloc2pool(a);
+
+       if (likely(ptr))
+               _prepend_free_obj(p, ptr);
+}
+
+static void *pool_malloc(struct uk_alloc *a, size_t size)
+{
+       struct uk_allocpool *p = ukalloc2pool(a);
+
+       if (unlikely((size > p->obj_len)
+                    || uk_list_empty(&p->free_obj))) {
+               errno = ENOMEM;
+               return NULL;
+       }
+
+       return _take_free_obj(p);
+}
+
+static int pool_posix_memalign(struct uk_alloc *a, void **memptr, size_t align,
+                               size_t size)
+{
+       struct uk_allocpool *p = ukalloc2pool(a);
+
+       if (unlikely((size > p->obj_len)
+                    || (align > p->obj_align)
+                    || uk_list_empty(&p->free_obj))) {
+               return ENOMEM;
+       }
+
+       *memptr = _take_free_obj(p);
+       return 0;
+}
+
 void *uk_allocpool_take(struct uk_allocpool *p)
 {
        UK_ASSERT(p);
@@ -126,6 +179,15 @@ void uk_allocpool_return(struct uk_allocpool *p, void *obj)
        _prepend_free_obj(p, obj);
 }
 
+#if CONFIG_LIBUKALLOC_IFSTATS
+static ssize_t pool_availmem(struct uk_alloc *a)
+{
+       struct uk_allocpool *p = ukalloc2pool(a);
+
+       return (size_t) p->free_obj_count * p->obj_len;
+}
+#endif
+
 size_t uk_allocpool_reqmem(unsigned int obj_count, size_t obj_len,
                           size_t obj_align)
 {
@@ -173,7 +235,7 @@ struct uk_allocpool *uk_allocpool_init(void *base, size_t len,
 
        p = (struct uk_allocpool *) base;
        memset(p, 0, sizeof(*p));
-       a = uk_allocpool2alloc(p);
+       a = uk_allocpool2ukalloc(p);
 
        obj_alen = ALIGN_UP(obj_len, obj_align);
        obj_ptr = (void *) ALIGN_UP((uintptr_t) base + sizeof(*p),
@@ -202,6 +264,18 @@ out:
        p->base            = base;
        p->parent          = NULL;
 
+       uk_alloc_init_malloc(a,
+                            pool_malloc,
+                            uk_calloc_compat,
+                            uk_realloc_compat,
+                            pool_free,
+                            pool_posix_memalign,
+                            uk_memalign_compat,
+                            NULL);
+#if CONFIG_LIBUKALLOC_IFSTATS
+       p->self.availmem = pool_availmem;
+#endif
+
        uk_pr_debug("%p: Pool created (%"__PRIsz" B): %u objs of %"__PRIsz" B, aligned to %"__PRIsz" B\n",
                    p, len, p->obj_count, p->obj_len, p->obj_align);
        return p;
-- 
2.20.1



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.