
[Minios-devel] [UNIKRAFT PATCH v5 1/4] lib/ukalloc: change page allocation api to take a number of pages



Change palloc and pfree to take a number of pages as their argument
instead of an order.
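
The new prototypes (wrappers declared in uk/alloc.h, see the diff below) are:

    void *uk_palloc(struct uk_alloc *a, unsigned long num_pages);
    void  uk_pfree(struct uk_alloc *a, void *ptr, unsigned long num_pages);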

The number of pages is then converted to an order within the binary buddy
allocator.  This can be done in constant time on many CPU architectures,
including x86 (by rounding up to the logarithm of the next power of two).
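
This is what the page_number_to_order() helper added to bbuddy.c below
does; condensed, and assuming ukarch_flsl() returns the index of the most
significant set bit (typically a single bit-scan instruction on x86):

    /* Round num_pages up to the next power of two and return its log,
     * e.g. 1 -> 0, 2 -> 1, 3 -> 2, 4 -> 2, 5 -> 3.
     */
    static inline unsigned long page_number_to_order(unsigned long num_pages)
    {
            /* ukarch_flsl() is undefined for 0, so handle 1 explicitly */
            if (num_pages == 1)
                    return 0;
            return ukarch_flsl(num_pages - 1) + 1;
    }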

This allows for a cleaner interface and simplifies the implementation of
new, non-power-of-two-based memory allocators.

This also makes uk_malloc_ifpages more efficient, because it replaces
uk_alloc_size_to_order, which was essentially an inefficient, loop-based
way of rounding up to the logarithm of the next power of two (assuming a
smart compiler did not already optimize it).
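
As a concrete example (assuming 4 KiB pages and a 64-bit build, so an
8-byte per-allocation header), a 9000 byte uk_malloc_ifpages() request
gives realsize = 9008:

    old: uk_alloc_size_to_order(9008) -> loops twice, order 2  (4 pages)
    new: size_to_num_pages(9008)      -> ALIGN_UP(9008, 4096) / 4096 = 3 pages

The binary buddy allocator still rounds the 3 pages up to an order-2
allocation internally, but the conversion itself is constant time and
non-power-of-two allocators can serve the request with exactly 3 pages.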

Port all palloc and pfree calls within the main tree to the new API.
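
At the call sites this means passing 1 << order (or the exact page count
where it is known) instead of the order, e.g. in lib/uksched:

    old: stack = uk_palloc(allocator, STACK_SIZE_PAGE_ORDER);
    new: stack = uk_palloc(allocator, 1 << STACK_SIZE_PAGE_ORDER);

and single-page allocations now pass 1 instead of 0, i.e. uk_palloc(a, 1)
and uk_pfree(a, ptr, 1).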

Signed-off-by: Hugo Lefeuvre <hugo.lefeuvre@xxxxxxxxx>

---
Changes v5:
 - change page allocation API to take a number of pages instead of a size

diff --git a/lib/ukalloc/alloc.c b/lib/ukalloc/alloc.c
index 708bd02..a57a13c 100644
--- a/lib/ukalloc/alloc.c
+++ b/lib/ukalloc/alloc.c
@@ -53,6 +53,8 @@
 #include <uk/assert.h>
 #include <uk/arch/limits.h>
 
+#define size_to_num_pages(size) ALIGN_UP((unsigned long)(size), __PAGE_SIZE) / __PAGE_SIZE
+
 static struct uk_alloc *uk_alloc_head;
 
 int uk_alloc_register(struct uk_alloc *a)
@@ -141,7 +143,7 @@ static size_t uk_getmallocsize(const void *ptr)
                /*
                 * special case: the memory was page-aligned
                 * In this case the allocated size should not account for the
-                * previous page which was used for storing the order
+                * previous page which was used for storing the number of pages
                 */
                mallocsize -= __PAGE_SIZE;
        } else {
@@ -155,34 +157,24 @@ static size_t uk_getmallocsize(const void *ptr)
        return mallocsize;
 }
 
-/* return the smallest order (1<<order pages) that can fit size bytes */
-static inline size_t uk_alloc_size_to_order(size_t size)
-{
-       size_t order = 0;
-
-       while ((__PAGE_SIZE << order) < size)
-               order++;
-       return order;
-}
-
 void *uk_malloc_ifpages(struct uk_alloc *a, size_t size)
 {
        uintptr_t intptr;
-       size_t order;
-       size_t realsize = sizeof(order) + size;
+       unsigned long num_pages;
+       size_t realsize = sizeof(num_pages) + size;
 
        UK_ASSERT(a);
        if (!size)
                return NULL;
 
-       order = uk_alloc_size_to_order(realsize);
-       intptr = (uintptr_t)uk_palloc(a, order);
+       num_pages = size_to_num_pages(realsize);
+       intptr = (uintptr_t)uk_palloc(a, num_pages);
 
        if (!intptr)
                return NULL;
 
-       *(size_t *)intptr = order;
-       return (void *)(intptr + sizeof(order));
+       *(size_t *)intptr = num_pages;
+       return (void *)(intptr + sizeof(num_pages));
 }
 
 void uk_free_ifpages(struct uk_alloc *a, void *ptr)
@@ -231,7 +223,7 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a,
 {
        uintptr_t *intptr;
        size_t realsize;
-       size_t order;
+       unsigned long num_pages;
 
        UK_ASSERT(a);
        if (((align - 1) & align) != 0
@@ -251,16 +243,16 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a,
        if (align == __PAGE_SIZE)
                realsize = ALIGN_UP(size + __PAGE_SIZE, align);
        else
-               realsize = ALIGN_UP(size + sizeof(order), align);
+               realsize = ALIGN_UP(size + sizeof(num_pages), align);
 
-       order = uk_alloc_size_to_order(realsize);
-       intptr = uk_palloc(a, order);
+       num_pages = size_to_num_pages(realsize);
+       intptr = uk_palloc(a, num_pages);
 
        if (!intptr)
                return ENOMEM;
 
-       *(size_t *)intptr = order;
-       *memptr = (void *) ALIGN_UP((uintptr_t)intptr + sizeof(order), align);
+       *(size_t *)intptr = num_pages;
+       *memptr = (void *) ALIGN_UP((uintptr_t)intptr + sizeof(num_pages), align);
        return 0;
 }
 
diff --git a/lib/ukalloc/include/uk/alloc.h b/lib/ukalloc/include/uk/alloc.h
index 798d45a..371eb8d 100644
--- a/lib/ukalloc/include/uk/alloc.h
+++ b/lib/ukalloc/include/uk/alloc.h
@@ -70,9 +70,9 @@ typedef void  (*uk_alloc_free_func_t)
                (struct uk_alloc *a, void *ptr);
 #if CONFIG_LIBUKALLOC_IFPAGES
 typedef void* (*uk_alloc_palloc_func_t)
-               (struct uk_alloc *a, size_t order);
+               (struct uk_alloc *a, unsigned long num_pages);
 typedef void  (*uk_alloc_pfree_func_t)
-               (struct uk_alloc *a, void *ptr, size_t order);
+               (struct uk_alloc *a, void *ptr, unsigned long num_pages);
 #endif
 typedef int   (*uk_alloc_addmem_func_t)
                (struct uk_alloc *a, void *base, size_t size);
@@ -194,33 +194,33 @@ static inline void uk_free(struct uk_alloc *a, void *ptr)
 }
 
 #if CONFIG_LIBUKALLOC_IFPAGES
-static inline void *uk_do_palloc(struct uk_alloc *a, size_t order)
+static inline void *uk_do_palloc(struct uk_alloc *a, unsigned long num_pages)
 {
        UK_ASSERT(a);
-       return a->palloc(a, order);
+       return a->palloc(a, num_pages);
 }
-static inline void *uk_palloc(struct uk_alloc *a, size_t order)
+static inline void *uk_palloc(struct uk_alloc *a, unsigned long num_pages)
 {
        if (unlikely(!a || !a->palloc))
                return NULL;
-       return a->palloc(a, order);
+       return a->palloc(a, num_pages);
 }
 static inline void *uk_malloc_page(struct uk_alloc *a)
 {
-       return uk_palloc(a, 0);
+       return uk_palloc(a, 1);
 }
-static inline void uk_do_pfree(struct uk_alloc *a, void *ptr, size_t order)
+static inline void uk_do_pfree(struct uk_alloc *a, void *ptr, unsigned long num_pages)
 {
        UK_ASSERT(a);
-       a->pfree(a, ptr, order);
+       a->pfree(a, ptr, num_pages);
 }
-static inline void uk_pfree(struct uk_alloc *a, void *ptr, size_t order)
+static inline void uk_pfree(struct uk_alloc *a, void *ptr, unsigned long num_pages)
 {
-       uk_do_pfree(a, ptr, order);
+       uk_do_pfree(a, ptr, num_pages);
 }
 static inline void uk_free_page(struct uk_alloc *a, void *ptr)
 {
-       return uk_pfree(a, ptr, 0);
+       return uk_pfree(a, ptr, 1);
 }
 #endif
 static inline int uk_alloc_addmem(struct uk_alloc *a, void *base,
diff --git a/lib/ukallocbbuddy/bbuddy.c b/lib/ukallocbbuddy/bbuddy.c
index 7de3b84..e20a188 100644
--- a/lib/ukallocbbuddy/bbuddy.c
+++ b/lib/ukallocbbuddy/bbuddy.c
@@ -44,6 +44,7 @@
 #include <uk/allocbbuddy.h>
 #include <uk/alloc_impl.h>
 #include <uk/arch/limits.h>
+#include <uk/arch/atomic.h>
 #include <uk/print.h>
 #include <uk/assert.h>
 #include <uk/page.h>
@@ -224,10 +225,27 @@ static ssize_t bbuddy_availmem(struct uk_alloc *a)
 }
 #endif
 
+/* return log of the next power of two of passed number */
+static inline unsigned long page_number_to_order(unsigned long num_pages)
+{
+       UK_ASSERT(num_pages != 0);
+
+       /* ukarch_flsl has undefined behavior when called with zero */
+       if (num_pages == 1)
+               return 0;
+
+       /* ukarch_flsl(num_pages - 1) returns log of the previous power of two
+        * of num_pages. ukarch_flsl is called with `num_pages - 1` and not
+        * `num_pages` to handle the case where num_pages is already a power
+        * of two.
+        */
+       return ukarch_flsl(num_pages - 1) + 1;
+}
+
 /*********************
  * BINARY BUDDY PAGE ALLOCATOR
  */
-static void *bbuddy_palloc(struct uk_alloc *a, size_t order)
+static void *bbuddy_palloc(struct uk_alloc *a, unsigned long num_pages)
 {
        struct uk_bbpalloc *b;
        size_t i;
@@ -237,6 +255,8 @@ static void *bbuddy_palloc(struct uk_alloc *a, size_t order)
        UK_ASSERT(a != NULL);
        b = (struct uk_bbpalloc *)&a->priv;
 
+       size_t order = (size_t)page_number_to_order(num_pages);
+
        /* Find smallest order which can satisfy the request. */
        for (i = order; i < FREELIST_SIZE; i++) {
                if (!FREELIST_EMPTY(b->free_head[i]))
@@ -280,7 +300,7 @@ no_memory:
        return NULL;
 }
 
-static void bbuddy_pfree(struct uk_alloc *a, void *obj, size_t order)
+static void bbuddy_pfree(struct uk_alloc *a, void *obj, unsigned long num_pages)
 {
        struct uk_bbpalloc *b;
        chunk_head_t *freed_ch, *to_merge_ch;
@@ -290,6 +310,8 @@ static void bbuddy_pfree(struct uk_alloc *a, void *obj, size_t order)
        UK_ASSERT(a != NULL);
        b = (struct uk_bbpalloc *)&a->priv;
 
+       size_t order = (size_t)page_number_to_order(num_pages);
+
        /* if the object is not page aligned it was clearly not from us */
        UK_ASSERT((((uintptr_t)obj) & (__PAGE_SIZE - 1)) == 0);
 
diff --git a/lib/uksched/sched.c b/lib/uksched/sched.c
index a250547..9adc841 100644
--- a/lib/uksched/sched.c
+++ b/lib/uksched/sched.c
@@ -141,7 +141,7 @@ static void *create_stack(struct uk_alloc *allocator)
 {
        void *stack;
 
-       stack = uk_palloc(allocator, STACK_SIZE_PAGE_ORDER);
+       stack = uk_palloc(allocator, 1 << STACK_SIZE_PAGE_ORDER);
        if (stack == NULL) {
                uk_pr_warn("Error allocating thread stack.");
                return NULL;
@@ -250,7 +250,7 @@ void uk_sched_thread_destroy(struct uk_sched *sched, struct uk_thread *thread)
 
        UK_TAILQ_REMOVE(&sched->exited_threads, thread, thread_list);
        uk_thread_fini(thread, sched->allocator);
-       uk_pfree(sched->allocator, thread->stack, STACK_SIZE_PAGE_ORDER);
+       uk_pfree(sched->allocator, thread->stack, 1 << STACK_SIZE_PAGE_ORDER);
        if (thread->tls)
                uk_free(sched->allocator, thread->tls);
        uk_free(sched->allocator, thread);
diff --git a/plat/xen/drivers/9p/9pfront.c b/plat/xen/drivers/9p/9pfront.c
index da55fd6..61610ee 100644
--- a/plat/xen/drivers/9p/9pfront.c
+++ b/plat/xen/drivers/9p/9pfront.c
@@ -189,10 +189,10 @@ static void p9front_free_dev_ring(struct p9front_dev *p9fdev, int idx)
        unbind_evtchn(ring->evtchn);
        for (i = 0; i < (1 << p9fdev->ring_order); i++)
                gnttab_end_access(ring->intf->ref[i]);
-       uk_pfree(a, ring->data.in,
-               p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT);
+       unsigned long num_data_byte_pages = 1 << (p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT);
+       uk_pfree(a, ring->data.in, num_data_byte_pages);
        gnttab_end_access(ring->ref);
-       uk_pfree(a, ring->intf, 0);
+       uk_pfree(a, ring->intf, 1);
        ring->initialized = false;
 }
 
@@ -226,7 +226,7 @@ static int p9front_allocate_dev_ring(struct p9front_dev *p9fdev, int idx)
        ring->dev = p9fdev;
 
        /* Allocate ring intf page. */
-       ring->intf = uk_palloc(a, 0);
+       ring->intf = uk_palloc(a, 1);
        if (!ring->intf) {
                rc = -ENOMEM;
                goto out;
@@ -239,8 +239,8 @@ static int p9front_allocate_dev_ring(struct p9front_dev *p9fdev, int idx)
        UK_ASSERT(ring->ref != GRANT_INVALID_REF);
 
        /* Allocate memory for the data. */
-       data_bytes = uk_palloc(a,
-                       p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT);
+       unsigned long num_data_byte_pages = 1 << (p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT);
+       data_bytes = uk_palloc(a, num_data_byte_pages);
        if (!data_bytes) {
                rc = -ENOMEM;
                goto out_free_intf;
@@ -295,11 +295,11 @@ out_free_thread:
 out_free_grants:
        for (i = 0; i < (1 << p9fdev->ring_order); i++)
                gnttab_end_access(ring->intf->ref[i]);
-       uk_pfree(a, data_bytes,
-               p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT);
+       unsigned long num_data_byte_pages = 1 << (p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT);
+       uk_pfree(a, data_bytes, num_data_byte_pages);
 out_free_intf:
        gnttab_end_access(ring->ref);
-       uk_pfree(a, ring->intf, 0);
+       uk_pfree(a, ring->intf, 1);
 out:
        return rc;
 }
-- 
2.7.4

