
[Minios-devel] [UNIKRAFT PATCH v6 2/5] lib/ukalloc: change page allocation api to take a number of pages



Change palloc and pfree to take a number of pages instead of an order as
their argument.

The number of pages is then converted to an order within the binary buddy
allocator. This can be done in constant time on many CPU architectures,
including x86, by taking the base-2 logarithm of the next power of two
(e.g. with a find-last-set instruction, as ukarch_flsl provides).

This allows for a cleaner interface and simplifies the implementation of
new, non-power-of-two-based memory allocators.

This also makes uk_malloc_ifpages more efficient: it replaces
uk_alloc_size_to_order, which was essentially a linear-time loop for
rounding up to the logarithm of the next power of two (assuming a smart
compiler did not already optimize it).
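
For illustration, a minimal standalone sketch of this constant-time
conversion (the helper name pages_to_order and the use of GCC's
__builtin_clzl in place of ukarch_flsl are illustrative only, not part of
this patch):

    /* ceil(log2(num_pages)): the smallest order such that (1 << order)
     * pages can hold num_pages pages. Like ukarch_flsl/__builtin_clzl,
     * this is undefined for num_pages == 0.
     */
    static inline unsigned long pages_to_order(unsigned long num_pages)
    {
            if (num_pages == 1)
                    return 0;
            /* position of the highest set bit of (num_pages - 1), plus one */
            return (sizeof(unsigned long) * 8 - 1
                    - (unsigned long)__builtin_clzl(num_pages - 1)) + 1;
    }

    /* pages_to_order(1) == 0, pages_to_order(2) == 1, pages_to_order(3) == 2,
     * pages_to_order(4) == 2, pages_to_order(5) == 3
     */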

Port all calls to palloc within the main tree to the new API.

Signed-off-by: Hugo Lefeuvre <hugo.lefeuvre@xxxxxxxxx>

diff --git a/lib/ukalloc/alloc.c b/lib/ukalloc/alloc.c
index 56d6b7d..6dc71f4 100644
--- a/lib/ukalloc/alloc.c
+++ b/lib/ukalloc/alloc.c
@@ -53,6 +53,8 @@
 #include <uk/assert.h>
 #include <uk/arch/limits.h>
 
+#define size_to_num_pages(size) ALIGN_UP((unsigned long)(size), __PAGE_SIZE) / __PAGE_SIZE
+
 static struct uk_alloc *uk_alloc_head;
 
 int uk_alloc_register(struct uk_alloc *a)
@@ -140,7 +142,7 @@ static size_t uk_getmallocsize(const void *ptr)
                /*
                 * special case: the memory was page-aligned
                 * In this case the allocated size should not account for the
-                * previous page which was used for storing the order
+                * previous page which was used for storing the number of pages
                 */
                mallocsize -= __PAGE_SIZE;
        } else {
@@ -154,34 +156,24 @@ static size_t uk_getmallocsize(const void *ptr)
        return mallocsize;
 }
 
-/* return the smallest order (1<<order pages) that can fit size bytes */
-static inline size_t uk_alloc_size_to_order(size_t size)
-{
-       size_t order = 0;
-
-       while ((__PAGE_SIZE << order) < size)
-               order++;
-       return order;
-}
-
 void *uk_malloc_ifpages(struct uk_alloc *a, size_t size)
 {
        uintptr_t intptr;
-       size_t order;
-       size_t realsize = sizeof(order) + size;
+       unsigned long num_pages;
+       size_t realsize = sizeof(num_pages) + size;
 
        UK_ASSERT(a);
        if (!size)
                return NULL;
 
-       order = uk_alloc_size_to_order(realsize);
-       intptr = (uintptr_t)uk_palloc(a, order);
+       num_pages = size_to_num_pages(realsize);
+       intptr = (uintptr_t)uk_palloc(a, num_pages);
 
        if (!intptr)
                return NULL;
 
-       *(size_t *)intptr = order;
-       return (void *)(intptr + sizeof(order));
+       *(unsigned long *)intptr = num_pages;
+       return (void *)(intptr + sizeof(num_pages));
 }
 
 void uk_free_ifpages(struct uk_alloc *a, void *ptr)
@@ -230,7 +222,7 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a,
 {
        uintptr_t *intptr;
        size_t realsize;
-       size_t order;
+       unsigned long num_pages;
 
        UK_ASSERT(a);
        if (((align - 1) & align) != 0
@@ -250,16 +242,16 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a,
        if (align == __PAGE_SIZE)
                realsize = ALIGN_UP(size + __PAGE_SIZE, align);
        else
-               realsize = ALIGN_UP(size + sizeof(order), align);
+               realsize = ALIGN_UP(size + sizeof(num_pages), align);
 
-       order = uk_alloc_size_to_order(realsize);
-       intptr = uk_palloc(a, order);
+       num_pages = size_to_num_pages(realsize);
+       intptr = uk_palloc(a, num_pages);
 
        if (!intptr)
                return ENOMEM;
 
-       *(size_t *)intptr = order;
-       *memptr = (void *) ALIGN_UP((uintptr_t)intptr + sizeof(order), align);
+       *(size_t *)intptr = num_pages;
+       *memptr = (void *) ALIGN_UP((uintptr_t)intptr + sizeof(num_pages), align);
        return 0;
 }
 
diff --git a/lib/ukalloc/include/uk/alloc.h b/lib/ukalloc/include/uk/alloc.h
index 9e5a411..c638c27 100644
--- a/lib/ukalloc/include/uk/alloc.h
+++ b/lib/ukalloc/include/uk/alloc.h
@@ -69,9 +69,9 @@ typedef void* (*uk_alloc_realloc_func_t)
 typedef void  (*uk_alloc_free_func_t)
                (struct uk_alloc *a, void *ptr);
 typedef void* (*uk_alloc_palloc_func_t)
-               (struct uk_alloc *a, size_t order);
+               (struct uk_alloc *a, unsigned long num_pages);
 typedef void  (*uk_alloc_pfree_func_t)
-               (struct uk_alloc *a, void *ptr, size_t order);
+               (struct uk_alloc *a, void *ptr, unsigned long num_pages);
 typedef int   (*uk_alloc_addmem_func_t)
                (struct uk_alloc *a, void *base, size_t size);
 #if CONFIG_LIBUKALLOC_IFSTATS
@@ -195,28 +195,28 @@ static inline void uk_free(struct uk_alloc *a, void *ptr)
        uk_do_free(a, ptr);
 }
 
-static inline void *uk_do_palloc(struct uk_alloc *a, size_t order)
+static inline void *uk_do_palloc(struct uk_alloc *a, unsigned long num_pages)
 {
        UK_ASSERT(a);
-       return a->palloc(a, order);
+       return a->palloc(a, num_pages);
 }
 
-static inline void *uk_palloc(struct uk_alloc *a, size_t order)
+static inline void *uk_palloc(struct uk_alloc *a, unsigned long num_pages)
 {
        if (unlikely(!a || !a->palloc))
                return NULL;
-       return uk_do_palloc(a, order);
+       return uk_do_palloc(a, num_pages);
 }
 
-static inline void uk_do_pfree(struct uk_alloc *a, void *ptr, size_t order)
+static inline void uk_do_pfree(struct uk_alloc *a, void *ptr, unsigned long num_pages)
 {
        UK_ASSERT(a);
-       a->pfree(a, ptr, order);
+       a->pfree(a, ptr, num_pages);
 }
 
-static inline void uk_pfree(struct uk_alloc *a, void *ptr, size_t order)
+static inline void uk_pfree(struct uk_alloc *a, void *ptr, unsigned long num_pages)
 {
-       uk_do_pfree(a, ptr, order);
+       uk_do_pfree(a, ptr, num_pages);
 }
 
 static inline int uk_alloc_addmem(struct uk_alloc *a, void *base,
diff --git a/lib/ukallocbbuddy/bbuddy.c b/lib/ukallocbbuddy/bbuddy.c
index 7de3b84..2a35de0 100644
--- a/lib/ukallocbbuddy/bbuddy.c
+++ b/lib/ukallocbbuddy/bbuddy.c
@@ -44,6 +44,7 @@
 #include <uk/allocbbuddy.h>
 #include <uk/alloc_impl.h>
 #include <uk/arch/limits.h>
+#include <uk/arch/atomic.h>
 #include <uk/print.h>
 #include <uk/assert.h>
 #include <uk/page.h>
@@ -224,10 +225,27 @@ static ssize_t bbuddy_availmem(struct uk_alloc *a)
 }
 #endif
 
+/* return log of the next power of two of passed number */
+static inline unsigned long num_pages_to_order(unsigned long num_pages)
+{
+       UK_ASSERT(num_pages != 0);
+
+       /* ukarch_flsl has undefined behavior when called with zero */
+       if (num_pages == 1)
+               return 0;
+
+       /* ukarch_flsl(num_pages - 1) returns log of the previous power of two
+        * of num_pages. ukarch_flsl is called with `num_pages - 1` and not
+        * `num_pages` to handle the case where num_pages is already a power
+        * of two.
+        */
+       return ukarch_flsl(num_pages - 1) + 1;
+}
+
 /*********************
  * BINARY BUDDY PAGE ALLOCATOR
  */
-static void *bbuddy_palloc(struct uk_alloc *a, size_t order)
+static void *bbuddy_palloc(struct uk_alloc *a, unsigned long num_pages)
 {
        struct uk_bbpalloc *b;
        size_t i;
@@ -237,6 +255,8 @@ static void *bbuddy_palloc(struct uk_alloc *a, size_t order)
        UK_ASSERT(a != NULL);
        b = (struct uk_bbpalloc *)&a->priv;
 
+       size_t order = (size_t)num_pages_to_order(num_pages);
+
        /* Find smallest order which can satisfy the request. */
        for (i = order; i < FREELIST_SIZE; i++) {
                if (!FREELIST_EMPTY(b->free_head[i]))
@@ -280,7 +300,7 @@ no_memory:
        return NULL;
 }
 
-static void bbuddy_pfree(struct uk_alloc *a, void *obj, size_t order)
+static void bbuddy_pfree(struct uk_alloc *a, void *obj, unsigned long num_pages)
 {
        struct uk_bbpalloc *b;
        chunk_head_t *freed_ch, *to_merge_ch;
@@ -290,6 +310,8 @@ static void bbuddy_pfree(struct uk_alloc *a, void *obj, size_t order)
        UK_ASSERT(a != NULL);
        b = (struct uk_bbpalloc *)&a->priv;
 
+       size_t order = (size_t)num_pages_to_order(num_pages);
+
        /* if the object is not page aligned it was clearly not from us */
        UK_ASSERT((((uintptr_t)obj) & (__PAGE_SIZE - 1)) == 0);
 
diff --git a/lib/uksched/sched.c b/lib/uksched/sched.c
index a250547..281846d 100644
--- a/lib/uksched/sched.c
+++ b/lib/uksched/sched.c
@@ -141,7 +141,7 @@ static void *create_stack(struct uk_alloc *allocator)
 {
        void *stack;
 
-       stack = uk_palloc(allocator, STACK_SIZE_PAGE_ORDER);
+       stack = uk_palloc(allocator, 1ul << STACK_SIZE_PAGE_ORDER);
        if (stack == NULL) {
                uk_pr_warn("Error allocating thread stack.");
                return NULL;
@@ -250,7 +250,7 @@ void uk_sched_thread_destroy(struct uk_sched *sched, struct uk_thread *thread)
 
        UK_TAILQ_REMOVE(&sched->exited_threads, thread, thread_list);
        uk_thread_fini(thread, sched->allocator);
-       uk_pfree(sched->allocator, thread->stack, STACK_SIZE_PAGE_ORDER);
+       uk_pfree(sched->allocator, thread->stack, 1ul << STACK_SIZE_PAGE_ORDER);
        if (thread->tls)
                uk_free(sched->allocator, thread->tls);
        uk_free(sched->allocator, thread);
diff --git a/plat/xen/drivers/9p/9pfront.c b/plat/xen/drivers/9p/9pfront.c
index da55fd6..69dcd15 100644
--- a/plat/xen/drivers/9p/9pfront.c
+++ b/plat/xen/drivers/9p/9pfront.c
@@ -189,10 +189,9 @@ static void p9front_free_dev_ring(struct p9front_dev *p9fdev, int idx)
        unbind_evtchn(ring->evtchn);
        for (i = 0; i < (1 << p9fdev->ring_order); i++)
                gnttab_end_access(ring->intf->ref[i]);
-       uk_pfree(a, ring->data.in,
-               p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT);
+       uk_pfree(a, ring->data.in, 1ul << (p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT));
        gnttab_end_access(ring->ref);
-       uk_pfree(a, ring->intf, 0);
+       uk_pfree(a, ring->intf, 1);
        ring->initialized = false;
 }
 
@@ -226,7 +225,7 @@ static int p9front_allocate_dev_ring(struct p9front_dev *p9fdev, int idx)
        ring->dev = p9fdev;
 
        /* Allocate ring intf page. */
-       ring->intf = uk_palloc(a, 0);
+       ring->intf = uk_palloc(a, 1);
        if (!ring->intf) {
                rc = -ENOMEM;
                goto out;
@@ -239,8 +238,7 @@ static int p9front_allocate_dev_ring(struct p9front_dev *p9fdev, int idx)
        UK_ASSERT(ring->ref != GRANT_INVALID_REF);
 
        /* Allocate memory for the data. */
-       data_bytes = uk_palloc(a,
-                       p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT);
+       data_bytes = uk_palloc(a, 1ul << (p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT));
        if (!data_bytes) {
                rc = -ENOMEM;
                goto out_free_intf;
@@ -295,11 +293,10 @@ out_free_thread:
 out_free_grants:
        for (i = 0; i < (1 << p9fdev->ring_order); i++)
                gnttab_end_access(ring->intf->ref[i]);
-       uk_pfree(a, data_bytes,
-               p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT);
+       uk_pfree(a, data_bytes, 1ul << (p9fdev->ring_order + XEN_PAGE_SHIFT - PAGE_SHIFT));
 out_free_intf:
        gnttab_end_access(ring->ref);
-       uk_pfree(a, ring->intf, 0);
+       uk_pfree(a, ring->intf, 1);
 out:
        return rc;
 }
diff --git a/plat/xen/drivers/blk/blkfront.c b/plat/xen/drivers/blk/blkfront.c
index cd5297e..49d2257 100644
--- a/plat/xen/drivers/blk/blkfront.c
+++ b/plat/xen/drivers/blk/blkfront.c
@@ -562,7 +562,7 @@ static int blkfront_ring_init(struct uk_blkdev_queue *queue)
 
        UK_ASSERT(queue);
        dev = queue->dev;
-       sring = uk_palloc(queue->a, 0);
+       sring = uk_palloc(queue->a, 1);
        if (!sring)
                return -ENOMEM;
 
diff --git a/plat/xen/gnttab.c b/plat/xen/gnttab.c
index 5963950..4f28df9 100644
--- a/plat/xen/gnttab.c
+++ b/plat/xen/gnttab.c
@@ -219,7 +219,7 @@ grant_ref_t gnttab_alloc_and_grant(void **map, struct uk_alloc *a)
        UK_ASSERT(map != NULL);
        UK_ASSERT(a != NULL);
 
-       page = uk_palloc(a, 0);
+       page = uk_palloc(a, 1);
        if (page == NULL)
                return GRANT_INVALID_REF;
 
diff --git a/plat/xen/x86/mm.c b/plat/xen/x86/mm.c
index 2dd2618..249b792 100644
--- a/plat/xen/x86/mm.c
+++ b/plat/xen/x86/mm.c
@@ -281,7 +281,7 @@ static pgentry_t *need_pte(unsigned long va, struct uk_alloc *a)
 #if defined(__x86_64__)
        offset = l4_table_offset(va);
        if (!(tab[offset] & _PAGE_PRESENT)) {
-               pt_pfn = virt_to_pfn(uk_palloc(a, 0));
+               pt_pfn = virt_to_pfn(uk_palloc(a, 1));
                if (!pt_pfn)
                        return NULL;
                new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
@@ -293,7 +293,7 @@ static pgentry_t *need_pte(unsigned long va, struct uk_alloc *a)
 #endif
        offset = l3_table_offset(va);
        if (!(tab[offset] & _PAGE_PRESENT)) {
-               pt_pfn = virt_to_pfn(uk_palloc(a, 0));
+               pt_pfn = virt_to_pfn(uk_palloc(a, 1));
                if (!pt_pfn)
                        return NULL;
                new_pt_frame(&pt_pfn, pt_mfn, offset, L2_FRAME);
@@ -304,7 +304,7 @@ static pgentry_t *need_pte(unsigned long va, struct uk_alloc *a)
        tab = mfn_to_virt(pt_mfn);
        offset = l2_table_offset(va);
        if (!(tab[offset] & _PAGE_PRESENT)) {
-               pt_pfn = virt_to_pfn(uk_palloc(a, 0));
+               pt_pfn = virt_to_pfn(uk_palloc(a, 1));
                if (!pt_pfn)
                        return NULL;
                new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
@@ -700,10 +700,10 @@ void _arch_init_p2m(struct uk_alloc *a)
        if (((max_pfn - 1) >> L3_P2M_SHIFT) > 0)
                UK_CRASH("Error: Too many pfns.\n");
 
-       l3_list = uk_palloc(a, 0);
+       l3_list = uk_palloc(a, 1);
        for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES) {
                if (!(pfn % (P2M_ENTRIES * P2M_ENTRIES))) {
-                       l2_list = uk_palloc(a, 0);
+                       l2_list = uk_palloc(a, 1);
                        l3_list[L3_P2M_IDX(pfn)] = virt_to_mfn(l2_list);
                        l2_list_pages[L3_P2M_IDX(pfn)] = l2_list;
                }
-- 
2.7.4

