
Re: [Minios-devel] [UNIKRAFT PATCH] lib/ukallocbbuddy: Correct region bitmap allocation and usage



Hello,
As a general comment, this patch has to be rebased to include the exported symbols.

Please find some comments inline:

On 06/10/2018 05:23 PM, Simon Kuenzer wrote:
The usage of each memory region that is added to the binary
buddy allocator is tracked with a bitmap. This patch corrects
a wrong size calculation for the bitmap and wrong calculations
of bit positions.

Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
Signed-off-by: Simon Kuenzer <simon.kuenzer@xxxxxxxxx>
---
  lib/ukallocbbuddy/bbuddy.c | 76 ++++++++++++++++++++++++++++++----------------
  1 file changed, 50 insertions(+), 26 deletions(-)

diff --git a/lib/ukallocbbuddy/bbuddy.c b/lib/ukallocbbuddy/bbuddy.c
index b830995..2513b24 100644
--- a/lib/ukallocbbuddy/bbuddy.c
+++ b/lib/ukallocbbuddy/bbuddy.c
@@ -69,7 +69,7 @@ struct uk_bbpalloc_memr {
        unsigned long first_page;
        unsigned long nr_pages;
        unsigned long mm_alloc_bitmap_size;
-       unsigned long mm_alloc_bitmap[];
+       unsigned long *mm_alloc_bitmap;
  };
struct uk_bbpalloc {
@@ -93,10 +93,11 @@ struct uk_bbpalloc {
   *  *_idx == Index into `mm_alloc_bitmap' array.
   *  *_off == Bit offset within an element of the `mm_alloc_bitmap' array.
   */
-#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
+#define BITS_PER_BYTE     8

This BITS_PER_BYTE definition could be moved to a common header, say "value.h", in the root directory of include/uk. That would give us a generic definition that other libraries can reuse.
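Something along these lines would do; the header name and guard are just a suggestion:

    /* include/uk/value.h (hypothetical shared header) */
    #ifndef __UK_VALUE_H__
    #define __UK_VALUE_H__

    /* Number of bits in a byte, shared by all libraries */
    #define BITS_PER_BYTE 8

    #endif /* __UK_VALUE_H__ */

lib/ukallocbbuddy could then #include <uk/value.h> instead of defining it locally.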

+#define PAGES_PER_MAPWORD (sizeof(unsigned long) * BITS_PER_BYTE)
static inline struct uk_bbpalloc_memr *map_get_memr(struct uk_bbpalloc *b,
-                                                   unsigned long page_num)
+                                                   unsigned long page_va)
  {
        struct uk_bbpalloc_memr *memr = NULL;
@@ -106,8 +107,8 @@ static inline struct uk_bbpalloc_memr *map_get_memr(struct uk_bbpalloc *b,
         * of them. It should be just one region in most cases
         */
        for (memr = b->memr_head; memr != NULL; memr = memr->next) {
-               if ((page_num >= memr->first_page)
-                   && (page_num < (memr->first_page + memr->nr_pages)))
+               if ((page_va >= memr->first_page)
+                   && (page_va < (memr->first_page + memr->nr_pages * __PAGE_SIZE)))
                        return memr;
        }
@@ -117,18 +118,21 @@ static inline struct uk_bbpalloc_memr *map_get_memr(struct uk_bbpalloc *b,
        return NULL;
  }
-static inline int allocated_in_map(struct uk_bbpalloc *b,
-                                  unsigned long page_num)
+static inline unsigned long allocated_in_map(struct uk_bbpalloc *b,
+                                  unsigned long page_va)
  {
-       struct uk_bbpalloc_memr *memr = map_get_memr(b, page_num);
+       struct uk_bbpalloc_memr *memr = map_get_memr(b, page_va);
+       unsigned long bm_idx, bm_off;
        /* treat pages outside of region as allocated */
        if (!memr)
                return 1;
-       page_num -= memr->first_page;
-       return ((memr)->mm_alloc_bitmap[(page_num) / PAGES_PER_MAPWORD]
-               & (1UL << ((page_num) & (PAGES_PER_MAPWORD - 1))));
+       page_va -= memr->first_page;
+       bm_idx = (page_va / __PAGE_SIZE) / PAGES_PER_MAPWORD;
+       bm_off = (page_va / __PAGE_SIZE) & (PAGES_PER_MAPWORD - 1);
+
+       return ((memr)->mm_alloc_bitmap[bm_idx] & (1UL << bm_off));
  }
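
For the record, the index math here: with a 64-bit unsigned long, PAGES_PER_MAPWORD is 64, so a page_va that lies 0x41000 bytes past memr->first_page (with 4 KiB pages) is page 65 of the region, giving bm_idx = 65 / 64 = 1 and bm_off = 65 & 63 = 1, i.e. bit 1 of the second bitmap word. If I read the old code correctly, it divided the byte offset itself by PAGES_PER_MAPWORD, which is the bit-position bug this hunk fixes.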
static void map_alloc(struct uk_bbpalloc *b, uintptr_t first_page,
@@ -144,14 +148,14 @@ static void map_alloc(struct uk_bbpalloc *b, uintptr_t first_page,
         */
        memr = map_get_memr(b, first_page);
        UK_ASSERT(memr != NULL);
-       UK_ASSERT((first_page + nr_pages)
-                 <= (memr->first_page + memr->nr_pages));
+       UK_ASSERT((first_page + nr_pages * __PAGE_SIZE)
+                 <= (memr->first_page + memr->nr_pages * __PAGE_SIZE));
        first_page -= memr->first_page;
-       curr_idx = first_page / PAGES_PER_MAPWORD;
-       start_off = first_page & (PAGES_PER_MAPWORD - 1);
-       end_idx = (first_page + nr_pages) / PAGES_PER_MAPWORD;
-       end_off = (first_page + nr_pages) & (PAGES_PER_MAPWORD - 1);
+       curr_idx = (first_page / __PAGE_SIZE) / PAGES_PER_MAPWORD;
+       start_off = (first_page / __PAGE_SIZE) & (PAGES_PER_MAPWORD - 1);
+       end_idx = ((first_page + nr_pages * __PAGE_SIZE) / __PAGE_SIZE) / PAGES_PER_MAPWORD;
+       end_off = ((first_page + nr_pages * __PAGE_SIZE) / __PAGE_SIZE) & (PAGES_PER_MAPWORD - 1);
        if (curr_idx == end_idx) {
                memr->mm_alloc_bitmap[curr_idx] |=
@@ -179,14 +183,14 @@ static void map_free(struct uk_bbpalloc *b, uintptr_t first_page,
         */
        memr = map_get_memr(b, first_page);
        UK_ASSERT(memr != NULL);
-       UK_ASSERT((first_page + nr_pages)
-                 <= (memr->first_page + memr->nr_pages));
+       UK_ASSERT((first_page + nr_pages * __PAGE_SIZE)
+                 <= (memr->first_page + memr->nr_pages * __PAGE_SIZE));
        first_page -= memr->first_page;
-       curr_idx = first_page / PAGES_PER_MAPWORD;
-       start_off = first_page & (PAGES_PER_MAPWORD - 1);
-       end_idx = (first_page + nr_pages) / PAGES_PER_MAPWORD;
-       end_off = (first_page + nr_pages) & (PAGES_PER_MAPWORD - 1);
+       curr_idx = (first_page / __PAGE_SIZE) / PAGES_PER_MAPWORD;
+       start_off = (first_page / __PAGE_SIZE) & (PAGES_PER_MAPWORD - 1);
+       end_idx = ((first_page + nr_pages * __PAGE_SIZE) / __PAGE_SIZE) / PAGES_PER_MAPWORD;
+       end_off = ((first_page + nr_pages * __PAGE_SIZE) / __PAGE_SIZE) & (PAGES_PER_MAPWORD - 1);
        if (curr_idx == end_idx) {
                memr->mm_alloc_bitmap[curr_idx] &=
@@ -345,10 +349,25 @@ static int bbuddy_addmem(struct uk_alloc *a, void *base, size_t len)
        min = round_pgup((uintptr_t)base);
        max = round_pgdown((uintptr_t)base + (uintptr_t)len);
        range = max - min;
-       memr_size =
-           round_pgup(sizeof(*memr) + DIV_ROUND_UP(range >> __PAGE_SHIFT, 8));
        memr = (struct uk_bbpalloc_memr *)min;
+
+       /*
+        * The number of pages is found by solving the inequality:
+        *
+        * sizeof(*memr) + bitmap_size + page_num * page_size <= range
+        *
+        * where: bitmap_size = page_num / BITS_PER_BYTE
+        *
+        */
+       memr->nr_pages =
+                       BITS_PER_BYTE * (range - sizeof(*memr)) /
+                       (BITS_PER_BYTE * __PAGE_SIZE + 1);
+       memr->mm_alloc_bitmap = (unsigned long *) (min + sizeof(*memr));
+       memr->mm_alloc_bitmap_size  =
+                       round_pgup(memr->nr_pages / BITS_PER_BYTE) - sizeof(*memr);
+       memr_size = sizeof(*memr) + memr->mm_alloc_bitmap_size;
+
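
Spelling out the algebra for other reviewers: substituting bitmap_size and multiplying the inequality by BITS_PER_BYTE gives

    BITS_PER_BYTE * sizeof(*memr) + page_num + page_num * BITS_PER_BYTE * __PAGE_SIZE
        <= BITS_PER_BYTE * range

and solving for page_num:

    page_num <= BITS_PER_BYTE * (range - sizeof(*memr)) / (BITS_PER_BYTE * __PAGE_SIZE + 1)

which is exactly what the code computes, rounded down by the integer division.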
        min += memr_size;
        range -= memr_size;
        if (max < min) {
@@ -358,14 +377,19 @@ static int bbuddy_addmem(struct uk_alloc *a, void *base, size_t len)
                return -EINVAL;
        }
+       UK_ASSERT((range & __PAGE_MASK) == range);
+

Why are we asserting on "range", which is derived from user input, instead of doing proper error checking?
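
Something like this untested sketch, reusing the same __PAGE_MASK check the assertion relies on, would report the failure to the caller instead:

        /* reject a non-page-aligned range instead of asserting */
        if ((range & __PAGE_MASK) != range)
                return -EINVAL;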

        /*
         * Initialize region's bitmap
         */
        memr->first_page = min;
-       memr->nr_pages = max - min;
        /* add to list */
        memr->next = b->memr_head;
        b->memr_head = memr;
+
+       /* All allocated by default. */
+       memset(memr->mm_alloc_bitmap, ~0, memr->mm_alloc_bitmap_size);
+

The second parameter of memset() is a byte: "The memset() function fills the first n bytes of the memory area pointed to by s with the constant byte c." Since whole bytes are filled, why are we not masking the remaining bits at the end of the bitmap?

        /* free up the memory we've been given to play with */
        map_free(b, min, (unsigned long)(range >> __PAGE_SHIFT));

Thanks & Regards
S Sharan

_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel
