
[Minios-devel] [UNIKRAFT PATCH v2] lib/ukallocbbuddy: Correct region bitmap allocation and usage


  • To: minios-devel@xxxxxxxxxxxxx
  • From: Costin Lupu <costin.lupu@xxxxxxxxx>
  • Date: Thu, 21 Jun 2018 13:10:49 +0300
  • Cc: simon.kuenzer@xxxxxxxxx, sharan.santhanam@xxxxxxxxx, yuri.volchkov@xxxxxxxxx
  • Delivery-date: Thu, 21 Jun 2018 10:11:04 +0000
  • List-id: Mini-os development list <minios-devel.lists.xenproject.org>

The usage of each memory region that is added to the binary
buddy allocator is tracked with a bitmap. This patch corrects
the wrong size calculation for the bitmap and the wrong
calculation of bit positions.

Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
---
 lib/ukallocbbuddy/bbuddy.c | 83 +++++++++++++++++++++++++++++++---------------
 1 file changed, 57 insertions(+), 26 deletions(-)
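
For reference, here is a minimal standalone sketch (not part of the patch)
of the corrected bit-position math used by allocated_in_map()/map_alloc():
the page's virtual address is first turned into a page index relative to the
region start, and only then split into a bitmap word index and a bit offset.
PAGE_SIZE and the addresses below are illustrative stand-ins for __PAGE_SIZE
and a real region.

  #include <stdio.h>

  #define PAGE_SIZE          4096UL  /* stand-in for __PAGE_SIZE */
  #define BITS_PER_BYTE      8
  #define PAGES_PER_MAPWORD  (sizeof(unsigned long) * BITS_PER_BYTE)

  int main(void)
  {
          unsigned long first_page = 0x100000UL; /* region base address */
          unsigned long page_va    = 0x142000UL; /* page to look up */

          /* address delta -> page index within the region */
          unsigned long page_idx = (page_va - first_page) / PAGE_SIZE;
          /* page index -> bitmap word index and bit offset */
          unsigned long bm_idx = page_idx / PAGES_PER_MAPWORD;
          unsigned long bm_off = page_idx & (PAGES_PER_MAPWORD - 1);

          printf("page_idx=%lu word=%lu bit=%lu\n", page_idx, bm_idx, bm_off);
          return 0;
  }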

diff --git a/lib/ukallocbbuddy/bbuddy.c b/lib/ukallocbbuddy/bbuddy.c
index 20a9b70..63288f0 100644
--- a/lib/ukallocbbuddy/bbuddy.c
+++ b/lib/ukallocbbuddy/bbuddy.c
@@ -69,7 +69,7 @@ struct uk_bbpalloc_memr {
        unsigned long first_page;
        unsigned long nr_pages;
        unsigned long mm_alloc_bitmap_size;
-       unsigned long mm_alloc_bitmap[];
+       unsigned long *mm_alloc_bitmap;
 };
 
 struct uk_bbpalloc {
@@ -93,10 +93,11 @@ struct uk_bbpalloc {
  *  *_idx == Index into `mm_alloc_bitmap' array.
  *  *_off == Bit offset within an element of the `mm_alloc_bitmap' array.
  */
-#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
+#define BITS_PER_BYTE     8
+#define PAGES_PER_MAPWORD (sizeof(unsigned long) * BITS_PER_BYTE)
 
 static inline struct uk_bbpalloc_memr *map_get_memr(struct uk_bbpalloc *b,
-                                                   unsigned long page_num)
+                                                   unsigned long page_va)
 {
        struct uk_bbpalloc_memr *memr = NULL;
 
@@ -106,8 +107,9 @@ static inline struct uk_bbpalloc_memr *map_get_memr(struct uk_bbpalloc *b,
         * of them. It should be just one region in most cases
         */
        for (memr = b->memr_head; memr != NULL; memr = memr->next) {
-               if ((page_num >= memr->first_page)
-                   && (page_num < (memr->first_page + memr->nr_pages)))
+               if ((page_va >= memr->first_page)
+                   && (page_va < (memr->first_page +
+                   memr->nr_pages * __PAGE_SIZE)))
                        return memr;
        }
 
@@ -117,24 +119,29 @@ static inline struct uk_bbpalloc_memr *map_get_memr(struct uk_bbpalloc *b,
        return NULL;
 }
 
-static inline int allocated_in_map(struct uk_bbpalloc *b,
-                                  unsigned long page_num)
+static inline unsigned long allocated_in_map(struct uk_bbpalloc *b,
+                                  unsigned long page_va)
 {
-       struct uk_bbpalloc_memr *memr = map_get_memr(b, page_num);
+       struct uk_bbpalloc_memr *memr = map_get_memr(b, page_va);
+       unsigned long page_idx;
+       unsigned long bm_idx, bm_off;
 
        /* treat pages outside of region as allocated */
        if (!memr)
                return 1;
 
-       page_num -= memr->first_page;
-       return ((memr)->mm_alloc_bitmap[(page_num) / PAGES_PER_MAPWORD]
-               & (1UL << ((page_num) & (PAGES_PER_MAPWORD - 1))));
+       page_idx = (page_va - memr->first_page) / __PAGE_SIZE;
+       bm_idx = page_idx / PAGES_PER_MAPWORD;
+       bm_off = page_idx & (PAGES_PER_MAPWORD - 1);
+
+       return ((memr)->mm_alloc_bitmap[bm_idx] & (1UL << bm_off));
 }
 
 static void map_alloc(struct uk_bbpalloc *b, uintptr_t first_page,
                      unsigned long nr_pages)
 {
        struct uk_bbpalloc_memr *memr;
+       unsigned long first_page_idx, end_page_idx;
        unsigned long start_off, end_off, curr_idx, end_idx;
 
        /*
@@ -144,14 +151,16 @@ static void map_alloc(struct uk_bbpalloc *b, uintptr_t first_page,
         */
        memr = map_get_memr(b, first_page);
        UK_ASSERT(memr != NULL);
-       UK_ASSERT((first_page + nr_pages)
-                 <= (memr->first_page + memr->nr_pages));
+       UK_ASSERT((first_page + nr_pages * __PAGE_SIZE)
+                 <= (memr->first_page + memr->nr_pages * __PAGE_SIZE));
 
        first_page -= memr->first_page;
-       curr_idx = first_page / PAGES_PER_MAPWORD;
-       start_off = first_page & (PAGES_PER_MAPWORD - 1);
-       end_idx = (first_page + nr_pages) / PAGES_PER_MAPWORD;
-       end_off = (first_page + nr_pages) & (PAGES_PER_MAPWORD - 1);
+       first_page_idx = first_page / __PAGE_SIZE;
+       curr_idx = first_page_idx / PAGES_PER_MAPWORD;
+       start_off = first_page_idx & (PAGES_PER_MAPWORD - 1);
+       end_page_idx = first_page_idx + nr_pages;
+       end_idx = end_page_idx / PAGES_PER_MAPWORD;
+       end_off = end_page_idx & (PAGES_PER_MAPWORD - 1);
 
        if (curr_idx == end_idx) {
                memr->mm_alloc_bitmap[curr_idx] |=
@@ -170,6 +179,7 @@ static void map_free(struct uk_bbpalloc *b, uintptr_t first_page,
                     unsigned long nr_pages)
 {
        struct uk_bbpalloc_memr *memr;
+       unsigned long first_page_idx, end_page_idx;
        unsigned long start_off, end_off, curr_idx, end_idx;
 
        /*
@@ -179,14 +189,16 @@ static void map_free(struct uk_bbpalloc *b, uintptr_t first_page,
         */
        memr = map_get_memr(b, first_page);
        UK_ASSERT(memr != NULL);
-       UK_ASSERT((first_page + nr_pages)
-                 <= (memr->first_page + memr->nr_pages));
+       UK_ASSERT((first_page + nr_pages * __PAGE_SIZE)
+                 <= (memr->first_page + memr->nr_pages * __PAGE_SIZE));
 
        first_page -= memr->first_page;
-       curr_idx = first_page / PAGES_PER_MAPWORD;
-       start_off = first_page & (PAGES_PER_MAPWORD - 1);
-       end_idx = (first_page + nr_pages) / PAGES_PER_MAPWORD;
-       end_off = (first_page + nr_pages) & (PAGES_PER_MAPWORD - 1);
+       first_page_idx = first_page / __PAGE_SIZE;
+       curr_idx = first_page_idx / PAGES_PER_MAPWORD;
+       start_off = first_page_idx & (PAGES_PER_MAPWORD - 1);
+       end_page_idx = first_page_idx + nr_pages;
+       end_idx = end_page_idx / PAGES_PER_MAPWORD;
+       end_off = end_page_idx & (PAGES_PER_MAPWORD - 1);
 
        if (curr_idx == end_idx) {
                memr->mm_alloc_bitmap[curr_idx] &=
@@ -345,10 +357,25 @@ static int bbuddy_addmem(struct uk_alloc *a, void *base, size_t len)
        min = round_pgup((uintptr_t)base);
        max = round_pgdown((uintptr_t)base + (uintptr_t)len);
        range = max - min;
-       memr_size =
-           round_pgup(sizeof(*memr) + DIV_ROUND_UP(range >> __PAGE_SHIFT, 8));
 
        memr = (struct uk_bbpalloc_memr *)min;
+
+       /*
+        * The number of pages is found by solving the inequality:
+        *
+        * sizeof(*memr) + bitmap_size + page_num * page_size <= range
+        *
+        * where: bitmap_size = page_num / BITS_PER_BYTE
+        *
+        */
+       memr->nr_pages =
+               BITS_PER_BYTE * (range - sizeof(*memr)) /
+               (BITS_PER_BYTE * __PAGE_SIZE + 1);
+       memr->mm_alloc_bitmap = (unsigned long *) (min + sizeof(*memr));
+       memr->mm_alloc_bitmap_size  =
+               round_pgup(memr->nr_pages / BITS_PER_BYTE) - sizeof(*memr);
+       memr_size = sizeof(*memr) + memr->mm_alloc_bitmap_size;
+
        min += memr_size;
        range -= memr_size;
        if (max < min) {
@@ -362,10 +389,14 @@ static int bbuddy_addmem(struct uk_alloc *a, void *base, size_t len)
         * Initialize region's bitmap
         */
        memr->first_page = min;
-       memr->nr_pages = max - min;
        /* add to list */
        memr->next = b->memr_head;
        b->memr_head = memr;
+
+       /* All allocated by default. */
+       memset(memr->mm_alloc_bitmap, (unsigned char) ~0,
+                       memr->mm_alloc_bitmap_size);
+
        /* free up the memory we've been given to play with */
        map_free(b, min, (unsigned long)(range >> __PAGE_SHIFT));
 
-- 
2.11.0
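
For the nr_pages computation in bbuddy_addmem(), the quick check below (not
part of the patch) plugs illustrative numbers into the inequality from the
comment, sizeof(*memr) + nr_pages/BITS_PER_BYTE + nr_pages * page_size <=
range. PAGE_SIZE and MEMR_SIZE are assumed stand-ins for __PAGE_SIZE and
sizeof(struct uk_bbpalloc_memr).

  #include <stdio.h>

  #define BITS_PER_BYTE  8UL
  #define PAGE_SIZE      4096UL  /* stand-in for __PAGE_SIZE */
  #define MEMR_SIZE      64UL    /* assumed size of the region header */

  int main(void)
  {
          unsigned long range = 16UL << 20; /* 16 MiB region */

          /* largest nr_pages satisfying the inequality above */
          unsigned long nr_pages = BITS_PER_BYTE * (range - MEMR_SIZE) /
                                   (BITS_PER_BYTE * PAGE_SIZE + 1);
          unsigned long bitmap_bytes = nr_pages / BITS_PER_BYTE;

          printf("nr_pages=%lu bitmap=%lu used=%lu (<= %lu)\n",
                 nr_pages, bitmap_bytes,
                 MEMR_SIZE + bitmap_bytes + nr_pages * PAGE_SIZE, range);
          return 0;
  }

With these assumed values a 16 MiB region yields 4095 usable pages and a
511-byte bitmap, and the total stays within the region as required.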


_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel

 

