
[Minios-devel] [UNIKRAFT PATCH v3 12/17] include/uk: prefix functions in bitmap.h



This patch adds the uk_/UK_ prefix to the majority of the
macros/functions. For the sake of tractability, the patch touches only
functions that are either not used (yet) or used no more than a couple
of times.
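
For illustration (not part of the patch), a caller with <uk/bitops.h>
included would use the renamed helpers roughly like this minimal
sketch, assuming a single-word bitmap:

	unsigned long map[1] = { 0 };
	unsigned long bit;
	unsigned int count = 0;

	uk_set_bit(3, map);	/* was set_bit(): atomic set */
	__uk_set_bit(5, map);	/* was __set_bit(): non-atomic set */

	uk_for_each_set_bit(bit, map, UK_BITS_PER_LONG)
		count++;	/* visits bits 3 and 5, so count == 2 */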

Signed-off-by: Yuri Volchkov <yuri.volchkov@xxxxxxxxx>
---
 include/uk/bitmap.h |  4 ++--
 include/uk/bitops.h | 58 +++++++++++++++++++++++----------------------
 2 files changed, 32 insertions(+), 30 deletions(-)

diff --git a/include/uk/bitmap.h b/include/uk/bitmap.h
index f3c54f7..bb54304 100644
--- a/include/uk/bitmap.h
+++ b/include/uk/bitmap.h
@@ -148,7 +148,7 @@ bitmap_find_next_zero_area_off(const unsigned long *map,
        unsigned int i;
 
 retry:
-       index = find_next_zero_bit(map, size, start);
+       index = uk_find_next_zero_bit(map, size, start);
 
        index = (((index + align_offset) + align_mask) & ~align_mask) -
                align_offset;
@@ -157,7 +157,7 @@ retry:
        if (end > size)
                return (end);
 
-       i = find_next_bit(map, end, index);
+       i = uk_find_next_bit(map, end, index);
        if (i < end) {
                start = i + 1;
                goto retry;
diff --git a/include/uk/bitops.h b/include/uk/bitops.h
index 6e9f7be..82399e1 100644
--- a/include/uk/bitops.h
+++ b/include/uk/bitops.h
@@ -39,8 +39,9 @@
 #include <uk/arch/lcpu.h>
 #include <uk/arch/atomic.h>
 
-#define        BIT(nr)                 (1UL << (nr))
-#define        BIT_ULL(nr)             (1ULL << (nr))
+#define        UK_BIT(nr)                      (1UL << (nr))
+#define        UK_BIT_ULL(nr)          (1ULL << (nr))
+
 #ifdef __LP64__
 #define        UK_BITS_PER_LONG                64
 #else
@@ -52,11 +53,12 @@
 #define        BITMAP_FIRST_WORD_MASK(start)  (~0UL << ((start) % UK_BITS_PER_LONG))
 #define        BITMAP_LAST_WORD_MASK(n)       (~0UL >> (UK_BITS_PER_LONG - (n)))
 #define        BITS_TO_LONGS(n)               howmany((n), UK_BITS_PER_LONG)
-#define        BIT_MASK(nr)                   (1UL << ((nr) & (UK_BITS_PER_LONG - 1)))
+#define        UK_BIT_MASK(nr) \
+       (1UL << ((nr) & (UK_BITS_PER_LONG - 1)))
 #define BIT_WORD(nr)                   ((nr) / UK_BITS_PER_LONG)
-#define        GENMASK(h, l) \
+#define        UK_GENMASK(h, l) \
        (((~0UL) >> (UK_BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
-#define        GENMASK_ULL(h, l) \
+#define        UK_GENMASK_ULL(h, l) \
        (((~0ULL) >> (UK_BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
 #define BITS_PER_BYTE  8
 
@@ -75,12 +77,12 @@ fls64(__u64 mask)
 #endif
 
 static inline __u32
-ror32(__u32 word, unsigned int shift)
+uk_ror32(__u32 word, unsigned int shift)
 {
        return ((word >> shift) | (word << (32 - shift)));
 }
 
-static inline int get_count_order(unsigned int count)
+static inline int uk_get_count_order(unsigned int count)
 {
        int order;
 
@@ -91,7 +93,7 @@ static inline int get_count_order(unsigned int count)
 }
 
 static inline unsigned long
-find_first_bit(const unsigned long *addr, unsigned long size)
+uk_find_first_bit(const unsigned long *addr, unsigned long size)
 {
        long mask;
        int bit;
@@ -113,7 +115,7 @@ find_first_bit(const unsigned long *addr, unsigned long size)
 }
 
 static inline unsigned long
-find_first_zero_bit(const unsigned long *addr, unsigned long size)
+uk_find_first_zero_bit(const unsigned long *addr, unsigned long size)
 {
        long mask;
        int bit;
@@ -135,7 +137,7 @@ find_first_zero_bit(const unsigned long *addr, unsigned long size)
 }
 
 static inline unsigned long
-find_last_bit(const unsigned long *addr, unsigned long size)
+uk_find_last_bit(const unsigned long *addr, unsigned long size)
 {
        long mask;
        int offs;
@@ -161,7 +163,7 @@ find_last_bit(const unsigned long *addr, unsigned long size)
 }
 
 static inline unsigned long
-find_next_bit(const unsigned long *addr, unsigned long size,
+uk_find_next_bit(const unsigned long *addr, unsigned long size,
        unsigned long offset)
 {
        long mask;
@@ -201,7 +203,7 @@ find_next_bit(const unsigned long *addr, unsigned long size,
 }
 
 static inline unsigned long
-find_next_zero_bit(const unsigned long *addr, unsigned long size,
+uk_find_next_zero_bit(const unsigned long *addr, unsigned long size,
        unsigned long offset)
 {
        long mask;
@@ -240,32 +242,32 @@ find_next_zero_bit(const unsigned long *addr, unsigned long size,
        return (bit);
 }
 
-#define __set_bit(i, a)        ukarch_set_bit(i, a)
-#define set_bit(i, a)          ukarch_set_bit_sync(i, a)
-#define __clear_bit(i, a)      ukarch_clr_bit(i, a)
-#define clear_bit(i, a)        ukarch_clr_bit_sync(i, a)
-#define test_bit(i, a)         ukarch_test_bit(i, a)
+#define __uk_set_bit(i, a)        ukarch_set_bit(i, a)
+#define uk_set_bit(i, a)          ukarch_set_bit_sync(i, a)
+#define __uk_clear_bit(i, a)      ukarch_clr_bit(i, a)
+#define uk_clear_bit(i, a)        ukarch_clr_bit_sync(i, a)
+#define uk_test_bit(i, a)         ukarch_test_bit(i, a)
 
 static inline int
-test_and_clear_bit(long bit, volatile unsigned long *var)
+uk_test_and_clear_bit(long bit, volatile unsigned long *var)
 {
        return ukarch_test_and_clr_bit_sync(bit, (volatile void *) var);
 }
 
 static inline int
-__test_and_clear_bit(long bit, volatile unsigned long *var)
+__uk_test_and_clear_bit(long bit, volatile unsigned long *var)
 {
        return ukarch_test_and_clr_bit(bit, (volatile void *) var);
 }
 
 static inline int
-test_and_set_bit(long bit, volatile unsigned long *var)
+uk_test_and_set_bit(long bit, volatile unsigned long *var)
 {
        return ukarch_test_and_set_bit_sync(bit, (volatile void *) var);
 }
 
 static inline int
-__test_and_set_bit(long bit, volatile unsigned long *var)
+__uk_test_and_set_bit(long bit, volatile unsigned long *var)
 {
        return ukarch_test_and_set_bit(bit, (volatile void *) var);
 }
@@ -321,18 +323,18 @@ done:
        return ret;
 }
 
-#define for_each_set_bit(bit, addr, size) \
-       for ((bit) = find_first_bit((addr), (size));            \
+#define uk_for_each_set_bit(bit, addr, size) \
+       for ((bit) = uk_find_first_bit((addr), (size));         \
             (bit) < (size);                                    \
-            (bit) = find_next_bit((addr), (size), (bit) + 1))
+            (bit) = uk_find_next_bit((addr), (size), (bit) + 1))
 
-#define        for_each_clear_bit(bit, addr, size) \
-       for ((bit) = find_first_zero_bit((addr), (size));               \
+#define        uk_for_each_clear_bit(bit, addr, size) \
+       for ((bit) = uk_find_first_zero_bit((addr), (size));            \
             (bit) < (size);                                            \
-            (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+            (bit) = uk_find_next_zero_bit((addr), (size), (bit) + 1))
 
 static inline __u64
-sign_extend64(__u64 value, int index)
+uk_sign_extend64(__u64 value, int index)
 {
        __u8 shift = 63 - index;
 
-- 
2.18.0

