|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Minios-devel] [UNIKRAFT PATCH 5/6] include/uk: adapt bitops.h for Unikraft
1) Add SPDX license identifier
2) Include Unikraft headers
3) Use Unikraft types
4) Use Unikraft "find-first" functions
5) Use Unikraft atomic bit operations
6) Disable fls64 function
7) Fix checkpatch issues
Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
---
include/uk/bitops.h | 240 +++++++++++++++++++++++-----------------------------
1 file changed, 107 insertions(+), 133 deletions(-)
diff --git a/include/uk/bitops.h b/include/uk/bitops.h
index 71825cb..21d8bd0 100644
--- a/include/uk/bitops.h
+++ b/include/uk/bitops.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
/*-
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
@@ -32,10 +33,11 @@
#define _LINUX_BITOPS_H_
#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/systm.h>
-#include <sys/errno.h>
-#include <sys/libkern.h>
+#include <errno.h>
+#include <uk/essentials.h>
+#include <uk/bitcount.h>
+#include <uk/arch/lcpu.h>
+#include <uk/arch/atomic.h>
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
@@ -47,67 +49,47 @@
#define BITS_PER_LONG_LONG 64
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-#define BITMAP_LAST_WORD_MASK(n) (~0UL >> (BITS_PER_LONG - (n)))
-#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
-#define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1)))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
-#define GENMASK_ULL(h, l) (((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
-#define BITS_PER_BYTE 8
-
-#define hweight8(x) bitcount((uint8_t)(x))
-#define hweight16(x) bitcount16(x)
-#define hweight32(x) bitcount32(x)
-#define hweight64(x) bitcount64(x)
-#define hweight_long(x) bitcountl(x)
-
-static inline int
-__ffs(int mask)
-{
- return (ffs(mask) - 1);
-}
-
-static inline int
-__fls(int mask)
-{
- return (fls(mask) - 1);
-}
-
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
+#define BITMAP_LAST_WORD_MASK(n) (~0UL >> (BITS_PER_LONG - (n)))
+#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
+#define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1)))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define GENMASK(h, l) \
+ (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
+#define BITS_PER_BYTE 8
+
+#define hweight8(x) __bitcount((__u8)(x))
+#define hweight16(x) __bitcount16(x)
+#define hweight32(x) __bitcount32(x)
+#define hweight64(x) __bitcount64(x)
+#define hweight_long(x) __bitcountl(x)
+
+#if 0 /* TODO revisit when needed */
static inline int
-__ffsl(long mask)
+fls64(__u64 mask)
{
- return (ffsl(mask) - 1);
-}
-
-static inline int
-__flsl(long mask)
-{
- return (flsl(mask) - 1);
-}
-
-static inline int
-fls64(uint64_t mask)
-{
- return (flsll(mask));
+ return flsll(mask);
}
+#endif
-static inline uint32_t
-ror32(uint32_t word, unsigned int shift)
+static inline __u32
+ror32(__u32 word, unsigned int shift)
{
return ((word >> shift) | (word << (32 - shift)));
}
-#define ffz(mask) __ffs(~(mask))
+#define ffz(mask) ukarch_ffs(~(mask))
static inline int get_count_order(unsigned int count)
{
- int order;
+ int order;
- order = fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
+ order = ukarch_fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
}
static inline unsigned long
@@ -117,15 +99,15 @@ find_first_bit(const unsigned long *addr, unsigned long size)
int bit;
for (bit = 0; size >= BITS_PER_LONG;
- size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
if (*addr == 0)
continue;
- return (bit + __ffsl(*addr));
+ return (bit + ukarch_ffsl(*addr));
}
if (size) {
mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
if (mask)
- bit += __ffsl(mask);
+ bit += ukarch_ffsl(mask);
else
bit += size;
}
@@ -139,15 +121,15 @@ find_first_zero_bit(const unsigned long *addr, unsigned long size)
int bit;
for (bit = 0; size >= BITS_PER_LONG;
- size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
if (~(*addr) == 0)
continue;
- return (bit + __ffsl(~(*addr)));
+ return (bit + ukarch_ffsl(~(*addr)));
}
if (size) {
mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
if (mask)
- bit += __ffsl(mask);
+ bit += ukarch_ffsl(mask);
else
bit += size;
}
@@ -169,19 +151,20 @@ find_last_bit(const unsigned long *addr, unsigned long size)
if (offs) {
mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
if (mask)
- return (bit + __flsl(mask));
+ return (bit + ukarch_flsl(mask));
}
while (pos--) {
addr--;
bit -= BITS_PER_LONG;
if (*addr)
- return (bit + __flsl(*addr));
+ return (bit + ukarch_flsl(*addr));
}
return (size);
}
static inline unsigned long
-find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
{
long mask;
int offs;
@@ -197,22 +180,22 @@ find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offse
if (offs) {
mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
if (mask)
- return (bit + __ffsl(mask));
+ return (bit + ukarch_ffsl(mask));
if (size - bit <= BITS_PER_LONG)
return (size);
bit += BITS_PER_LONG;
addr++;
}
for (size -= bit; size >= BITS_PER_LONG;
- size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
if (*addr == 0)
continue;
- return (bit + __ffsl(*addr));
+ return (bit + ukarch_ffsl(*addr));
}
if (size) {
mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
if (mask)
- bit += __ffsl(mask);
+ bit += ukarch_ffsl(mask);
else
bit += size;
}
@@ -221,7 +204,7 @@ find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offse
static inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+ unsigned long offset)
{
long mask;
int offs;
@@ -237,42 +220,33 @@ find_next_zero_bit(const unsigned long *addr, unsigned long size,
if (offs) {
mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
if (mask)
- return (bit + __ffsl(mask));
+ return (bit + ukarch_ffsl(mask));
if (size - bit <= BITS_PER_LONG)
return (size);
bit += BITS_PER_LONG;
addr++;
}
for (size -= bit; size >= BITS_PER_LONG;
- size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
if (~(*addr) == 0)
continue;
- return (bit + __ffsl(~(*addr)));
+ return (bit + ukarch_ffsl(~(*addr)));
}
if (size) {
mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
if (mask)
- bit += __ffsl(mask);
+ bit += ukarch_ffsl(mask);
else
bit += size;
}
return (bit);
}
-#define __set_bit(i, a)						\
-	atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
-
-#define set_bit(i, a)						\
-	atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
-
-#define __clear_bit(i, a)					\
-	atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
-
-#define clear_bit(i, a)						\
-	atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
-
-#define test_bit(i, a)						\
-	!!(READ_ONCE(((volatile unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))
+#define __set_bit(i, a) ukarch_set_bit(i, a)
+#define set_bit(i, a) ukarch_set_bit(i, a)
+#define __clear_bit(i, a) ukarch_clr_bit(i, a)
+#define clear_bit(i, a) ukarch_clr_bit(i, a)
+#define test_bit(i, a) ukarch_test_bit(i, a)
static inline int
test_and_clear_bit(long bit, volatile unsigned long *var)
@@ -284,7 +258,7 @@ test_and_clear_bit(long bit, volatile unsigned long *var)
bit = (1UL << bit);
do {
val = *var;
- } while (atomic_cmpset_long(var, val, val & ~bit) == 0);
+	} while (ukarch_compare_exchange_sync((unsigned long *) var, val, val & ~bit) == 0);
return !!(val & bit);
}
@@ -314,7 +288,7 @@ test_and_set_bit(long bit, volatile unsigned long *var)
bit = (1UL << bit);
do {
val = *var;
- } while (atomic_cmpset_long(var, val, val | bit) == 0);
+	} while (ukarch_compare_exchange_sync((unsigned long *) var, val, val | bit) == 0);
return !!(val & bit);
}
@@ -335,54 +309,54 @@ __test_and_set_bit(long bit, volatile unsigned long *var)
}
enum {
- REG_OP_ISFREE,
- REG_OP_ALLOC,
- REG_OP_RELEASE,
+ REG_OP_ISFREE,
+ REG_OP_ALLOC,
+ REG_OP_RELEASE,
};
static inline int
linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
- int nbits_reg;
- int index;
- int offset;
- int nlongs_reg;
- int nbitsinlong;
- unsigned long mask;
- int i;
- int ret = 0;
-
- nbits_reg = 1 << order;
- index = pos / BITS_PER_LONG;
- offset = pos - (index * BITS_PER_LONG);
- nlongs_reg = BITS_TO_LONGS(nbits_reg);
- nbitsinlong = min(nbits_reg, BITS_PER_LONG);
-
- mask = (1UL << (nbitsinlong - 1));
- mask += mask - 1;
- mask <<= offset;
-
- switch (reg_op) {
- case REG_OP_ISFREE:
- for (i = 0; i < nlongs_reg; i++) {
- if (bitmap[index + i] & mask)
- goto done;
- }
- ret = 1;
- break;
-
- case REG_OP_ALLOC:
- for (i = 0; i < nlongs_reg; i++)
- bitmap[index + i] |= mask;
- break;
-
- case REG_OP_RELEASE:
- for (i = 0; i < nlongs_reg; i++)
- bitmap[index + i] &= ~mask;
- break;
- }
+ int nbits_reg;
+ int index;
+ int offset;
+ int nlongs_reg;
+ int nbitsinlong;
+ unsigned long mask;
+ int i;
+ int ret = 0;
+
+ nbits_reg = 1 << order;
+ index = pos / BITS_PER_LONG;
+ offset = pos - (index * BITS_PER_LONG);
+ nlongs_reg = BITS_TO_LONGS(nbits_reg);
+ nbitsinlong = MIN(nbits_reg, BITS_PER_LONG);
+
+ mask = (1UL << (nbitsinlong - 1));
+ mask += mask - 1;
+ mask <<= offset;
+
+ switch (reg_op) {
+ case REG_OP_ISFREE:
+ for (i = 0; i < nlongs_reg; i++) {
+ if (bitmap[index + i] & mask)
+ goto done;
+ }
+ ret = 1;
+ break;
+
+ case REG_OP_ALLOC:
+ for (i = 0; i < nlongs_reg; i++)
+ bitmap[index + i] |= mask;
+ break;
+
+ case REG_OP_RELEASE:
+ for (i = 0; i < nlongs_reg; i++)
+ bitmap[index + i] &= ~mask;
+ break;
+ }
done:
- return ret;
+ return ret;
}
#define for_each_set_bit(bit, addr, size) \
@@ -395,12 +369,12 @@ done:
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
-static inline uint64_t
-sign_extend64(uint64_t value, int index)
+static inline __u64
+sign_extend64(__u64 value, int index)
{
- uint8_t shift = 63 - index;
+ __u8 shift = 63 - index;
- return ((int64_t)(value << shift) >> shift);
+ return ((__s64)(value << shift) >> shift);
}
#endif /* _LINUX_BITOPS_H_ */
--
2.11.0
_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |