|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Minios-devel] [UNIKRAFT PATCH v5 04/15] include/uk: import bitops.h and bitmap.h from FreeBSD
From: Costin Lupu <costin.lupu@xxxxxxxxx>
Copied as it is from official FreeBSD git mirror
(git://github.com/freebsd/freebsd.git)
Commit <ef0cebe7e1319abfe840cc789932f5cc0c54d175>
Original file locations:
sys/compat/linuxkpi/common/include/linux/bitops.h
sys/compat/linuxkpi/common/include/linux/bitmap.h
Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
Reviewed-by: Yuri Volchkov <yuri.volchkov@xxxxxxxxx>
---
include/uk/bitmap.h | 290 +++++++++++++++++++++++++++++++
include/uk/bitops.h | 406 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 696 insertions(+)
create mode 100644 include/uk/bitmap.h
create mode 100644 include/uk/bitops.h
diff --git a/include/uk/bitmap.h b/include/uk/bitmap.h
new file mode 100644
index 0000000..c323e42
--- /dev/null
+++ b/include/uk/bitmap.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _LINUX_BITMAP_H_
+#define _LINUX_BITMAP_H_
+
+#include <linux/bitops.h>
+
+static inline void
+bitmap_zero(unsigned long *addr, const unsigned int size)
+{
+ memset(addr, 0, BITS_TO_LONGS(size) * sizeof(long));
+}
+
+static inline void
+bitmap_fill(unsigned long *addr, const unsigned int size)
+{
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+
+ memset(addr, 0xff, BIT_WORD(size) * sizeof(long));
+
+ if (tail)
+ addr[BIT_WORD(size)] = BITMAP_LAST_WORD_MASK(tail);
+}
+
+static inline int
+bitmap_full(unsigned long *addr, const unsigned int size)
+{
+ const unsigned int end = BIT_WORD(size);
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+ unsigned int i;
+
+ for (i = 0; i != end; i++) {
+ if (addr[i] != ~0UL)
+ return (0);
+ }
+
+ if (tail) {
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+
+ if ((addr[end] & mask) != mask)
+ return (0);
+ }
+ return (1);
+}
+
+static inline int
+bitmap_empty(unsigned long *addr, const unsigned int size)
+{
+ const unsigned int end = BIT_WORD(size);
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+ unsigned int i;
+
+ for (i = 0; i != end; i++) {
+ if (addr[i] != 0)
+ return (0);
+ }
+
+ if (tail) {
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+
+ if ((addr[end] & mask) != 0)
+ return (0);
+ }
+ return (1);
+}
+
+static inline void
+bitmap_set(unsigned long *map, unsigned int start, int nr)
+{
+ const unsigned int size = start + nr;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ map += BIT_WORD(start);
+
+ while (nr - bits_to_set >= 0) {
+ *map |= mask_to_set;
+ nr -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ map++;
+ }
+
+ if (nr) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ *map |= mask_to_set;
+ }
+}
+
+static inline void
+bitmap_clear(unsigned long *map, unsigned int start, int nr)
+{
+ const unsigned int size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ map += BIT_WORD(start);
+
+ while (nr - bits_to_clear >= 0) {
+ *map &= ~mask_to_clear;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ map++;
+ }
+
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ *map &= ~mask_to_clear;
+ }
+}
+
+static inline unsigned int
+bitmap_find_next_zero_area_off(const unsigned long *map,
+ const unsigned int size, unsigned int start,
+ unsigned int nr, unsigned int align_mask,
+ unsigned int align_offset)
+{
+ unsigned int index;
+ unsigned int end;
+ unsigned int i;
+
+retry:
+ index = find_next_zero_bit(map, size, start);
+
+ index = (((index + align_offset) + align_mask) & ~align_mask) - align_offset;
+
+ end = index + nr;
+ if (end > size)
+ return (end);
+
+ i = find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto retry;
+ }
+ return (index);
+}
+
+static inline unsigned int
+bitmap_find_next_zero_area(const unsigned long *map,
+ const unsigned int size, unsigned int start,
+ unsigned int nr, unsigned int align_mask)
+{
+ return (bitmap_find_next_zero_area_off(map, size,
+ start, nr, align_mask, 0));
+}
+
+static inline int
+bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+{
+ int pos;
+ int end;
+
+ for (pos = 0; (end = pos + (1 << order)) <= bits; pos = end) {
+ if (!linux_reg_op(bitmap, pos, order, REG_OP_ISFREE))
+ continue;
+ linux_reg_op(bitmap, pos, order, REG_OP_ALLOC);
+ return (pos);
+ }
+ return (-ENOMEM);
+}
+
+static inline int
+bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+{
+ if (!linux_reg_op(bitmap, pos, order, REG_OP_ISFREE))
+ return (-EBUSY);
+ linux_reg_op(bitmap, pos, order, REG_OP_ALLOC);
+ return (0);
+}
+
+static inline void
+bitmap_release_region(unsigned long *bitmap, int pos, int order)
+{
+ linux_reg_op(bitmap, pos, order, REG_OP_RELEASE);
+}
+
+static inline unsigned int
+bitmap_weight(unsigned long *addr, const unsigned int size)
+{
+ const unsigned int end = BIT_WORD(size);
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+ unsigned int retval = 0;
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ retval += hweight_long(addr[i]);
+
+ if (tail) {
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+
+ retval += hweight_long(addr[end] & mask);
+ }
+ return (retval);
+}
+
+static inline int
+bitmap_equal(const unsigned long *pa,
+ const unsigned long *pb, unsigned size)
+{
+ const unsigned int end = BIT_WORD(size);
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+ unsigned int i;
+
+ for (i = 0; i != end; i++) {
+ if (pa[i] != pb[i])
+ return (0);
+ }
+
+ if (tail) {
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+
+ if ((pa[end] ^ pb[end]) & mask)
+ return (0);
+ }
+ return (1);
+}
+
+static inline void
+bitmap_complement(unsigned long *dst, const unsigned long *src,
+ const unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ dst[i] = ~src[i];
+}
+
+static inline void
+bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, const unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ dst[i] = src1[i] | src2[i];
+}
+
+static inline void
+bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, const unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ dst[i] = src1[i] & src2[i];
+}
+
+static inline void
+bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, const unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ dst[i] = src1[i] ^ src2[i];
+}
+
+#endif /* _LINUX_BITMAP_H_ */
diff --git a/include/uk/bitops.h b/include/uk/bitops.h
new file mode 100644
index 0000000..71825cb
--- /dev/null
+++ b/include/uk/bitops.h
@@ -0,0 +1,406 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _LINUX_BITOPS_H_
+#define _LINUX_BITOPS_H_
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/errno.h>
+#include <sys/libkern.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#ifdef __LP64__
+#define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif
+
+#define BITS_PER_LONG_LONG 64
+
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
+#define BITMAP_LAST_WORD_MASK(n) (~0UL >> (BITS_PER_LONG - (n)))
+#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
+#define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1)))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
+#define GENMASK_ULL(h, l) (((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
+#define BITS_PER_BYTE 8
+
+#define hweight8(x) bitcount((uint8_t)(x))
+#define hweight16(x) bitcount16(x)
+#define hweight32(x) bitcount32(x)
+#define hweight64(x) bitcount64(x)
+#define hweight_long(x) bitcountl(x)
+
+static inline int
+__ffs(int mask)
+{
+ return (ffs(mask) - 1);
+}
+
+static inline int
+__fls(int mask)
+{
+ return (fls(mask) - 1);
+}
+
+static inline int
+__ffsl(long mask)
+{
+ return (ffsl(mask) - 1);
+}
+
+static inline int
+__flsl(long mask)
+{
+ return (flsl(mask) - 1);
+}
+
+static inline int
+fls64(uint64_t mask)
+{
+ return (flsll(mask));
+}
+
+static inline uint32_t
+ror32(uint32_t word, unsigned int shift)
+{
+ return ((word >> shift) | (word << (32 - shift)));
+}
+
+#define ffz(mask) __ffs(~(mask))
+
+static inline int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
+
+static inline unsigned long
+find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ long mask;
+ int bit;
+
+ for (bit = 0; size >= BITS_PER_LONG;
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ if (*addr == 0)
+ continue;
+ return (bit + __ffsl(*addr));
+ }
+ if (size) {
+ mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
+ if (mask)
+ bit += __ffsl(mask);
+ else
+ bit += size;
+ }
+ return (bit);
+}
+
+static inline unsigned long
+find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ long mask;
+ int bit;
+
+ for (bit = 0; size >= BITS_PER_LONG;
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ if (~(*addr) == 0)
+ continue;
+ return (bit + __ffsl(~(*addr)));
+ }
+ if (size) {
+ mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
+ if (mask)
+ bit += __ffsl(mask);
+ else
+ bit += size;
+ }
+ return (bit);
+}
+
+static inline unsigned long
+find_last_bit(const unsigned long *addr, unsigned long size)
+{
+ long mask;
+ int offs;
+ int bit;
+ int pos;
+
+ pos = size / BITS_PER_LONG;
+ offs = size % BITS_PER_LONG;
+ bit = BITS_PER_LONG * pos;
+ addr += pos;
+ if (offs) {
+ mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
+ if (mask)
+ return (bit + __flsl(mask));
+ }
+ while (pos--) {
+ addr--;
+ bit -= BITS_PER_LONG;
+ if (*addr)
+ return (bit + __flsl(*addr));
+ }
+ return (size);
+}
+
+static inline unsigned long
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+{
+ long mask;
+ int offs;
+ int bit;
+ int pos;
+
+ if (offset >= size)
+ return (size);
+ pos = offset / BITS_PER_LONG;
+ offs = offset % BITS_PER_LONG;
+ bit = BITS_PER_LONG * pos;
+ addr += pos;
+ if (offs) {
+ mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
+ if (mask)
+ return (bit + __ffsl(mask));
+ if (size - bit <= BITS_PER_LONG)
+ return (size);
+ bit += BITS_PER_LONG;
+ addr++;
+ }
+ for (size -= bit; size >= BITS_PER_LONG;
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ if (*addr == 0)
+ continue;
+ return (bit + __ffsl(*addr));
+ }
+ if (size) {
+ mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
+ if (mask)
+ bit += __ffsl(mask);
+ else
+ bit += size;
+ }
+ return (bit);
+}
+
+static inline unsigned long
+find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ long mask;
+ int offs;
+ int bit;
+ int pos;
+
+ if (offset >= size)
+ return (size);
+ pos = offset / BITS_PER_LONG;
+ offs = offset % BITS_PER_LONG;
+ bit = BITS_PER_LONG * pos;
+ addr += pos;
+ if (offs) {
+ mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
+ if (mask)
+ return (bit + __ffsl(mask));
+ if (size - bit <= BITS_PER_LONG)
+ return (size);
+ bit += BITS_PER_LONG;
+ addr++;
+ }
+ for (size -= bit; size >= BITS_PER_LONG;
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ if (~(*addr) == 0)
+ continue;
+ return (bit + __ffsl(~(*addr)));
+ }
+ if (size) {
+ mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
+ if (mask)
+ bit += __ffsl(mask);
+ else
+ bit += size;
+ }
+ return (bit);
+}
+
+#define __set_bit(i, a) \
+ atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define set_bit(i, a) \
+ atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define __clear_bit(i, a) \
+ atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define clear_bit(i, a) \
+ atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define test_bit(i, a) \
+ !!(READ_ONCE(((volatile unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))
+
+static inline int
+test_and_clear_bit(long bit, volatile unsigned long *var)
+{
+ long val;
+
+ var += BIT_WORD(bit);
+ bit %= BITS_PER_LONG;
+ bit = (1UL << bit);
+ do {
+ val = *var;
+ } while (atomic_cmpset_long(var, val, val & ~bit) == 0);
+
+ return !!(val & bit);
+}
+
+static inline int
+__test_and_clear_bit(long bit, volatile unsigned long *var)
+{
+ long val;
+
+ var += BIT_WORD(bit);
+ bit %= BITS_PER_LONG;
+ bit = (1UL << bit);
+
+ val = *var;
+ *var &= ~bit;
+
+ return !!(val & bit);
+}
+
+static inline int
+test_and_set_bit(long bit, volatile unsigned long *var)
+{
+ long val;
+
+ var += BIT_WORD(bit);
+ bit %= BITS_PER_LONG;
+ bit = (1UL << bit);
+ do {
+ val = *var;
+ } while (atomic_cmpset_long(var, val, val | bit) == 0);
+
+ return !!(val & bit);
+}
+
+static inline int
+__test_and_set_bit(long bit, volatile unsigned long *var)
+{
+ long val;
+
+ var += BIT_WORD(bit);
+ bit %= BITS_PER_LONG;
+ bit = (1UL << bit);
+
+ val = *var;
+ *var |= bit;
+
+ return !!(val & bit);
+}
+
+enum {
+ REG_OP_ISFREE,
+ REG_OP_ALLOC,
+ REG_OP_RELEASE,
+};
+
+static inline int
+linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+{
+ int nbits_reg;
+ int index;
+ int offset;
+ int nlongs_reg;
+ int nbitsinlong;
+ unsigned long mask;
+ int i;
+ int ret = 0;
+
+ nbits_reg = 1 << order;
+ index = pos / BITS_PER_LONG;
+ offset = pos - (index * BITS_PER_LONG);
+ nlongs_reg = BITS_TO_LONGS(nbits_reg);
+ nbitsinlong = min(nbits_reg, BITS_PER_LONG);
+
+ mask = (1UL << (nbitsinlong - 1));
+ mask += mask - 1;
+ mask <<= offset;
+
+ switch (reg_op) {
+ case REG_OP_ISFREE:
+ for (i = 0; i < nlongs_reg; i++) {
+ if (bitmap[index + i] & mask)
+ goto done;
+ }
+ ret = 1;
+ break;
+
+ case REG_OP_ALLOC:
+ for (i = 0; i < nlongs_reg; i++)
+ bitmap[index + i] |= mask;
+ break;
+
+ case REG_OP_RELEASE:
+ for (i = 0; i < nlongs_reg; i++)
+ bitmap[index + i] &= ~mask;
+ break;
+ }
+done:
+ return ret;
+}
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = find_first_zero_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
+static inline uint64_t
+sign_extend64(uint64_t value, int index)
+{
+ uint8_t shift = 63 - index;
+
+ return ((int64_t)(value << shift) >> shift);
+}
+
+#endif /* _LINUX_BITOPS_H_ */
--
2.18.0
_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |