[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Minios-devel] [UNIKRAFT PATCH v2 5/9] uk/include: bitmap, bitops - fix spacing issues



From: Costin Lupu <costin.lupu@xxxxxxxxx>

This fixes some of the checkpatch issues, namely tabs instead of
spaces for indentation, and lines over 80 characters.

Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
---
 include/uk/bitmap.h |  25 ++++----
 include/uk/bitops.h | 135 ++++++++++++++++++++++----------------------
 2 files changed, 82 insertions(+), 78 deletions(-)

diff --git a/include/uk/bitmap.h b/include/uk/bitmap.h
index c323e42..5f3bd82 100644
--- a/include/uk/bitmap.h
+++ b/include/uk/bitmap.h
@@ -138,9 +138,9 @@ bitmap_clear(unsigned long *map, unsigned int start, int nr)
 
 static inline unsigned int
 bitmap_find_next_zero_area_off(const unsigned long *map,
-    const unsigned int size, unsigned int start,
-    unsigned int nr, unsigned int align_mask,
-    unsigned int align_offset)
+       const unsigned int size, unsigned int start,
+       unsigned int nr, unsigned int align_mask,
+       unsigned int align_offset)
 {
        unsigned int index;
        unsigned int end;
@@ -149,7 +149,8 @@ bitmap_find_next_zero_area_off(const unsigned long *map,
 retry:
        index = find_next_zero_bit(map, size, start);
 
-       index = (((index + align_offset) + align_mask) & ~align_mask) - 
align_offset;
+       index = (((index + align_offset) + align_mask) & ~align_mask) -
+               align_offset;
 
        end = index + nr;
        if (end > size)
@@ -165,11 +166,11 @@ retry:
 
 static inline unsigned int
 bitmap_find_next_zero_area(const unsigned long *map,
-    const unsigned int size, unsigned int start,
-    unsigned int nr, unsigned int align_mask)
+       const unsigned int size, unsigned int start,
+       unsigned int nr, unsigned int align_mask)
 {
        return (bitmap_find_next_zero_area_off(map, size,
-           start, nr, align_mask, 0));
+               start, nr, align_mask, 0));
 }
 
 static inline int
@@ -223,7 +224,7 @@ bitmap_weight(unsigned long *addr, const unsigned int size)
 
 static inline int
 bitmap_equal(const unsigned long *pa,
-    const unsigned long *pb, unsigned size)
+       const unsigned long *pb, unsigned int size)
 {
        const unsigned int end = BIT_WORD(size);
        const unsigned int tail = size & (BITS_PER_LONG - 1);
@@ -245,7 +246,7 @@ bitmap_equal(const unsigned long *pa,
 
 static inline void
 bitmap_complement(unsigned long *dst, const unsigned long *src,
-    const unsigned int size)
+       const unsigned int size)
 {
        const unsigned int end = BITS_TO_LONGS(size);
        unsigned int i;
@@ -256,7 +257,7 @@ bitmap_complement(unsigned long *dst, const unsigned long 
*src,
 
 static inline void
 bitmap_or(unsigned long *dst, const unsigned long *src1,
-    const unsigned long *src2, const unsigned int size)
+       const unsigned long *src2, const unsigned int size)
 {
        const unsigned int end = BITS_TO_LONGS(size);
        unsigned int i;
@@ -267,7 +268,7 @@ bitmap_or(unsigned long *dst, const unsigned long *src1,
 
 static inline void
 bitmap_and(unsigned long *dst, const unsigned long *src1,
-    const unsigned long *src2, const unsigned int size)
+       const unsigned long *src2, const unsigned int size)
 {
        const unsigned int end = BITS_TO_LONGS(size);
        unsigned int i;
@@ -278,7 +279,7 @@ bitmap_and(unsigned long *dst, const unsigned long *src1,
 
 static inline void
 bitmap_xor(unsigned long *dst, const unsigned long *src1,
-    const unsigned long *src2, const unsigned int size)
+       const unsigned long *src2, const unsigned int size)
 {
        const unsigned int end = BITS_TO_LONGS(size);
        unsigned int i;
diff --git a/include/uk/bitops.h b/include/uk/bitops.h
index 71825cb..01c069f 100644
--- a/include/uk/bitops.h
+++ b/include/uk/bitops.h
@@ -47,14 +47,16 @@
 
 #define        BITS_PER_LONG_LONG      64
 
-#define        BITMAP_FIRST_WORD_MASK(start)   (~0UL << ((start) % 
BITS_PER_LONG))
-#define        BITMAP_LAST_WORD_MASK(n)        (~0UL >> (BITS_PER_LONG - (n)))
-#define        BITS_TO_LONGS(n)        howmany((n), BITS_PER_LONG)
-#define        BIT_MASK(nr)            (1UL << ((nr) & (BITS_PER_LONG - 1)))
-#define BIT_WORD(nr)           ((nr) / BITS_PER_LONG)
-#define        GENMASK(h, l)           (((~0UL) >> (BITS_PER_LONG - (h) - 1)) 
& ((~0UL) << (l)))
-#define        GENMASK_ULL(h, l)       (((~0ULL) >> (BITS_PER_LONG_LONG - (h) 
- 1)) & ((~0ULL) << (l)))
-#define BITS_PER_BYTE          8
+#define        BITMAP_FIRST_WORD_MASK(start)  (~0UL << ((start) % 
BITS_PER_LONG))
+#define        BITMAP_LAST_WORD_MASK(n)       (~0UL >> (BITS_PER_LONG - (n)))
+#define        BITS_TO_LONGS(n)               howmany((n), BITS_PER_LONG)
+#define        BIT_MASK(nr)                   (1UL << ((nr) & (BITS_PER_LONG - 
1)))
+#define BIT_WORD(nr)                   ((nr) / BITS_PER_LONG)
+#define        GENMASK(h, l) \
+       (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
+#define        GENMASK_ULL(h, l) \
+       (((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
+#define BITS_PER_BYTE  8
 
 #define        hweight8(x)     bitcount((uint8_t)(x))
 #define        hweight16(x)    bitcount16(x)
@@ -102,12 +104,12 @@ ror32(uint32_t word, unsigned int shift)
 
 static inline int get_count_order(unsigned int count)
 {
-        int order;
+       int order;
 
-        order = fls(count) - 1;
-        if (count & (count - 1))
-                order++;
-        return order;
+       order = fls(count) - 1;
+       if (count & (count - 1))
+               order++;
+       return order;
 }
 
 static inline unsigned long
@@ -117,7 +119,7 @@ find_first_bit(const unsigned long *addr, unsigned long 
size)
        int bit;
 
        for (bit = 0; size >= BITS_PER_LONG;
-           size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+               size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
                if (*addr == 0)
                        continue;
                return (bit + __ffsl(*addr));
@@ -139,7 +141,7 @@ find_first_zero_bit(const unsigned long *addr, unsigned 
long size)
        int bit;
 
        for (bit = 0; size >= BITS_PER_LONG;
-           size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+               size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
                if (~(*addr) == 0)
                        continue;
                return (bit + __ffsl(~(*addr)));
@@ -181,7 +183,8 @@ find_last_bit(const unsigned long *addr, unsigned long size)
 }
 
 static inline unsigned long
-find_next_bit(const unsigned long *addr, unsigned long size, unsigned long 
offset)
+find_next_bit(const unsigned long *addr, unsigned long size,
+       unsigned long offset)
 {
        long mask;
        int offs;
@@ -204,7 +207,7 @@ find_next_bit(const unsigned long *addr, unsigned long 
size, unsigned long offse
                addr++;
        }
        for (size -= bit; size >= BITS_PER_LONG;
-           size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+               size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
                if (*addr == 0)
                        continue;
                return (bit + __ffsl(*addr));
@@ -221,7 +224,7 @@ find_next_bit(const unsigned long *addr, unsigned long 
size, unsigned long offse
 
 static inline unsigned long
 find_next_zero_bit(const unsigned long *addr, unsigned long size,
-    unsigned long offset)
+       unsigned long offset)
 {
        long mask;
        int offs;
@@ -244,7 +247,7 @@ find_next_zero_bit(const unsigned long *addr, unsigned long 
size,
                addr++;
        }
        for (size -= bit; size >= BITS_PER_LONG;
-           size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+               size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
                if (~(*addr) == 0)
                        continue;
                return (bit + __ffsl(~(*addr)));
@@ -260,19 +263,19 @@ find_next_zero_bit(const unsigned long *addr, unsigned 
long size,
 }
 
 #define        __set_bit(i, a)                                                 
\
-    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+       atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], 
BIT_MASK(i))
 
 #define        set_bit(i, a)                                                   
\
-    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+       atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], 
BIT_MASK(i))
 
 #define        __clear_bit(i, a)                                               
\
-    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], 
BIT_MASK(i))
+       atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], 
BIT_MASK(i))
 
 #define        clear_bit(i, a)                                                 
\
-    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], 
BIT_MASK(i))
+       atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], 
BIT_MASK(i))
 
 #define        test_bit(i, a)                                                  
\
-    !!(READ_ONCE(((volatile unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))
+       !!(READ_ONCE(((volatile unsigned long *)(a))[BIT_WORD(i)]) & 
BIT_MASK(i))
 
 static inline int
 test_and_clear_bit(long bit, volatile unsigned long *var)
@@ -335,54 +338,54 @@ __test_and_set_bit(long bit, volatile unsigned long *var)
 }
 
 enum {
-        REG_OP_ISFREE,
-        REG_OP_ALLOC,
-        REG_OP_RELEASE,
+       REG_OP_ISFREE,
+       REG_OP_ALLOC,
+       REG_OP_RELEASE,
 };
 
 static inline int
 linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
 {
-        int nbits_reg;
-        int index;
-        int offset;
-        int nlongs_reg;
-        int nbitsinlong;
-        unsigned long mask;
-        int i;
-        int ret = 0;
-
-        nbits_reg = 1 << order;
-        index = pos / BITS_PER_LONG;
-        offset = pos - (index * BITS_PER_LONG);
-        nlongs_reg = BITS_TO_LONGS(nbits_reg);
-        nbitsinlong = min(nbits_reg,  BITS_PER_LONG);
-
-        mask = (1UL << (nbitsinlong - 1));
-        mask += mask - 1;
-        mask <<= offset;
-
-        switch (reg_op) {
-        case REG_OP_ISFREE:
-                for (i = 0; i < nlongs_reg; i++) {
-                        if (bitmap[index + i] & mask)
-                                goto done;
-                }
-                ret = 1;
-                break;
-
-        case REG_OP_ALLOC:
-                for (i = 0; i < nlongs_reg; i++)
-                        bitmap[index + i] |= mask;
-                break;
-
-        case REG_OP_RELEASE:
-                for (i = 0; i < nlongs_reg; i++)
-                        bitmap[index + i] &= ~mask;
-                break;
-        }
+       int nbits_reg;
+       int index;
+       int offset;
+       int nlongs_reg;
+       int nbitsinlong;
+       unsigned long mask;
+       int i;
+       int ret = 0;
+
+       nbits_reg = 1 << order;
+       index = pos / BITS_PER_LONG;
+       offset = pos - (index * BITS_PER_LONG);
+       nlongs_reg = BITS_TO_LONGS(nbits_reg);
+       nbitsinlong = min(nbits_reg,  BITS_PER_LONG);
+
+       mask = (1UL << (nbitsinlong - 1));
+       mask += mask - 1;
+       mask <<= offset;
+
+       switch (reg_op) {
+       case REG_OP_ISFREE:
+               for (i = 0; i < nlongs_reg; i++) {
+                       if (bitmap[index + i] & mask)
+                               goto done;
+               }
+               ret = 1;
+               break;
+
+       case REG_OP_ALLOC:
+               for (i = 0; i < nlongs_reg; i++)
+                       bitmap[index + i] |= mask;
+               break;
+
+       case REG_OP_RELEASE:
+               for (i = 0; i < nlongs_reg; i++)
+                       bitmap[index + i] &= ~mask;
+               break;
+       }
 done:
-        return ret;
+       return ret;
 }
 
 #define for_each_set_bit(bit, addr, size) \
-- 
2.18.0


_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.