[PATCH 7/9] xen/bitops: Implement hweight64() in terms of hweightl()

... and drop generic_hweight64().

The resulting code is identical on all architectures except ARM32.  Add one
extra SELF_TEST to check that hweight64() works when the input is split in
half (i.e. the BITS_PER_LONG == 32 case).

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
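A note for reviewers on the BITS_PER_LONG == 32 path: hweightl() takes an
unsigned long, so the second call in "hweightl(x >> 32) + hweightl(x)"
relies on the implicit truncation of the uint64_t argument to count the
low half.  Below is a minimal standalone sketch of the same logic; the
popcount builtin stands in for Xen's arch-specific hweightl(), and the
harness is illustrative only, not part of the patch:

    #include <assert.h>
    #include <limits.h>
    #include <stdint.h>

    #define BITS_PER_LONG ((int)sizeof(unsigned long) * CHAR_BIT)

    /* Stand-in for Xen's hweightl(): Hamming weight of an unsigned long. */
    static unsigned int hweightl(unsigned long x)
    {
        return __builtin_popcountl(x);
    }

    static unsigned int hweight64(uint64_t x)
    {
        if ( BITS_PER_LONG == 64 )
            return hweightl(x);
        else
            /* 32-bit: one call per half; hweightl(x) truncates implicitly. */
            return hweightl(x >> 32) + hweightl(x);
    }

    int main(void)
    {
        assert(hweight64(0) == 0);
        assert(hweight64(1 | (1ULL << 63)) == 2); /* one bit in each half */
        assert(hweight64(-1ULL) == 64);
        return 0;
    }

The new -1ULL self-test exercises exactly this split: on a 32-bit build,
each hweightl() call must contribute 32 to the total.
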
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien@xxxxxxx>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
CC: Bertrand Marquis <bertrand.marquis@xxxxxxx>
CC: Michal Orzel <michal.orzel@xxxxxxx>
CC: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
CC: Shawn Anastasio <sanastasio@xxxxxxxxxxxxxxxxxxxxx>
---
 xen/arch/arm/include/asm/bitops.h |  1 -
 xen/arch/ppc/include/asm/bitops.h |  1 -
 xen/arch/x86/include/asm/bitops.h |  1 -
 xen/common/bitops.c               |  3 +++
 xen/include/xen/bitops.h          | 26 ++++++++------------------
 5 files changed, 11 insertions(+), 21 deletions(-)

diff --git a/xen/arch/arm/include/asm/bitops.h b/xen/arch/arm/include/asm/bitops.h
index 91cd167b6bbb..bed6b3b98e08 100644
--- a/xen/arch/arm/include/asm/bitops.h
+++ b/xen/arch/arm/include/asm/bitops.h
@@ -84,7 +84,6 @@ bool clear_mask16_timeout(uint16_t mask, volatile void *p,
  *
  * The Hamming Weight of a number is the total number of bits set in it.
  */
-#define hweight64(x) generic_hweight64(x)
 #define hweight32(x) generic_hweight32(x)
 
 #endif /* _ARM_BITOPS_H */
diff --git a/xen/arch/ppc/include/asm/bitops.h b/xen/arch/ppc/include/asm/bitops.h
index 64512e949530..24dc35ef644d 100644
--- a/xen/arch/ppc/include/asm/bitops.h
+++ b/xen/arch/ppc/include/asm/bitops.h
@@ -132,7 +132,6 @@ static inline int test_and_set_bit(unsigned int nr, volatile void *addr)
  *
  * The Hamming Weight of a number is the total number of bits set in it.
  */
-#define hweight64(x) __builtin_popcountll(x)
 #define hweight32(x) __builtin_popcount(x)
 
 #endif /* _ASM_PPC_BITOPS_H */
diff --git a/xen/arch/x86/include/asm/bitops.h b/xen/arch/x86/include/asm/bitops.h
index 4c5b21907a64..9d3a2448036e 100644
--- a/xen/arch/x86/include/asm/bitops.h
+++ b/xen/arch/x86/include/asm/bitops.h
@@ -481,7 +481,6 @@ static always_inline unsigned int arch_flsl(unsigned long x)
  *
  * The Hamming Weight of a number is the total number of bits set in it.
  */
-#define hweight64(x) generic_hweight64(x)
 #define hweight32(x) generic_hweight32(x)
 
 #endif /* _X86_BITOPS_H */
diff --git a/xen/common/bitops.c b/xen/common/bitops.c
index d0c268b4994a..f6a3eb5c9daf 100644
--- a/xen/common/bitops.c
+++ b/xen/common/bitops.c
@@ -117,6 +117,9 @@ static void __init test_hweight(void)
 
     CHECK(hweightl, 1 | (1UL << (BITS_PER_LONG - 1)), 2);
     CHECK(hweightl, -1UL, BITS_PER_LONG);
+
+    /* unsigned int hweight64(uint64_t) */
+    CHECK(hweight64, -1ULL, 64);
 }
 
 static void __init __constructor test_bitops(void)
diff --git a/xen/include/xen/bitops.h b/xen/include/xen/bitops.h
index 11a1c9130722..e97516552a2e 100644
--- a/xen/include/xen/bitops.h
+++ b/xen/include/xen/bitops.h
@@ -302,6 +302,14 @@ static always_inline __pure unsigned int hweightl(unsigned long x)
 #endif
 }
 
+static always_inline __pure unsigned int hweight64(uint64_t x)
+{
+    if ( BITS_PER_LONG == 64 )
+        return hweightl(x);
+    else
+        return hweightl(x >> 32) + hweightl(x);
+}
+
 /* --------------------- Please tidy below here --------------------- */
 
 #ifndef find_next_bit
@@ -389,24 +397,6 @@ static inline unsigned int generic_hweight32(unsigned int w)
     return (w + (w >> 16)) & 0xff;
 }
 
-static inline unsigned int generic_hweight64(uint64_t w)
-{
-    if ( BITS_PER_LONG < 64 )
-        return generic_hweight32(w >> 32) + generic_hweight32(w);
-
-    w -= (w >> 1) & 0x5555555555555555UL;
-    w =  (w & 0x3333333333333333UL) + ((w >> 2) & 0x3333333333333333UL);
-    w =  (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0fUL;
-
-    if ( IS_ENABLED(CONFIG_HAS_FAST_MULTIPLY) )
-        return (w * 0x0101010101010101UL) >> 56;
-
-    w += w >> 8;
-    w += w >> 16;
-
-    return (w + (w >> 32)) & 0xFF;
-}
-
 /*
  * rol32 - rotate a 32-bit value left
  *
-- 
2.39.2