
[Xen-changelog] Bit-scanning functions take offset/size as an int, not a long, and return an int



ChangeSet 1.1591, 2005/05/29 17:13:43+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Bit-scanning functions take offset/size as an int, not a long, and
        return an int. This allows some small optimisation in the code
        produced for x86/64.
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
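
(Illustration only, not part of the changeset: a minimal sketch of a
hypothetical caller, showing where the narrower types can help on x86/64.
With the size/offset parameters and the return value declared unsigned int,
the compiler can keep the bit index in a 32-bit register and emit 32-bit
arithmetic without REX.W prefixes, which is the same idea the patch applies
by hand in the inline asm by switching to the 32-bit register names
%ebx/%edi/%eax/%edx. The helper name first_free_slot and its arguments are
illustrative, not taken from the patch.)

 #include <xen/bitops.h>

 /* Hypothetical caller sketch, assuming the new prototypes from this patch. */
 unsigned int first_free_slot(const unsigned long *bitmap, unsigned int nbits)
 {
     /* __find_first_zero_bit() now takes and returns unsigned int, so the
      * comparison and the return value below can stay in 32-bit registers. */
     unsigned int idx = __find_first_zero_bit(bitmap, nbits);

     /* Follow the usual find_*_bit convention: a result >= nbits means no
      * zero bit was found below nbits; report that as nbits. */
     return (idx < nbits) ? idx : nbits;
 }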



 arch/x86/bitops.c        |   42 +++++++++++++++++++++---------------------
 include/asm-x86/bitops.h |   24 ++++++++++++------------
 2 files changed, 33 insertions(+), 33 deletions(-)


diff -Nru a/xen/arch/x86/bitops.c b/xen/arch/x86/bitops.c
--- a/xen/arch/x86/bitops.c     2005-05-29 13:02:16 -04:00
+++ b/xen/arch/x86/bitops.c     2005-05-29 13:02:16 -04:00
@@ -2,8 +2,8 @@
 #include <xen/bitops.h>
 #include <xen/lib.h>
 
-unsigned long __find_first_bit(
-    const unsigned long *addr, unsigned long size)
+unsigned int __find_first_bit(
+    const unsigned long *addr, unsigned int size)
 {
     unsigned long d0, d1, res;
 
@@ -13,29 +13,29 @@
         "   je 1f\n\t"
         "   lea -"STR(BITS_PER_LONG/8)"(%2),%2\n\t"
         "   bsf (%2),%0\n"
-        "1: sub %5,%2\n\t"
-        "   shl $3,%2\n\t"
-        "   add %2,%0"
+        "1: sub %%ebx,%%edi\n\t"
+        "   shl $3,%%edi\n\t"
+        "   add %%edi,%%eax"
         : "=&a" (res), "=&c" (d0), "=&D" (d1)
         : "1" ((size + BITS_PER_LONG - 1) / BITS_PER_LONG),
-          "2" (addr), "b" (addr) : "memory" );
+          "2" (addr), "b" ((int)(long)addr) : "memory" );
 
     return res;
 }
 
-unsigned long __find_next_bit(
-    const unsigned long *addr, unsigned long size, unsigned long offset)
+unsigned int __find_next_bit(
+    const unsigned long *addr, unsigned int size, unsigned int offset)
 {
     const unsigned long *p = addr + (offset / BITS_PER_LONG);
-    unsigned long set, bit = offset & (BITS_PER_LONG - 1);
+    unsigned int set, bit = offset & (BITS_PER_LONG - 1);
 
     ASSERT(offset < size);
 
     if ( bit != 0 )
     {
         /* Look for a bit in the first word. */
-        __asm__ ( "bsf %1,%0" 
-                  : "=r" (set) : "r" (*p >> bit), "0" (BITS_PER_LONG) );
+        __asm__ ( "bsf %1,%%"__OP"ax"
+                  : "=a" (set) : "r" (*p >> bit), "0" (BITS_PER_LONG) );
         if ( set < (BITS_PER_LONG - bit) )
             return (offset + set);
         offset += BITS_PER_LONG - bit;
@@ -50,8 +50,8 @@
     return (offset + set);
 }
 
-unsigned long __find_first_zero_bit(
-    const unsigned long *addr, unsigned long size)
+unsigned int __find_first_zero_bit(
+    const unsigned long *addr, unsigned int size)
 {
     unsigned long d0, d1, d2, res;
 
@@ -62,28 +62,28 @@
         "   lea -"STR(BITS_PER_LONG/8)"(%2),%2\n\t"
         "   xor (%2),%3\n\t"
         "   bsf %3,%0\n"
-        "1: sub %6,%2\n\t"
-        "   shl $3,%2\n\t"
-        "   add %2,%0"
+        "1: sub %%ebx,%%edi\n\t"
+        "   shl $3,%%edi\n\t"
+        "   add %%edi,%%edx"
         : "=&d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
         : "1" ((size + BITS_PER_LONG - 1) / BITS_PER_LONG),
-          "2" (addr), "b" (addr), "3" (-1L) : "memory" );
+          "2" (addr), "b" ((int)(long)addr), "3" (-1L) : "memory" );
 
     return res;
 }
 
-unsigned long __find_next_zero_bit(
-    const unsigned long *addr, unsigned long size, unsigned long offset)
+unsigned int __find_next_zero_bit(
+    const unsigned long *addr, unsigned int size, unsigned int offset)
 {
     const unsigned long *p = addr + (offset / BITS_PER_LONG);
-    unsigned long set, bit = offset & (BITS_PER_LONG - 1);
+    unsigned int set, bit = offset & (BITS_PER_LONG - 1);
 
     ASSERT(offset < size);
 
     if ( bit != 0 )
     {
         /* Look for zero in the first word. */
-        __asm__ ( "bsf %1,%0" : "=r" (set) : "r" (~(*p >> bit)) );
+        __asm__ ( "bsf %1,%%"__OP"ax" : "=a" (set) : "r" (~(*p >> bit)) );
         if ( set < (BITS_PER_LONG - bit) )
             return (offset + set);
         offset += BITS_PER_LONG - bit;
diff -Nru a/xen/include/asm-x86/bitops.h b/xen/include/asm-x86/bitops.h
--- a/xen/include/asm-x86/bitops.h      2005-05-29 13:02:17 -04:00
+++ b/xen/include/asm-x86/bitops.h      2005-05-29 13:02:17 -04:00
@@ -248,20 +248,20 @@
  constant_test_bit((nr),(addr)) : \
  variable_test_bit((nr),(addr)))
 
-extern unsigned long __find_first_bit(
-    const unsigned long *addr, unsigned long size);
-extern unsigned long __find_next_bit(
-    const unsigned long *addr, unsigned long size, unsigned long offset);
-extern unsigned long __find_first_zero_bit(
-    const unsigned long *addr, unsigned long size);
-extern unsigned long __find_next_zero_bit(
-    const unsigned long *addr, unsigned long size, unsigned long offset);
+extern unsigned int __find_first_bit(
+    const unsigned long *addr, unsigned int size);
+extern unsigned int __find_next_bit(
+    const unsigned long *addr, unsigned int size, unsigned int offset);
+extern unsigned int __find_first_zero_bit(
+    const unsigned long *addr, unsigned int size);
+extern unsigned int __find_next_zero_bit(
+    const unsigned long *addr, unsigned int size, unsigned int offset);
 
 /* return index of first bit set in val or BITS_PER_LONG when no bit is set */
-static inline unsigned long __scanbit(unsigned long val)
+static inline unsigned int __scanbit(unsigned long val)
 {
        __asm__ ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) );
-       return val;
+       return (unsigned int)val;
 }
 
 /**
@@ -320,10 +320,10 @@
  * Returns the bit-number of the first set bit. If no bits are set then the
  * result is undefined.
  */
-static __inline__ unsigned long find_first_set_bit(unsigned long word)
+static __inline__ unsigned int find_first_set_bit(unsigned long word)
 {
        __asm__ ( "bsf %1,%0" : "=r" (word) : "r" (word) );
-       return word;
+       return (unsigned int)word;
 }
 
 /**
