
[Xen-changelog] [xen master] x86: use gcc6'es flags asm() output support

commit 452ec7fb73b648863dd5fe62c6134229cedef9e5
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Aug 2 17:51:10 2016 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Aug 2 17:51:10 2016 +0200

    x86: use gcc6'es flags asm() output support
    
    ..., rendering affected code more efficient and smaller.
    
    Note that in atomic.h this at once does away with the redundant output
    and input specifications of the memory location touched.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/x86_emulate/x86_emulate.c | 29 ++++++++++++
 xen/include/asm-x86/atomic.h           | 68 +++++++++++++++++++----------
 xen/include/asm-x86/bitops.h           | 80 ++++++++++++++++++++++++++--------
 xen/include/asm-x86/hvm/vmx/vmx.h      |  9 +++-
 4 files changed, 143 insertions(+), 43 deletions(-)
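
For context (not part of the commit itself): below is a minimal standalone
sketch of the pattern this patch applies throughout. With gcc 6 and newer,
__GCC_ASM_FLAG_OUTPUTS__ is predefined and an "=@cc<cond>" asm() output hands
the named condition flag directly to the compiler, so the explicit set<cond>
(or sbb) instruction needed in the pre-gcc6 fallback can be dropped. The same
rework also lets the touched memory location be described by a single "+m"
read-write operand instead of a redundant "=m" output plus "m" input pair, as
noted for atomic.h above. The function below is a made-up example, not code
from the tree:

    static inline int demo_dec_and_test(unsigned int *p)
    {
        unsigned char c;

    #ifdef __GCC_ASM_FLAG_OUTPUTS__
        /* gcc 6+: ZF left behind by "decl" is read directly into c. */
        asm volatile ( "lock; decl %0"
                       : "+m" (*p), "=@ccz" (c) :: "memory" );
    #else
        /* Pre-gcc6 fallback: materialise ZF into c with an explicit setz. */
        asm volatile ( "lock; decl %0; setz %1"
                       : "+m" (*p), "=qm" (c) :: "memory" );
    #endif

        return c;
    }

With the flag-output form the compiler can branch on the flags directly
instead of first going through a byte register, which is what makes the
affected code smaller and more efficient, as the commit message says.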

diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index fe594ba..d5a56cf 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -610,7 +610,12 @@ do {                                                    \
  */
 static bool_t even_parity(uint8_t v)
 {
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm ( "test %1,%1" : "=@ccp" (v) : "q" (v) );
+#else
     asm ( "test %1,%1; setp %0" : "=qm" (v) : "q" (v) );
+#endif
+
     return v;
 }
 
@@ -832,8 +837,14 @@ static int read_ulong(
 static bool_t mul_dbl(unsigned long m[2])
 {
     bool_t rc;
+
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm ( "mul %1" : "+a" (m[0]), "+d" (m[1]), "=@cco" (rc) );
+#else
     asm ( "mul %1; seto %2"
           : "+a" (m[0]), "+d" (m[1]), "=qm" (rc) );
+#endif
+
     return rc;
 }
 
@@ -845,8 +856,14 @@ static bool_t mul_dbl(unsigned long m[2])
 static bool_t imul_dbl(unsigned long m[2])
 {
     bool_t rc;
+
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm ( "imul %1" : "+a" (m[0]), "+d" (m[1]), "=@cco" (rc) );
+#else
     asm ( "imul %1; seto %2"
           : "+a" (m[0]), "+d" (m[1]), "=qm" (rc) );
+#endif
+
     return rc;
 }
 
@@ -4650,9 +4667,15 @@ x86_emulate(
     case 0xbc: /* bsf or tzcnt */ {
         bool_t zf;
 
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+        asm ( "bsf %2,%0"
+              : "=r" (dst.val), "=@ccz" (zf)
+              : "rm" (src.val) );
+#else
         asm ( "bsf %2,%0; setz %1"
               : "=r" (dst.val), "=qm" (zf)
               : "rm" (src.val) );
+#endif
         _regs.eflags &= ~EFLG_ZF;
         if ( (vex.pfx == vex_f3) && vcpu_has_bmi1() )
         {
@@ -4676,9 +4699,15 @@ x86_emulate(
     case 0xbd: /* bsr or lzcnt */ {
         bool_t zf;
 
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+        asm ( "bsr %2,%0"
+              : "=r" (dst.val), "=@ccz" (zf)
+              : "rm" (src.val) );
+#else
         asm ( "bsr %2,%0; setz %1"
               : "=r" (dst.val), "=qm" (zf)
               : "rm" (src.val) );
+#endif
         _regs.eflags &= ~EFLG_ZF;
         if ( (vex.pfx == vex_f3) && vcpu_has_lzcnt() )
         {
diff --git a/xen/include/asm-x86/atomic.h b/xen/include/asm-x86/atomic.h
index 101eded..ef7e70b 100644
--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -133,12 +133,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
-    unsigned char c;
+    bool_t c;
+
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "lock; subl %2,%0"
+                   : "+m" (*(volatile int *)&v->counter), "=@ccz" (c)
+                   : "ir" (i) : "memory" );
+#else
+    asm volatile ( "lock; subl %2,%0; setz %1"
+                   : "+m" (*(volatile int *)&v->counter), "=qm" (c)
+                   : "ir" (i) : "memory" );
+#endif
 
-    asm volatile (
-        "lock; subl %2,%0; sete %1"
-        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
-        : "ir" (i), "m" (*(volatile int *)&v->counter) : "memory" );
     return c;
 }
 
@@ -157,13 +163,19 @@ static inline int atomic_inc_return(atomic_t *v)
 
 static inline int atomic_inc_and_test(atomic_t *v)
 {
-    unsigned char c;
+    bool_t c;
 
-    asm volatile (
-        "lock; incl %0; sete %1"
-        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
-        : "m" (*(volatile int *)&v->counter) : "memory" );
-    return c != 0;
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "lock; incl %0"
+                   : "+m" (*(volatile int *)&v->counter), "=@ccz" (c)
+                   :: "memory" );
+#else
+    asm volatile ( "lock; incl %0; setz %1"
+                   : "+m" (*(volatile int *)&v->counter), "=qm" (c)
+                   :: "memory" );
+#endif
+
+    return c;
 }
 
 static inline void atomic_dec(atomic_t *v)
@@ -181,23 +193,35 @@ static inline int atomic_dec_return(atomic_t *v)
 
 static inline int atomic_dec_and_test(atomic_t *v)
 {
-    unsigned char c;
+    bool_t c;
 
-    asm volatile (
-        "lock; decl %0; sete %1"
-        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
-        : "m" (*(volatile int *)&v->counter) : "memory" );
-    return c != 0;
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "lock; decl %0"
+                   : "+m" (*(volatile int *)&v->counter), "=@ccz" (c)
+                   :: "memory" );
+#else
+    asm volatile ( "lock; decl %0; setz %1"
+                   : "+m" (*(volatile int *)&v->counter), "=qm" (c)
+                   :: "memory" );
+#endif
+
+    return c;
 }
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
-    unsigned char c;
+    bool_t c;
+
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "lock; addl %2,%0"
+                   : "+m" (*(volatile int *)&v->counter), "=@ccs" (c)
+                   : "ir" (i) : "memory" );
+#else
+    asm volatile ( "lock; addl %2,%0; sets %1"
+                   : "+m" (*(volatile int *)&v->counter), "=qm" (c)
+                   : "ir" (i) : "memory" );
+#endif
 
-    asm volatile (
-        "lock; addl %2,%0; sets %1"
-        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
-        : "ir" (i), "m" (*(volatile int *)&v->counter) : "memory" );
     return c;
 }
 
diff --git a/xen/include/asm-x86/bitops.h b/xen/include/asm-x86/bitops.h
index ff43a98..a8db7e4 100644
--- a/xen/include/asm-x86/bitops.h
+++ b/xen/include/asm-x86/bitops.h
@@ -145,8 +145,14 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
 {
     int oldbit;
 
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "lock; btsl %2,%1"
+                   : "=@ccc" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory" );
+#else
     asm volatile ( "lock; btsl %2,%1\n\tsbbl %0,%0"
-                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
+                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory" );
+#endif
+
     return oldbit;
 }
 #define test_and_set_bit(nr, addr) ({                   \
@@ -167,10 +173,16 @@ static inline int __test_and_set_bit(int nr, void *addr)
 {
     int oldbit;
 
-    asm volatile (
-        "btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "+m" (*(int *)addr)
-        : "Ir" (nr) : "memory" );
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "btsl %2,%1"
+                   : "=@ccc" (oldbit), "+m" (*(int *)addr)
+                   : "Ir" (nr) : "memory" );
+#else
+    asm volatile ( "btsl %2,%1\n\tsbbl %0,%0"
+                   : "=r" (oldbit), "+m" (*(int *)addr)
+                   : "Ir" (nr) : "memory" );
+#endif
+
     return oldbit;
 }
 #define __test_and_set_bit(nr, addr) ({                 \
@@ -190,8 +202,14 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
 {
     int oldbit;
 
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "lock; btrl %2,%1"
+                   : "=@ccc" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory" );
+#else
     asm volatile ( "lock; btrl %2,%1\n\tsbbl %0,%0"
-                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
+                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory" );
+#endif
+
     return oldbit;
 }
 #define test_and_clear_bit(nr, addr) ({                 \
@@ -212,10 +230,16 @@ static inline int __test_and_clear_bit(int nr, void *addr)
 {
     int oldbit;
 
-    asm volatile (
-        "btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "+m" (*(int *)addr)
-        : "Ir" (nr) : "memory" );
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "btrl %2,%1"
+                   : "=@ccc" (oldbit), "+m" (*(int *)addr)
+                   : "Ir" (nr) : "memory" );
+#else
+    asm volatile ( "btrl %2,%1\n\tsbbl %0,%0"
+                   : "=r" (oldbit), "+m" (*(int *)addr)
+                   : "Ir" (nr) : "memory" );
+#endif
+
     return oldbit;
 }
 #define __test_and_clear_bit(nr, addr) ({               \
@@ -228,10 +252,16 @@ static inline int __test_and_change_bit(int nr, void *addr)
 {
     int oldbit;
 
-    asm volatile (
-        "btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "+m" (*(int *)addr)
-        : "Ir" (nr) : "memory" );
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "btcl %2,%1"
+                   : "=@ccc" (oldbit), "+m" (*(int *)addr)
+                   : "Ir" (nr) : "memory" );
+#else
+    asm volatile ( "btcl %2,%1\n\tsbbl %0,%0"
+                   : "=r" (oldbit), "+m" (*(int *)addr)
+                   : "Ir" (nr) : "memory" );
+#endif
+
     return oldbit;
 }
 #define __test_and_change_bit(nr, addr) ({              \
@@ -251,8 +281,14 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 {
     int oldbit;
 
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "lock; btcl %2,%1"
+                   : "=@ccc" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory" );
+#else
     asm volatile ( "lock; btcl %2,%1\n\tsbbl %0,%0"
-                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
+                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory" );
+#endif
+
     return oldbit;
 }
 #define test_and_change_bit(nr, addr) ({                \
@@ -270,10 +306,16 @@ static inline int variable_test_bit(int nr, const volatile void *addr)
 {
     int oldbit;
 
-    asm volatile (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit)
-        : "m" (CONST_ADDR), "Ir" (nr) : "memory" );
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+    asm volatile ( "btl %2,%1"
+                   : "=@ccc" (oldbit)
+                   : "m" (CONST_ADDR), "Ir" (nr) : "memory" );
+#else
+    asm volatile ( "btl %2,%1\n\tsbbl %0,%0"
+                   : "=r" (oldbit)
+                   : "m" (CONST_ADDR), "Ir" (nr) : "memory" );
+#endif
+
     return oldbit;
 }
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 359b2a9..4cdd9b1 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -406,12 +406,17 @@ static inline bool_t __vmread_safe(unsigned long field, unsigned long *value)
                    VMREAD_OPCODE MODRM_EAX_ECX
 #endif
                    /* CF==1 or ZF==1 --> rc = 0 */
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+                   : "=@ccnbe" (okay),
+#else
                    "setnbe %0"
+                   : "=qm" (okay),
+#endif
 #ifdef HAVE_GAS_VMX
-                   : "=qm" (okay), "=rm" (*value)
+                     "=rm" (*value)
                    : "r" (field));
 #else
-                   : "=qm" (okay), "=c" (*value)
+                     "=c" (*value)
                    : "a" (field));
 #endif
 
--
generated by git-patchbot for /home/xen/git/xen.git#master
