
[Xen-changelog] [xen-unstable] [XEN] Fix 64-bit build.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxxx
# Node ID e10a13f1320708951014c3e38a606c6eb651e943
# Parent  e32b74b2d7f43a0ec3807893664fe1c5ddd312eb
[XEN] Fix 64-bit build.

This required fiddling with the asm constraints of the atomic bitops. It
seems gcc isn't entirely happy with "+m": its manual says that the
'+' modifier should be used only when the constraint allows the operand
to reside in a register.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vioapic.c   |    7 ++---
 xen/arch/x86/hvm/vmx/vmcs.c  |    2 -
 xen/include/asm-x86/bitops.h |   58 ++++++++++++++++++++++---------------------
 3 files changed, 35 insertions(+), 32 deletions(-)
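
For readers who want to try the constraint change in isolation, the pattern
below mirrors the non-LOCKed __set_bit() variant changed further down (a
minimal sketch; my_set_bit() is an illustrative name, not part of the patch).
The memory location is now listed once as an output ("=m") and again as an
input ("m"), instead of as a single read-write "+m" operand:

    /* Minimal sketch of the new constraint style; x86/x86-64 GCC inline asm.
     * my_set_bit() is an illustrative name, not part of the patch. */
    static inline void my_set_bit(int nr, volatile void *addr)
    {
        __asm__ __volatile__(
                "btsl %1,%0"
                /* old form: :"+m" (*(volatile long *)addr)
                 *           :"dIr" (nr) : "memory"          */
                :"=m" (*(volatile long *)addr)
                :"dIr" (nr), "m" (*(volatile long *)addr)
                :"memory");
    }

The asm template itself is untouched; only the operand lists change, which is
why the patch edits every bitop below in the same mechanical way.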

diff -r e32b74b2d7f4 -r e10a13f13207 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c        Tue Nov 07 18:13:12 2006 +0000
+++ b/xen/arch/x86/hvm/vioapic.c        Tue Nov 07 23:14:09 2006 +0000
@@ -399,7 +399,8 @@ static void ioapic_deliver(struct vioapi
     struct vlapic *target;
 
     HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
-                "dest %x dest_mode %x delivery_mode %x vector %x trig_mode 
%x\n",
+                "dest=%x dest_mode=%x delivery_mode=%x "
+                "vector=%x trig_mode=%x\n",
                 dest, dest_mode, delivery_mode, vector, trig_mode);
 
     deliver_bitmask = ioapic_get_delivery_bitmask(
@@ -430,8 +431,8 @@ static void ioapic_deliver(struct vioapi
         }
         else
         {
-            HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
-                        "null round robin mask %x vector %x delivery_mode 
%x\n",
+            HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
+                        "mask=%x vector=%x delivery_mode=%x\n",
                         deliver_bitmask, vector, dest_LowestPrio);
         }
         break;
diff -r e32b74b2d7f4 -r e10a13f13207 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Tue Nov 07 18:13:12 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Tue Nov 07 23:14:09 2006 +0000
@@ -420,7 +420,7 @@ static int construct_vmcs(struct vcpu *v
     error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                        v->arch.hvm_vcpu.u.vmx.exec_control);
     error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
-                       page_to_maddr(v->arch.hvm_vcpu.vlapic->regs_page));
+                       page_to_maddr(vcpu_vlapic(v)->regs_page));
     error |= __vmwrite(TPR_THRESHOLD, 0);
 #endif
 
diff -r e32b74b2d7f4 -r e10a13f13207 xen/include/asm-x86/bitops.h
--- a/xen/include/asm-x86/bitops.h      Tue Nov 07 18:13:12 2006 +0000
+++ b/xen/include/asm-x86/bitops.h      Tue Nov 07 23:14:09 2006 +0000
@@ -14,10 +14,12 @@
 #endif
 
 /*
- * We use the "+m" constraint because the memory operand is both read from
- * and written to. Since the operand is in fact a word array, we also
- * specify "memory" in the clobbers list to indicate that words other than
- * the one directly addressed by the memory operand may be modified.
+ * We specify the memory operand as both input and output because the memory
+ * operand is both read from and written to. Since the operand is in fact a
+ * word array, we also specify "memory" in the clobbers list to indicate that
+ * words other than the one directly addressed by the memory operand may be
+ * modified. We don't use "+m" because the gcc manual says that it should be
+ * used only when the constraint allows the operand to reside in a register.
  */
 
 #define ADDR (*(volatile long *) addr)
@@ -36,8 +38,8 @@ static __inline__ void set_bit(int nr, v
 {
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
-               :"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
 }
 
 /**
@@ -53,8 +55,8 @@ static __inline__ void __set_bit(int nr,
 {
        __asm__(
                "btsl %1,%0"
-               :"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
 }
 
 /**
@@ -71,8 +73,8 @@ static __inline__ void clear_bit(int nr,
 {
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
-               :"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
 }
 
 /**
@@ -88,8 +90,8 @@ static __inline__ void __clear_bit(int n
 {
        __asm__(
                "btrl %1,%0"
-               :"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
 }
 
 #define smp_mb__before_clear_bit()     barrier()
@@ -108,8 +110,8 @@ static __inline__ void __change_bit(int 
 {
        __asm__ __volatile__(
                "btcl %1,%0"
-               :"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
 }
 
 /**
@@ -125,8 +127,8 @@ static __inline__ void change_bit(int nr
 {
        __asm__ __volatile__( LOCK_PREFIX
                "btcl %1,%0"
-               :"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
 }
 
 /**
@@ -143,8 +145,8 @@ static __inline__ int test_and_set_bit(i
 
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=r" (oldbit),"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
 }
 
@@ -163,8 +165,8 @@ static __inline__ int __test_and_set_bit
 
        __asm__(
                "btsl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=r" (oldbit),"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
 }
 
@@ -182,8 +184,8 @@ static __inline__ int test_and_clear_bit
 
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=r" (oldbit),"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
 }
 
@@ -202,8 +204,8 @@ static __inline__ int __test_and_clear_b
 
        __asm__(
                "btrl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=r" (oldbit),"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
 }
 
@@ -214,8 +216,8 @@ static __inline__ int __test_and_change_
 
        __asm__ __volatile__(
                "btcl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=r" (oldbit),"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
 }
 
@@ -233,8 +235,8 @@ static __inline__ int test_and_change_bi
 
        __asm__ __volatile__( LOCK_PREFIX
                "btcl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"+m" (ADDR)
-               :"dIr" (nr) : "memory");
+               :"=r" (oldbit),"=m" (ADDR)
+               :"dIr" (nr), "m" (ADDR) : "memory");
        return oldbit;
 }
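
As a usage note on the "memory" clobber that the new comment keeps: the bit
index may reach past the long named by the memory operand, so other words of
the underlying array can be modified behind the compiler's back. A hedged,
illustrative caller (bitmap and touch_bits() are hypothetical names, not part
of the patch):

    /* Hypothetical caller, not part of the patch: a four-word bitmap.
     * A bit index past the first long lands in a later array word, i.e. in
     * memory that the "=m"/"m" operand does not name -- hence the "memory"
     * clobber. */
    static unsigned long bitmap[4];

    static void touch_bits(void)
    {
        set_bit(5, bitmap);                  /* within the first word     */
        set_bit(100, bitmap);                /* lands in a later word     */
        if ( test_and_set_bit(100, bitmap) ) /* non-zero: already set     */
            clear_bit(100, bitmap);
    }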
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog