
[Xen-changelog] [xen-unstable] x86/mm: Declare an order-enforcing construct for external locks used in the mm layer



# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1320923555 0
# Node ID ff5c8383eb9fcc57e2eba8c3fa254568f594a7f2
# Parent  9bb159ec3337e7999c0d49b81105518810cb7b17
x86/mm: Declare an order-enforcing construct for external locks used in the mm layer

Declare an order-enforcing construct for a lock used in the mm layer
that is not of type mm_lock_t. This is useful whenever the mm layer
takes locks from other subsystems, or locks not implemented as
mm_lock_t.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
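
For illustration only (a sketch, not included in this changeset): the new
macro is intended to be instantiated inside mm-locks.h alongside the existing
declare_mm_lock() uses, so that the __LINE__ expansion fixes the external
lock's position in the locking order. The name "extlock" below is hypothetical:

/* Hypothetical instantiation in mm-locks.h; its position in the file
 * (via __LINE__) determines the level checked and set by the generated
 * mm_enforce_order_lock_pre_extlock() / mm_enforce_order_lock_post_extlock()
 * helpers. */
declare_mm_order_constraint(extlock)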


diff -r 9bb159ec3337 -r ff5c8383eb9f xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h        Thu Nov 10 11:12:35 2011 +0000
+++ b/xen/arch/x86/mm/mm-locks.h        Thu Nov 10 11:12:35 2011 +0000
@@ -70,6 +70,27 @@
         panic("mm lock already held by %s\n", l->locker_function);
     __set_lock_level(level);
 }
+
+static inline void _mm_enforce_order_lock_pre(int level)
+{
+    __check_lock_level(level);
+}
+
+static inline void _mm_enforce_order_lock_post(int level, int *unlock_level,
+                                                unsigned short *recurse_count)
+{
+    if ( recurse_count )
+    {
+        if ( (*recurse_count)++ == 0 )
+        {
+            *unlock_level = __get_lock_level();
+        }
+    } else {
+        *unlock_level = __get_lock_level();
+    }
+    __set_lock_level(level);
+}
+
 /* This wrapper uses the line number to express the locking order below */
 #define declare_mm_lock(name)                                                 \
     static inline void mm_lock_##name(mm_lock_t *l, const char *func, int rec)\
@@ -78,6 +99,16 @@
 #define mm_lock(name, l) mm_lock_##name(l, __func__, 0)
 #define mm_lock_recursive(name, l) mm_lock_##name(l, __func__, 1)
 
+/* This wrapper is intended for "external" locks which do not use
+ * the mm_lock_t types. Such locks inside the mm code are also subject
+ * to ordering constraints. */
+#define declare_mm_order_constraint(name)                                   \
+    static inline void mm_enforce_order_lock_pre_##name(void)               \
+    { _mm_enforce_order_lock_pre(__LINE__); }                               \
+    static inline void mm_enforce_order_lock_post_##name(                   \
+                        int *unlock_level, unsigned short *recurse_count)   \
+    { _mm_enforce_order_lock_post(__LINE__, unlock_level, recurse_count); } \
+
 static inline void mm_unlock(mm_lock_t *l)
 {
     if ( l->lock.recurse_cnt == 1 )
@@ -88,6 +119,21 @@
     spin_unlock_recursive(&l->lock);
 }
 
+static inline void mm_enforce_order_unlock(int unlock_level, 
+                                            unsigned short *recurse_count)
+{
+    if ( recurse_count )
+    {
+        BUG_ON(*recurse_count == 0);
+        if ( (*recurse_count)-- == 1 )
+        {
+            __set_lock_level(unlock_level);
+        }
+    } else {
+        __set_lock_level(unlock_level);
+    }
+}
+
 /************************************************************************
  *                                                                      *
  * To avoid deadlocks, these locks _MUST_ be taken in the order they're *
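
As a hedged usage sketch (not part of this changeset), a non-recursive
external spinlock might be wrapped with the new helpers roughly as follows.
The names "extlock", "ext_lock", "ext_unlock_level", ext_lock_acquire() and
ext_lock_release() are made up for illustration, and assume a matching
declare_mm_order_constraint(extlock) in mm-locks.h:

static DEFINE_SPINLOCK(ext_lock);   /* the external, non-mm_lock_t lock */
static int ext_unlock_level;        /* level to restore on release */

static void ext_lock_acquire(void)
{
    /* Enforce the mm locking order before taking the external lock. */
    mm_enforce_order_lock_pre_extlock();
    spin_lock(&ext_lock);
    /* Record the previous level and move to this constraint's level.
     * recurse_count is NULL because this lock is not taken recursively. */
    mm_enforce_order_lock_post_extlock(&ext_unlock_level, NULL);
}

static void ext_lock_release(void)
{
    spin_unlock(&ext_lock);
    /* Drop back to the level recorded when the lock was taken. */
    mm_enforce_order_unlock(ext_unlock_level, NULL);
}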
