This patch builds upon your smplock.h cleanup. It does the following:
- convert various defines to static inline functions, for type safety of the parameters (a short sketch after this list illustrates why).
- move __inline__ to inline.
- add kernel_locked_for_task(task) and make kernel_locked() use it.
- add do { } while (0) to the remaining macros.
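
A minimal user-space sketch of the two idioms, for illustration only
(struct task, TASK_LOCKED_MACRO and RESET_TASK are made-up names, not
anything from the patch):

/*
 * A static inline gets real type checking on its argument, while a
 * macro accepts anything with a lock_depth member.  do { } while (0)
 * makes a multi-statement macro behave as a single statement, so it
 * is safe as the body of a brace-less if/else.
 */
#include <stdio.h>

struct task { int lock_depth; };

/* macro: any pointer with a lock_depth member compiles silently */
#define TASK_LOCKED_MACRO(t)	((t)->lock_depth >= 0)

/* inline: the compiler rejects anything but a struct task * */
static inline int task_locked(struct task *t)
{
	return t->lock_depth >= 0;
}

/* without do { } while (0) the else in main() would not compile */
#define RESET_TASK(t)					\
	do {						\
		(t)->lock_depth = -1;			\
		printf("reset %p\n", (void *)(t));	\
	} while (0)

int main(void)
{
	struct task t = { .lock_depth = 0 };

	if (task_locked(&t))
		RESET_TASK(&t);		/* expands as one statement */
	else
		printf("already unlocked\n");

	return 0;
}
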
# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
# ChangeSet 1.519 -> 1.521
# include/asm-i386/smplock.h 1.9 -> 1.11
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 02/08/15 mulix@alhambra.merseine.nu 1.520
# further cleanups for smplock.h
# - move various defines to static inline functions for type safety of parameters.
# - move __inline__ to inline.
# - add kernel_locked_for_task(task) and make kernel_locked() use it.
# - add do { } while (0) to macros
# --------------------------------------------
# 02/08/15 mulix@alhambra.merseine.nu 1.521
# check if the kernel is locked when unlocking via the proper interface
# --------------------------------------------
#
diff -Nru a/include/asm-i386/smplock.h b/include/asm-i386/smplock.h
--- a/include/asm-i386/smplock.h Thu Aug 15 13:30:43 2002
+++ b/include/asm-i386/smplock.h Thu Aug 15 13:30:43 2002
@@ -10,29 +10,36 @@
extern spinlock_t kernel_flag;
-#define kernel_locked() (current->lock_depth >= 0)
+static inline int kernel_locked_for_task(struct task_struct* task)
+{
+ return (task->lock_depth >= 0);
+}
+
+static inline int kernel_locked(void)
+{
+ return kernel_locked_for_task(current);
+}
-#define get_kernel_lock() spin_lock(&kernel_flag)
-#define put_kernel_lock() spin_unlock(&kernel_flag)
+#define get_kernel_lock() do { spin_lock(&kernel_flag); } while (0)
+#define put_kernel_lock() do { spin_unlock(&kernel_flag); } while (0)
/*
* Release global kernel lock and global interrupt lock
*/
-#define release_kernel_lock(task) \
-do { \
- if (unlikely(task->lock_depth >= 0)) \
- put_kernel_lock(); \
-} while (0)
+static inline void release_kernel_lock(struct task_struct* task)
+{
+ if (unlikely(kernel_locked_for_task(task)))
+ put_kernel_lock();
+}
/*
* Re-acquire the kernel lock
*/
-#define reacquire_kernel_lock(task) \
-do { \
- if (unlikely(task->lock_depth >= 0)) \
- get_kernel_lock(); \
-} while (0)
-
+static inline void reacquire_kernel_lock(struct task_struct* task)
+{
+ if (unlikely(kernel_locked_for_task(task)))
+ get_kernel_lock();
+}
/*
* Getting the big kernel lock.
@@ -41,7 +48,7 @@
* so we only need to worry about other
* CPU's.
*/
-static __inline__ void lock_kernel(void)
+static inline void lock_kernel(void)
{
int depth = current->lock_depth+1;
if (!depth)
@@ -49,9 +56,9 @@
current->lock_depth = depth;
}
-static __inline__ void unlock_kernel(void)
+static inline void unlock_kernel(void)
{
- if (current->lock_depth < 0)
+ if (!kernel_locked())
BUG();
if (--current->lock_depth < 0)
put_kernel_lock();
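
For reference, a rough user-space model of the lock_depth counting above
(pthread mutex standing in for the kernel_flag spinlock, single task
assumed; this is a sketch, not kernel code):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t kernel_flag = PTHREAD_MUTEX_INITIALIZER;
static int lock_depth = -1;		/* mirrors current->lock_depth */

static void lock_kernel(void)
{
	int depth = lock_depth + 1;
	if (!depth)			/* -1 -> 0: first, real acquisition */
		pthread_mutex_lock(&kernel_flag);
	lock_depth = depth;
}

static void unlock_kernel(void)
{
	assert(lock_depth >= 0);	/* the BUG() check from the patch */
	if (--lock_depth < 0)		/* 0 -> -1: outermost release */
		pthread_mutex_unlock(&kernel_flag);
}

int main(void)
{
	lock_kernel();			/* takes the mutex */
	lock_kernel();			/* nested: bumps the counter only */
	unlock_kernel();		/* drops the counter only */
	unlock_kernel();		/* releases the mutex */
	return 0;
}
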
-- "Hmm.. Cache shrink failed - time to kill something? Mhwahahhaha! This is the part I really like. Giggle." -- linux/mm/vmscan.c http://vipe.technion.ac.il/~mulix/ http://syscalltrack.sf.net