[patch] Timeout semaphores...

Michael Zappe (zapman@interlan.net)
Thu, 14 Oct 1999 15:33:34 -0400


This is a patch that adds timeout-capable semaphores to 2.2.12 (and probably earlier kernels, though I haven't tested it on any). Right now it uses the same algorithm as the rest of the 2.2 semaphores, although I noticed that 2.3.xx has new semaphore code that eliminates a race condition. Are there any plans to back-port that code? (I'm not sure how such things are looked upon for production kernels.) I'll have a patch for 2.3.21 later; I'm not sure when I'll get around to it, but if anyone needs it, just let me know. ;-)
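
For anyone who wants to try it before reading the patch, a call site looks something like the sketch below (card_sem and grab_card are made-up names, just for illustration). The timeout is in jiffies; down_timeout() returns 0 once it gets the semaphore, -EBUSY if the timeout expires first, or -EINTR if a signal shows up while sleeping:

	static struct semaphore card_sem;	/* hypothetical; sema_init(&card_sem, 1) at init time */

	int grab_card(void)
	{
		/* Sleep at most two seconds waiting for the semaphore. */
		int err = down_timeout(&card_sem, 2 * HZ);

		if (err)	/* -EBUSY (timed out) or -EINTR (signal) */
			return err;

		/* ... do work that needs the card ... */

		up(&card_sem);
		return 0;
	}

Scale the timeout by HZ as above so it means the same wall-clock time on any tick rate.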

Later,

Mike

diff -r -u linux-2.2.12/arch/i386/kernel/i386_ksyms.c linux/arch/i386/kernel/i386_ksyms.c
--- linux-2.2.12/arch/i386/kernel/i386_ksyms.c Mon Aug 9 15:05:05 1999
+++ linux/arch/i386/kernel/i386_ksyms.c Thu Oct 14 11:37:48 1999
@@ -47,6 +47,7 @@
EXPORT_SYMBOL_NOVERS(__down_failed);
EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
+EXPORT_SYMBOL_NOVERS(__down_failed_timeout);
EXPORT_SYMBOL_NOVERS(__up_wakeup);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy);
diff -r -u linux-2.2.12/arch/i386/lib/semaphore.S linux/arch/i386/lib/semaphore.S
--- linux-2.2.12/arch/i386/lib/semaphore.S Wed Feb 17 12:34:13 1999
+++ linux/arch/i386/lib/semaphore.S Thu Oct 14 11:50:44 1999
@@ -40,6 +40,15 @@
popl %edx /* restore %edx */
ret

+/* Don't save/restore %eax, because that will be our return value */
+ENTRY(__down_failed_timeout)
+	pushl %edx		/* save %edx (it carries the timeout, our second argument) */
+	pushl %ecx		/* save %ecx (it carries sem, our first argument) */
+	call SYMBOL_NAME(__down_timeout)
+	popl %ecx		/* restore %ecx (count on __down_timeout not changing it) */
+	popl %edx		/* restore %edx */
+	ret
+
ENTRY(__up_wakeup)
pushl %eax /* save %eax */
pushl %edx /* save %edx */
diff -r -u linux-2.2.12/include/asm-i386/semaphore-helper.h linux/include/asm-i386/semaphore-helper.h
--- linux-2.2.12/include/asm-i386/semaphore-helper.h Mon Aug 9 15:04:41 1999
+++ linux/include/asm-i386/semaphore-helper.h Wed Oct 13 18:40:05 1999
@@ -99,4 +99,32 @@
return ret;
}

+/*
+ * waking_non_zero_interruptible_timeout:
+ *	1	got the lock
+ *	0	go to sleep
+ *	-EINTR	interrupted
+ *
+ * We must undo the sem->count decrement made by down_timeout() while we
+ * hold the spinlock, so that the atomic_inc() here is atomic with respect
+ * to the atomic_read() in wake_one_more(); otherwise we can race. -arca
+ */
+static inline int waking_non_zero_interruptible_timeout(struct semaphore *sem,
+							 struct task_struct *tsk)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->waking > 0) {
+		sem->waking--;
+		ret = 1;
+	} else if (signal_pending(tsk)) {
+		atomic_inc(&sem->count);
+		ret = -EINTR;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+
#endif
diff -r -u linux-2.2.12/include/asm-i386/semaphore.h linux/include/asm-i386/semaphore.h
--- linux-2.2.12/include/asm-i386/semaphore.h Mon Aug 9 15:04:57 1999
+++ linux/include/asm-i386/semaphore.h Thu Oct 14 12:14:48 1999
@@ -44,16 +44,23 @@
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
+asmlinkage int __down_failed_timeout(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
+asmlinkage int __down_timeout(struct semaphore *sem, signed long timeout);
asmlinkage void __up(struct semaphore * sem);

extern spinlock_t semaphore_wake_lock;

-#define sema_init(sem, val) atomic_set(&((sem)->count), (val))
+extern inline void sema_init(struct semaphore *sem, int val)
+{
+	sem->waking = 0;
+	sem->wait = NULL;
+	atomic_set(&sem->count, val);
+}

/*
* This is ugly, but we want the default case to fall through.
@@ -124,6 +131,29 @@
:"memory");
return result;
}
+
+extern inline int down_timeout(struct semaphore * sem, signed long timeout)
+{
+	int result;
+
+	/* sem is passed in %ecx and the timeout in %edx, which is what
+	 * the __down_failed_timeout stub expects. */
+	__asm__ __volatile__(
+		"# atomic down operation with timeout\n\t"
+#ifdef __SMP__
+		"lock ; "
+#endif
+		"decl 0(%1)\n\t"
+		"js 1f\n\t"
+		"xorl %0,%0\n\t"
+		"jmp 2f\n"
+		"1:\tcall __down_failed_timeout\n"
+		"2:"
+		:"=a" (result)
+		:"c" (sem), "d" (timeout)
+		:"memory");
+	return result;
+}

/*
* Note! This is subtle. We jump to wake people up only if
diff -r -u linux-2.2.12/kernel/ksyms.c linux/kernel/ksyms.c
--- linux-2.2.12/kernel/ksyms.c Mon Aug 9 15:04:41 1999
+++ linux/kernel/ksyms.c Wed Oct 13 20:26:26 1999
@@ -380,6 +380,7 @@
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);
EXPORT_SYMBOL(__down_trylock);
+EXPORT_SYMBOL(__down_timeout);
EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(brw_page);

diff -r -u linux-2.2.12/kernel/sched.c linux/kernel/sched.c
--- linux-2.2.12/kernel/sched.c Mon May 10 12:55:21 1999
+++ linux/kernel/sched.c Thu Oct 14 15:15:40 1999
@@ -1037,6 +1037,48 @@
return waking_non_zero_trylock(sem);
}

+int __down_timeout(struct semaphore *sem, signed long timeout)
+{
+	DOWN_VAR
+	signed long start_time = jiffies;
+	signed long remaining;
+	int ret = 0;
+
+	DOWN_HEAD(TASK_INTERRUPTIBLE)
+
+	ret = waking_non_zero_interruptible_timeout(sem, tsk);
+
+	/* Recompute the time left from the total elapsed time on each
+	 * pass; repeatedly subtracting a cumulative elapsed total here
+	 * would count the same jiffies more than once. */
+	remaining = timeout - (signed long) (jiffies - start_time);
+
+	if (ret == 0 && remaining <= 0)
+	{
+		/* Timed out: give back the count taken by the fast path,
+		 * or take the semaphore after all if a wakeup raced in;
+		 * the trylock helper does exactly this under the wake lock. */
+		if (waking_non_zero_trylock(sem))
+			ret = -EBUSY;
+		break;
+	}
+
+	if (ret)
+	{
+		if (ret == 1)
+			/* ret != 0 only if we get interrupted -arca */
+			ret = 0;
+		break;
+	}
+
+	schedule_timeout(remaining);
+
+	DOWN_TAIL(TASK_INTERRUPTIBLE)
+
+	return ret;
+}
+
+
#define SLEEP_ON_VAR \
unsigned long flags; \
struct wait_queue wait;
