>with the special values '0' and '-1' devoted to 'no timeout' and
As you can see, I dropped the 0 case right from the start. In the 0 case we
must use schedule() and not schedule_timeout() ;-).
Now I have also dropped the -1 (forever) case. There's now a nice
#define (MAX_SCHEDULE_TIMEOUT) that is just the right value (~0UL >> 1).
Using -1 is less pretty than using a #define and causes us to waste time
converting and checking for the -1 case...
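
To make the new calling convention concrete, here is a minimal caller-side
sketch (not part of the patch; the two second interval and the printk()s are
only illustrative):

	signed long remaining;

	/* callers must be TASK_INTERRUPTIBLE before calling schedule_timeout() */
	current->state = TASK_INTERRUPTIBLE;
	remaining = schedule_timeout(2*HZ);

	if (!remaining)
		printk("timed out\n");	/* 0 means the whole timeout elapsed */
	else
		printk("woken with %ld jiffies still to go\n", remaining);

	/* for 'no timeout' just call schedule() directly; for 'forever' pass
	   MAX_SCHEDULE_TIMEOUT, as the signal.c conversion below does */

The return value gives back the unused part of the timeout, which is exactly
what the rmtp handling below needs.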
Here is my latest schedule_timeout() update against 2.1.125 (I like it this
way, btw ;-):
Index: linux/kernel/sched.c
diff -u linux/kernel/sched.c:1.1.1.1 linux/kernel/sched.c:1.1.1.1.14.7
--- linux/kernel/sched.c:1.1.1.1 Fri Oct 2 19:22:39 1998
+++ linux/kernel/sched.c Sun Oct 18 21:47:49 1998
@@ -90,7 +90,7 @@
extern void mem_use(void);
-unsigned long volatile jiffies=0;
+unsigned long volatile jiffies=JIFFIES_OFFSET;
/*
* Init task must be ok at boot for the ix86 as we will check its signals
@@ -248,7 +248,6 @@
{
struct task_struct * p = (struct task_struct *) __data;
- p->timeout = 0;
wake_up_process(p);
}
@@ -341,7 +340,7 @@
#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
-static unsigned long timer_jiffies = 0;
+static unsigned long timer_jiffies = JIFFIES_OFFSET;
static inline void insert_timer(struct timer_list *timer,
struct timer_list **vec, int idx)
@@ -372,12 +371,12 @@
} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
insert_timer(timer, tv4.vec, i);
- } else if (expires < timer_jiffies) {
+ } else if ((signed long) idx < 0) {
/* can happen if you add a timer with expires == jiffies,
* or you set a timer to go off in the past
*/
insert_timer(timer, tv1.vec, tv1.index);
- } else if (idx < 0xffffffffUL) {
+ } else if (idx <= 0xffffffffUL) {
int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
insert_timer(timer, tv5.vec, i);
} else {
@@ -445,6 +444,50 @@
#endif
+signed long schedule_timeout(signed long timeout)
+{
+ struct timer_list timer;
+ unsigned long expire;
+
+ /*
+ * PARANOID.
+ */
+ if (current->state != TASK_INTERRUPTIBLE)
+ {
+ printk(KERN_ERR "schedule_timeout: task not interrutible "
+ " from %p\n", __builtin_return_address(0));
+ goto normal_schedule;
+ }
+ if (!timeout || timeout < 0)
+ {
+ printk(KERN_ERR "schedule_timeout: wrong timeout value %lx"
+ "from %p\n", timeout, __builtin_return_address(0));
+ goto normal_schedule;
+ }
+
+ /*
+ * Here we start for real.
+ */
+ expire = (unsigned long) timeout + jiffies;
+
+ init_timer(&timer);
+ timer.expires = expire;
+ timer.data = (unsigned long) current;
+ timer.function = process_timeout;
+
+ add_timer(&timer);
+ schedule();
+ del_timer(&timer);
+
+ timeout = (signed long) (expire - jiffies);
+
+ return timeout < 0 ? 0 : timeout;
+
+ normal_schedule:
+ schedule();
+ return 0;
+}
+
/*
* 'schedule()' is the scheduler function. It's a very simple and nice
* scheduler: it's not perfect, but certainly works for most things.
@@ -458,7 +501,6 @@
asmlinkage void schedule(void)
{
struct task_struct * prev, * next;
- unsigned long timeout;
int this_cpu;
prev = current;
@@ -481,16 +523,11 @@
prev->counter = prev->priority;
move_last_runqueue(prev);
}
- timeout = 0;
+
switch (prev->state) {
case TASK_INTERRUPTIBLE:
if (signal_pending(prev))
- goto makerunnable;
- timeout = prev->timeout;
- if (timeout && (timeout <= jiffies)) {
- prev->timeout = 0;
- timeout = 0;
- makerunnable:
+ {
prev->state = TASK_RUNNING;
break;
}
@@ -550,21 +587,9 @@
#endif
if (prev != next) {
- struct timer_list timer;
-
kstat.context_swtch++;
- if (timeout) {
- init_timer(&timer);
- timer.expires = timeout;
- timer.data = (unsigned long) prev;
- timer.function = process_timeout;
- add_timer(&timer);
- }
get_mmu_context(next);
switch_to(prev,next);
-
- if (timeout)
- del_timer(&timer);
}
spin_unlock(&scheduler_lock);
@@ -803,7 +828,7 @@
break;
if (!(mask & timer_active))
continue;
- if (tp->expires > jiffies)
+ if (time_after(tp->expires, jiffies))
continue;
timer_active &= ~mask;
tp->fn();
@@ -1563,16 +1588,14 @@
return 0;
}
- expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
+ expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
- current->timeout = expire;
current->state = TASK_INTERRUPTIBLE;
- schedule();
+ expire = schedule_timeout(expire);
- if (expire > jiffies) {
+ if (expire) {
if (rmtp) {
- jiffies_to_timespec(expire - jiffies -
- (expire > jiffies + 1), &t);
+ jiffies_to_timespec(expire, &t);
if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
return -EFAULT;
}
Index: linux/include/linux/sched.h
diff -u linux/include/linux/sched.h:1.1.1.1.2.1 linux/include/linux/sched.h:1.1.1.1.2.1.2.7
--- linux/include/linux/sched.h:1.1.1.1.2.1 Mon Oct 5 18:21:46 1998
+++ linux/include/linux/sched.h Sun Oct 18 20:50:16 1998
@@ -23,6 +23,8 @@
#include <linux/capability.h>
#include <linux/securebits.h>
+#define JIFFIES_OFFSET (-120*HZ)
+
/*
* cloning flags:
*/
@@ -119,6 +121,8 @@
extern void show_state(void);
extern void trap_init(void);
+#define MAX_SCHEDULE_TIMEOUT (~0UL >> 1)
+extern signed long schedule_timeout(signed long timeout);
asmlinkage void schedule(void);
@@ -258,7 +262,7 @@
struct task_struct **tarray_ptr;
struct wait_queue *wait_chldexit; /* for wait4() */
- unsigned long timeout, policy, rt_priority;
+ unsigned long policy, rt_priority;
unsigned long it_real_value, it_prof_value, it_virt_value;
unsigned long it_real_incr, it_prof_incr, it_virt_incr;
struct timer_list real_timer;
@@ -348,7 +352,7 @@
/* pidhash */ NULL, NULL, \
/* tarray */ &task[0], \
/* chld wait */ NULL, \
-/* timeout */ 0,SCHED_OTHER,0,0,0,0,0,0,0, \
+/* timeout */ SCHED_OTHER,0,0,0,0,0,0,0, \
/* timer */ { NULL, NULL, 0, 0, it_real_fn }, \
/* utime */ {0,0,0,0},0, \
/* per CPU times */ {0, }, {0, }, \
Index: linux/kernel/signal.c
diff -u linux/kernel/signal.c:1.1.1.1 linux/kernel/signal.c:1.1.1.1.12.4
--- linux/kernel/signal.c:1.1.1.1 Fri Oct 2 19:22:39 1998
+++ linux/kernel/signal.c Sun Oct 18 21:50:32 1998
@@ -712,6 +712,7 @@
sigset_t these;
struct timespec ts;
siginfo_t info;
+ long timeout = 0;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
@@ -738,22 +739,18 @@
if (!sig) {
/* None ready -- temporarily unblock those we're interested
in so that we'll be awakened when they arrive. */
- unsigned long expire;
sigset_t oldblocked = current->blocked;
sigandsets(&current->blocked, &current->blocked, &these);
recalc_sigpending(current);
spin_unlock_irq(&current->sigmask_lock);
- expire = ~0UL;
- if (uts) {
- expire = (timespec_to_jiffies(&ts)
- + (ts.tv_sec || ts.tv_nsec));
- expire += jiffies;
- }
- current->timeout = expire;
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (uts)
+ timeout = (timespec_to_jiffies(&ts)
+ + (ts.tv_sec || ts.tv_nsec));
current->state = TASK_INTERRUPTIBLE;
- schedule();
+ timeout = schedule_timeout(timeout);
spin_lock_irq(&current->sigmask_lock);
sig = dequeue_signal(&these, &info);
@@ -770,10 +767,8 @@
}
} else {
ret = -EAGAIN;
- if (current->timeout != 0) {
- current->timeout = 0;
+ if (timeout)
ret = -EINTR;
- }
}
return ret;
Andrea Arcangeli