diff -r -N -u -X exclude vanilla22/include/linux/tqueue.h linux/include/linux/tqueue.h
--- vanilla22/include/linux/tqueue.h Mon Jan 25 17:04:20 1999
+++ linux/include/linux/tqueue.h Thu Jan 28 18:05:15 1999
@@ -29,6 +29,8 @@
* used as a bottom half handler. This is for example useful for bottom
* halfs, which want to be delayed until the next clock tick.
*
+ * [ This comment is obsolete, as queue_task and queue_task_irq are now
+ *   the same. VY ]
* Problems:
* - The queue_task_irq() inline function is only atomic with respect to itself.
* Problems can occur, when queue_task_irq() is called from a normal system
@@ -51,7 +53,7 @@
#define DECLARE_TASK_QUEUE(q) task_queue q = NULL
-extern task_queue tq_timer, tq_immediate, tq_scheduler, tq_disk;
+extern task_queue tq_timer, tq_immediate, tq_scheduler, tq_disk, tq_rtl;
/*
* To implement your own list of active bottom halfs, use the following
@@ -78,17 +80,47 @@
extern spinlock_t tqueue_lock;
/*
- * queue_task
+
+ For RTL we need to make these a little more complex.
+ As before, bit 0 of bh_pointer->sync indicates that a bh function
+ is busy. Bit 1 now indicates that its next pointer may be in the
+ middle of being changed.
+ run_task_queue had better never be called from an interrupt routine!
+
+Cases:
+1. queue_task interferes with run_task_queue.
+ The worst case would be queue_task reading the list header and then
+ run_task_queue clearing it. This can't happen: both functions use an
+ atomic xchg to change the queue header. queue_task puts the new head
+ in and reads the old head; run_task_queue copies the head to a temp
+ and clears it. If run_task_queue grabs the new head before queue_task
+ has glued the old list onto it, run_task_queue busy waits on bit 1
+ until queue_task finishes appending the old list to the new head.
+2. queue_task runs on two processors at the same time. They must be
+ working on different bottom halves because of bit 0 in sync. If
+ processor A is enqueuing X and processor B is enqueuing Y, one does
+ the atomic xchg first. Say A is first: A replaces List with X and
+ reads the old List; then B replaces List with Y and reads List == X. No problem.
+
+ If you see a problem, email me: yodaiken@cs.nmt.edu
*/
extern __inline__ void queue_task(struct tq_struct *bh_pointer,
task_queue *bh_list)
{
if (!test_and_set_bit(0,&bh_pointer->sync)) {
- unsigned long flags;
+ set_bit(1,&bh_pointer->sync); /* see run_task_queue */
+ /* atomically: { return_val = *bh_list; *bh_list = bh_pointer; };
+ the assignment to bh_pointer->next is NOT atomic */
+ bh_pointer->next = xchg(bh_list,bh_pointer);
+ mb();
+ clear_bit(1,&bh_pointer->sync); /* see run_task_queue */
+
+ /* replaces
spin_lock_irqsave(&tqueue_lock, flags);
bh_pointer->next = *bh_list;
*bh_list = bh_pointer;
spin_unlock_irqrestore(&tqueue_lock, flags);
+ */
}
}
@@ -98,14 +130,16 @@
extern __inline__ void run_task_queue(task_queue *list)
{
if (*list) {
- unsigned long flags;
struct tq_struct *p;
+
+ p = xchg(list,NULL);
+ /* the lock-free operation above replaces
spin_lock_irqsave(&tqueue_lock, flags);
p = *list;
*list = NULL;
spin_unlock_irqrestore(&tqueue_lock, flags);
-
+ */
while (p) {
void *arg;
void (*f) (void *);
@@ -113,6 +147,11 @@
arg = p -> data;
f = p -> routine;
save_p = p;
+ /* queue_task may have put p on the list
+ but not yet glued the rest of the list
+ back onto p; spin until bit 1 clears
+ */
+ while (test_bit(1,&save_p->sync));
p = p -> next;
mb();
save_p -> sync = 0;
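
For illustration, here is a minimal user-space sketch of the same protocol. This is only a sketch under stated assumptions, not kernel code: C11 <stdatomic.h> operations stand in for the kernel primitives (atomic_exchange for xchg(); sequentially consistent fetch-or/fetch-and for set_bit()/clear_bit() plus the explicit mb()), and the tq_demo queue and hello() handler are names invented for the example.

#include <stdio.h>
#include <stdatomic.h>

struct tq_struct {
	struct tq_struct *next;
	atomic_uint sync;		/* bit 0: queued; bit 1: next not yet linked */
	void (*routine)(void *);
	void *data;
};

typedef _Atomic(struct tq_struct *) task_queue;

void queue_task(struct tq_struct *bh, task_queue *bh_list)
{
	/* test_and_set_bit(0, &bh->sync): only enqueue if not already queued */
	if (!(atomic_fetch_or(&bh->sync, 1u) & 1u)) {
		atomic_fetch_or(&bh->sync, 2u);		/* set_bit(1, ...) */
		/* xchg: publish the new head and fetch the old one */
		bh->next = atomic_exchange(bh_list, bh);
		/* seq-cst RMW doubles as the mb() + clear_bit(1, ...) */
		atomic_fetch_and(&bh->sync, ~2u);
	}
}

void run_task_queue(task_queue *bh_list)
{
	struct tq_struct *p = atomic_exchange(bh_list, NULL);

	while (p) {
		struct tq_struct *save_p = p;
		void (*f)(void *) = p->routine;
		void *arg = p->data;

		/* spin until the enqueuer has glued the tail onto p */
		while (atomic_load(&save_p->sync) & 2u)
			;
		p = p->next;
		atomic_store(&save_p->sync, 0u);	/* save_p->sync = 0 */
		if (f)
			f(arg);
	}
}

static void hello(void *arg)
{
	printf("bh: %s\n", (const char *)arg);
}

int main(void)
{
	static task_queue tq_demo;	/* plays the role of e.g. tq_rtl */
	struct tq_struct a = { .routine = hello, .data = "first" };
	struct tq_struct b = { .routine = hello, .data = "second" };

	queue_task(&a, &tq_demo);
	queue_task(&b, &tq_demo);	/* xchg pushes at the head */
	run_task_queue(&tq_demo);
	return 0;
}

Compiled with a C11 compiler this prints "second" then "first": xchg always pushes at the head, so the drain runs LIFO, and bit 0 of sync keeps the same tq_struct from being enqueued twice before it has run, which is what makes case 2 above safe.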