# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           ChangeSet	1.569   -> 1.570  
#	drivers/block/ll_rw_blk.c	1.64    -> 1.65   
#	include/linux/blkdev.h	1.43    -> 1.44   
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 02/05/01	axboe@burns.home.kernel.dk	1.570
# Initial block level tagged command queueing helpers
# --------------------------------------------
#
diff -Nru a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
--- a/drivers/block/ll_rw_blk.c	Wed May  1 14:36:31 2002
+++ b/drivers/block/ll_rw_blk.c	Wed May  1 14:36:31 2002
@@ -311,9 +311,221 @@
 	q->queue_lock = lock;
 }
 
+/**
+ * blk_queue_free_tags - release tag maintenance info
+ * @q:  the request queue for the device
+ *
+ *  Notes:
+ *    blk_cleanup_queue() will take care of calling this function, if tagging
+ *    has been used. So there's usually no need to call this directly, unless
+ *    tagging is just being disabled while the queue remains in use.
+ **/
+void blk_queue_free_tags(request_queue_t *q)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+
+	if (!bqt)
+		return;
+
+	BUG_ON(bqt->busy);
+	BUG_ON(!list_empty(&bqt->busy_list));
+
+	kfree(bqt->tag_index);
+	bqt->tag_index = NULL;
+
+	kfree(bqt->tag_map);
+	bqt->tag_map = NULL;
+
+	kfree(bqt);
+	q->queue_tags = NULL;
+	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+}
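+
+/*
+ * Usage sketch (comment only, nothing here is compiled): a driver that
+ * wants to turn tagging off while keeping the queue alive must first
+ * drain all outstanding tagged requests, or the BUG_ON() above fires.
+ * my_drain() and dev are made-up names for illustration:
+ *
+ *	my_drain(dev);				wait until busy == 0
+ *	blk_queue_free_tags(dev->queue);	frees maps, clears flag
+ */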
+
+/**
+ * blk_queue_init_tags - initialize the queue tag info
+ * @q:  the request queue for the device
+ * @depth:  the maximum queue depth supported
+ **/
+int blk_queue_init_tags(request_queue_t *q, int depth)
+{
+	struct blk_queue_tag *tags;
+	int bits;
+
+	if (depth > queue_nr_requests)
+		depth = queue_nr_requests;
+
+	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+	if (!tags)
+		goto fail;
+
+	tags->tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+	if (!tags->tag_index)
+		goto fail_index;
+
+	/* round up: depth need not be a multiple of BLK_TAGS_PER_LONG */
+	bits = (depth + BLK_TAGS_PER_LONG - 1) / BLK_TAGS_PER_LONG;
+	tags->tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
+	if (!tags->tag_map)
+		goto fail_map;
+
+	memset(tags->tag_index, 0, depth * sizeof(struct request *));
+	memset(tags->tag_map, 0, bits * sizeof(unsigned long));
+	INIT_LIST_HEAD(&tags->busy_list);
+	tags->busy = 0;
+	tags->max_depth = depth;
+
+	/*
+	 * assign it, all done
+	 */
+	q->queue_tags = tags;
+	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	return 0;
+
+fail_map:
+	kfree(tags->tag_index);
+fail_index:
+	kfree(tags);
+fail:
+	return -ENOMEM;
+}
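+
+/*
+ * Usage sketch (comment only): typical call at driver init time, after
+ * the queue itself has been set up. The depth of 64 is just an example;
+ * a real driver would pass whatever its hardware supports.
+ *
+ *	if (blk_queue_init_tags(q, 64))
+ *		printk("no memory for tags, running untagged\n");
+ *
+ * Failure is non-fatal: blk_queue_tagged(q) stays false and the queue
+ * keeps working in ordinary untagged mode.
+ */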
+
+/*
+ * queue_lock must be held both here and when clearing a tag
+ * (blk_queue_end_tag())
+ */
+static int blk_queue_get_tag(struct blk_queue_tag *bqt)
+{
+	unsigned long *map;
+	int i, depth, tag, bit;
+
+	tag = -1;
+	depth = i = 0;
+	do {
+		map = &bqt->tag_map[i++];
+		if (*map != -1UL)		/* this word has a free bit */
+			break;
+
+		depth += BLK_TAGS_PER_LONG;
+		if (depth < bqt->max_depth)
+			continue;
+
+		map = NULL;			/* all tags in use */
+	} while (map);
+
+	if (map) {
+		bit = ffz(*map);
+		/* valid tags run from 0 to max_depth - 1 */
+		if (bit + depth < bqt->max_depth) {
+			__set_bit(bit, map);
+			tag = bit + depth;
+		}
+	}
+
+	return tag;
+}
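+
+/*
+ * The tag <-> bitmap mapping used here (and relied upon by the
+ * __test_and_clear_bit() in blk_queue_end_tag() below) is the standard
+ * bitops layout:
+ *
+ *	word = tag / BLK_TAGS_PER_LONG;
+ *	bit  = tag & BLK_TAGS_MASK;
+ *
+ * so e.g. on a 32-bit box, tag 35 is bit 3 of tag_map[1].
+ */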
+
+/**
+ * blk_queue_end_tag - end tag operations for a request
+ * @q:  the request queue for the device
+ * @rq:  the request that has completed
+ *
+ *  Description:
+ *    Typically called when end_that_request_first() returns 0, meaning
+ *    all transfers have been done for a request. It's important to call
+ *    this function before end_that_request_last(), as the latter will put
+ *    the request back on the free list and thus corrupt the internal tag list.
+ *
+ *  Notes:
+ *   queue lock must be held.
+ **/
+void blk_queue_end_tag(request_queue_t *q, struct request *rq)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+	int tag = rq->tag;
+
+	BUG_ON(tag == -1);
+
+	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
+		printk("attempt to clear non-busy tag (%d)\n", tag);
+		return;
+	}
+
+	list_del(&rq->queuelist);
+	rq->flags &= ~REQ_QUEUED;
+	rq->tag = -1;
+
+	if (unlikely(bqt->tag_index[tag] == NULL))
+		printk("tag %d is missing\n", tag);
+
+	bqt->tag_index[tag] = NULL;
+	bqt->busy--;
+}
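+
+/*
+ * Usage sketch (comment only): the completion ordering described above,
+ * with the queue lock held. Once end_that_request_first() says the
+ * request is fully done (returns 0):
+ *
+ *	if (blk_rq_tagged(rq))
+ *		blk_queue_end_tag(q, rq);
+ *	end_that_request_last(rq);
+ *
+ * Calling end_that_request_last() first would free the request while it
+ * is still sitting in tag_index[] and on the busy list.
+ */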
+
+/**
+ * blk_queue_start_tag - find a free tag and assign it
+ * @q:  the request queue for the device
+ * @rq:  the block request that needs tagging
+ *
+ *  Description:
+ *    This can either be used as a stand-alone helper, or possibly be
+ *    assigned as the queue &prep_rq_fn (in which case &struct request
+ *    automagically gets a tag assigned). Note that this function assumes
+ *    that only REQ_CMD requests can be queued! The request will also be
+ *    removed from the request queue, so it is the driver's responsibility to
+ *    re-add it if it should need to be restarted for some reason.
+ *
+ *  Notes:
+ *   queue lock must be held.
+ **/
+int blk_queue_start_tag(request_queue_t *q, struct request *rq)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+	int tag;
+
+	if (unlikely(!(rq->flags & REQ_CMD)))
+		return 1;
+
+	tag = blk_queue_get_tag(bqt);
+	if (tag != -1) {
+		rq->flags |= REQ_QUEUED;
+		rq->tag = tag;
+		bqt->tag_index[tag] = rq;
+		blkdev_dequeue_request(rq);
+		list_add(&rq->queuelist, &bqt->busy_list);
+		bqt->busy++;
+		return 0;
+	}
+
+	return 1;
+}
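+
+/*
+ * Usage sketch (comment only): stand-alone use from a driver's
+ * request_fn. my_issue() and dev stand in for whatever actually sends
+ * the command to the hardware.
+ *
+ *	while ((rq = elv_next_request(q)) != NULL) {
+ *		if (blk_queue_start_tag(q, rq))
+ *			break;			no free tag, stall for now
+ *		my_issue(dev, rq, rq->tag);
+ *	}
+ *
+ * The driver restarts the queue once a tag completes.
+ */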
+
+/**
+ * blk_queue_invalidate_tags - invalidate all pending tags
+ * @q:  the request queue for the device
+ *
+ *  Description:
+ *   Hardware conditions may dictate a need to stop all pending requests.
+ *   In this case, we will safely clear the block side of the tag queue and
+ *   re-add all requests to the request queue in the right order.
+ *
+ *  Notes:
+ *   queue lock must be held.
+ **/
+void blk_queue_invalidate_tags(request_queue_t *q)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+	struct list_head *tmp, *n;
+	struct request *rq;
+
+	/* safe variant: blk_queue_end_tag() deletes rq from the list */
+	list_for_each_safe(tmp, n, &bqt->busy_list) {
+		rq = list_entry_rq(tmp);
+
+		blk_queue_end_tag(q, rq);
+		rq->flags &= ~REQ_STARTED;
+		elv_add_request(q, rq, 0);
+	}
+}
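+
+/*
+ * Usage sketch (comment only): after e.g. a bus reset has thrown away
+ * the hardware side of the tag queue, a driver can resync the block
+ * side like so (queue lock held, my_reset() is a made-up name):
+ *
+ *	my_reset(dev);
+ *	blk_queue_invalidate_tags(q);
+ *
+ * The requests come back with REQ_STARTED cleared, so the next run of
+ * the request_fn will see them and tag them again.
+ */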
+
 static char *rq_flags[] = { "REQ_RW", "REQ_RW_AHEAD", "REQ_BARRIER",
 			   "REQ_CMD", "REQ_NOMERGE", "REQ_STARTED",
-			   "REQ_DONTPREP", "REQ_DRIVE_CMD",
+			   "REQ_DONTPREP", "REQ_QUEUED", "REQ_DRIVE_CMD",
 			   "REQ_DRIVE_ACB", "REQ_PC", "REQ_BLOCK_PC",
 			   "REQ_SENSE", "REQ_SPECIAL" };
 
@@ -619,7 +831,7 @@
 	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
 	if (blk_hw_contig_segment(q, req->biotail, next->bio))
 		total_hw_segments--;
-    
+
 	if (total_hw_segments > q->max_hw_segments)
 		return 0;
 
@@ -719,7 +931,7 @@
  *     when a block device is being de-registered.  Currently, its
 *     primary task is to free all the &struct request structures that
  *     were allocated to the queue.
- * Caveat: 
+ * Caveat:
  *     Hopefully the low level driver will have finished any
  *     outstanding requests first...
  **/
@@ -733,6 +945,9 @@
 	if (count)
 		printk("blk_cleanup_queue: leaked requests (%d)\n", count);
 
+	if (blk_queue_tagged(q))
+		blk_queue_free_tags(q);
+
 	elevator_exit(q, &q->elevator);
 
 	memset(q, 0, sizeof(*q));
@@ -1599,7 +1814,7 @@
  * Description:
  *     Ends I/O on the first buffer attached to @req, and sets it up
  *     for the next buffer_head (if any) in the cluster.
- *     
+ *
  * Return:
  *     0 - we are done with this request, call end_that_request_last()
  *     1 - still buffers pending for this request
@@ -1753,3 +1968,9 @@
 
 EXPORT_SYMBOL(ll_10byte_cmd_build);
 EXPORT_SYMBOL(blk_queue_prep_rq);
+
+EXPORT_SYMBOL(blk_queue_init_tags);
+EXPORT_SYMBOL(blk_queue_free_tags);
+EXPORT_SYMBOL(blk_queue_start_tag);
+EXPORT_SYMBOL(blk_queue_end_tag);
+EXPORT_SYMBOL(blk_queue_invalidate_tags);
diff -Nru a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h	Wed May  1 14:36:31 2002
+++ b/include/linux/blkdev.h	Wed May  1 14:36:31 2002
@@ -56,6 +56,7 @@
 
 	unsigned int current_nr_sectors;
 	unsigned int hard_cur_sectors;
+	int tag;
 	void *special;
 	char *buffer;
 	struct completion *waiting;
@@ -75,6 +76,7 @@
 	__REQ_NOMERGE,	/* don't touch this for merging */
 	__REQ_STARTED,	/* drive already may have started this one */
 	__REQ_DONTPREP,	/* don't call prep for this one */
+	__REQ_QUEUED,	/* uses queueing */
 	/*
 	 * for ATA/ATAPI devices
 	 */
@@ -97,6 +99,7 @@
 #define REQ_NOMERGE	(1 << __REQ_NOMERGE)
 #define REQ_STARTED	(1 << __REQ_STARTED)
 #define REQ_DONTPREP	(1 << __REQ_DONTPREP)
+#define REQ_QUEUED	(1 << __REQ_QUEUED)
 #define REQ_DRIVE_CMD	(1 << __REQ_DRIVE_CMD)
 #define REQ_DRIVE_ACB	(1 << __REQ_DRIVE_ACB)
 #define REQ_PC		(1 << __REQ_PC)
@@ -121,6 +124,17 @@
 	Queue_up,
 };
 
+#define BLK_TAGS_PER_LONG	(sizeof(unsigned long) * 8)
+#define BLK_TAGS_MASK		(BLK_TAGS_PER_LONG - 1)
+
+struct blk_queue_tag {
+	struct request **tag_index;	/* map of busy tags */
+	unsigned long *tag_map;		/* bit map of free/busy tags */
+	struct list_head busy_list;	/* fifo list of busy tags */
+	int busy;			/* current depth */
+	int max_depth;
+};
+
 /*
  * Default nr free requests per queue, ll_rw_blk will scale it down
  * according to available RAM at init time
@@ -193,6 +207,8 @@
 	unsigned long		seg_boundary_mask;
 
 	wait_queue_head_t	queue_wait;
+
+	struct blk_queue_tag	*queue_tags;
 };
 
 #define RQ_INACTIVE		(-1)
@@ -203,9 +219,11 @@
 
 #define QUEUE_FLAG_PLUGGED	0	/* queue is plugged */
 #define QUEUE_FLAG_CLUSTER	1	/* cluster several segments into 1 */
+#define QUEUE_FLAG_QUEUED	2	/* uses generic tag queueing */
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_mark_plugged(q)	set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_empty(q)	elv_queue_empty(q)
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
@@ -315,6 +333,19 @@
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(void *);
+
+/*
+ * tag stuff
+ */
+#define blk_queue_tag_request(q, tag)	((q)->queue_tags->tag_index[(tag)])
+#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
+#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
+#define blk_rq_tagged(rq)		((rq)->flags & REQ_QUEUED)
+extern int blk_queue_start_tag(request_queue_t *, struct request *);
+extern void blk_queue_end_tag(request_queue_t *, struct request *);
+extern int blk_queue_init_tags(request_queue_t *, int);
+extern void blk_queue_free_tags(request_queue_t *);
+extern void blk_queue_invalidate_tags(request_queue_t *);
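+
+/*
+ * Usage sketch (comment only): an interrupt handler typically maps a
+ * completed hardware tag number back to its request with
+ *
+ *	rq = blk_queue_tag_request(q, hw_tag);
+ *
+ * while blk_queue_tag_queue(q) tells the request_fn whether another
+ * tagged request can be started right now.
+ */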
 
 extern int * blk_size[MAX_BLKDEV];	/* in units of 1024 bytes */
 extern int * blksize_size[MAX_BLKDEV];