[PATCH 1/2] block cfq: don't use atomic_t for cfq_queue

From: Shaohua Li
Date: Wed Dec 22 2010 - 21:45:49 EST


cfq_queue->ref is only manipulated with queue_lock held, so ref doesn't need
to be an atomic_t, and the atomic operations are slower than plain integer
arithmetic.
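
As an illustration (not part of the patch), this is the general pattern the
change relies on: once every manipulation of a reference count happens under
the same spinlock, a plain int is already serialized, and the lock-prefixed
read-modify-write that atomic_inc()/atomic_dec_and_test() emit on x86 is pure
overhead. A minimal sketch, with illustrative names that are not from
cfq-iosched.c:

    #include <linux/spinlock.h>
    #include <linux/slab.h>

    struct foo {
            int ref;        /* protected by foo_lock */
    };

    static DEFINE_SPINLOCK(foo_lock);

    static void foo_get(struct foo *f)
    {
            /* caller holds foo_lock; a plain increment cannot race */
            lockdep_assert_held(&foo_lock);
            f->ref++;
    }

    static void foo_put(struct foo *f)
    {
            lockdep_assert_held(&foo_lock);
            BUG_ON(f->ref <= 0);
            if (!--f->ref)
                    kfree(f);
    }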

Signed-off-by: Shaohua Li <shaohua.li@xxxxxxxxx>

---
block/cfq-iosched.c | 27 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 11 deletions(-)

Index: linux/block/cfq-iosched.c
===================================================================
--- linux.orig/block/cfq-iosched.c 2010-12-22 21:14:15.000000000 +0800
+++ linux/block/cfq-iosched.c 2010-12-23 10:44:23.000000000 +0800
@@ -97,7 +97,7 @@ struct cfq_rb_root {
*/
struct cfq_queue {
/* reference count */
- atomic_t ref;
+ int ref;
/* various state flags, see below */
unsigned int flags;
/* parent cfq_data */
@@ -2040,7 +2040,7 @@ static int cfqq_process_refs(struct cfq_
int process_refs, io_refs;

io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
- process_refs = atomic_read(&cfqq->ref) - io_refs;
+ process_refs = cfqq->ref - io_refs;
BUG_ON(process_refs < 0);
return process_refs;
}
@@ -2080,10 +2080,10 @@ static void cfq_setup_merge(struct cfq_q
*/
if (new_process_refs >= process_refs) {
cfqq->new_cfqq = new_cfqq;
- atomic_add(process_refs, &new_cfqq->ref);
+ new_cfqq->ref += process_refs;
} else {
new_cfqq->new_cfqq = cfqq;
- atomic_add(new_process_refs, &cfqq->ref);
+ cfqq->ref += new_process_refs;
}
}

@@ -2538,9 +2538,10 @@ static void cfq_put_queue(struct cfq_que
struct cfq_data *cfqd = cfqq->cfqd;
struct cfq_group *cfqg, *orig_cfqg;

- BUG_ON(atomic_read(&cfqq->ref) <= 0);
+ BUG_ON(cfqq->ref <= 0);

- if (!atomic_dec_and_test(&cfqq->ref))
+ cfqq->ref--;
+ if (cfqq->ref)
return;

cfq_log_cfqq(cfqd, cfqq, "put_queue");
@@ -2843,7 +2844,7 @@ static void cfq_init_cfqq(struct cfq_dat
RB_CLEAR_NODE(&cfqq->p_node);
INIT_LIST_HEAD(&cfqq->fifo);

- atomic_set(&cfqq->ref, 0);
+ cfqq->ref = 0;
cfqq->cfqd = cfqd;

cfq_mark_cfqq_prio_changed(cfqq);
@@ -2979,11 +2980,11 @@ cfq_get_queue(struct cfq_data *cfqd, boo
* pin the queue now that it's allocated, scheduler exit will prune it
*/
if (!is_sync && !(*async_cfqq)) {
- atomic_inc(&cfqq->ref);
+ cfqq->ref++;
*async_cfqq = cfqq;
}

- atomic_inc(&cfqq->ref);
+ cfqq->ref++;
return cfqq;
}

@@ -3681,7 +3682,7 @@ new_queue:
}

cfqq->allocated[rw]++;
- atomic_inc(&cfqq->ref);
+ cfqq->ref++;

spin_unlock_irqrestore(q->queue_lock, flags);

@@ -3862,6 +3863,10 @@ static void *cfq_init_queue(struct reque
if (!cfqd)
return NULL;

+ /*
+ * No need to take queue_lock in this routine, since we are
+ * initializing the ioscheduler and nobody else is using cfqd yet
+ */
cfqd->cic_index = i;

/* Init root service tree */
@@ -3901,7 +3906,7 @@ static void *cfq_init_queue(struct reque
* will not attempt to free it.
*/
cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
- atomic_inc(&cfqd->oom_cfqq.ref);
+ cfqd->oom_cfqq.ref++;
cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);

INIT_LIST_HEAD(&cfqd->cic_list);
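
A possible follow-up, not part of this patch: the conversion is only safe as
long as every ->ref manipulation really does happen with queue_lock held, and
that rule could be made self-checking with a lockdep annotation at the top of
cfq_put_queue() (cfqd->queue is the owning request_queue):

    /* hypothetical annotation, documents the locking invariant */
    lockdep_assert_held(cfqq->cfqd->queue->queue_lock);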

