[PATCH 5/6] blk-mq: remove REQ_ATOM_COMPLETE usages from blk-mq
From: Tejun Heo
Date: Sat Dec 09 2017 - 14:26:13 EST
After the recent updates to use generation number and state based
synchronization, blk-mq no longer depends on REQ_ATOM_COMPLETE for
anything.
Remove all REQ_ATOM_COMPLETE usages. This also removes atomic bitops
from the hot paths. The next patch will push further in this direction
and include simple performance test results.
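For reference, a minimal sketch of the completion-side check with and
without the atomic bitop. This is simplified from the hunks below (only
the RCU leg for non-BLK_MQ_F_BLOCKING hctxs is shown; the surrounding
context is illustrative rather than the literal kernel code):

	/*
	 * Old scheme: the normal completion path and the timeout path
	 * raced to claim the request with an atomic test_and_set_bit()
	 * on REQ_ATOM_COMPLETE (via blk_mark_rq_complete()).
	 */
	rcu_read_lock();
	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate &&
	    !blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
	rcu_read_unlock();

	/*
	 * New scheme: the timeout path claims a request by making
	 * rq->aborted_gstate match rq->gstate and then waiting for an
	 * RCU grace period, so the completion path only needs a plain
	 * comparison under rcu_read_lock() - no atomic bitop.
	 */
	rcu_read_lock();
	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
		__blk_mq_complete_request(rq);
	rcu_read_unlock();

The SRCU leg for BLK_MQ_F_BLOCKING hctxs follows the same pattern, as
the second half of the blk_mq_complete_request() hunk shows.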
Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
---
block/blk-mq.c | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0267040..4ebac33 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -596,14 +596,12 @@ void blk_mq_complete_request(struct request *rq)
*/
if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
rcu_read_lock();
- if (blk_mq_rq_aborted_gstate(rq) != rq->gstate &&
- !blk_mark_rq_complete(rq))
+ if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
__blk_mq_complete_request(rq);
rcu_read_unlock();
} else {
srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
- if (blk_mq_rq_aborted_gstate(rq) != rq->gstate &&
- !blk_mark_rq_complete(rq))
+ if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
__blk_mq_complete_request(rq);
srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
}
@@ -650,8 +648,6 @@ void blk_mq_start_request(struct request *rq)
write_seqcount_end(&rq->gstate_seqc);
set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
- clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
if (q->dma_drain_size && blk_rq_bytes(rq)) {
/*
@@ -862,8 +858,7 @@ static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
* now guaranteed to see @rq->aborted_gstate and yield. If
* @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
*/
- if (READ_ONCE(rq->gstate) == rq->aborted_gstate &&
- !blk_mark_rq_complete(rq))
+ if (READ_ONCE(rq->gstate) == rq->aborted_gstate)
blk_mq_rq_timed_out(rq, reserved);
}
--
2.9.5