[RFC PATCH 2/2] Revert "mq-deadline: Fix request accounting"

From: Niklas Cassel
Date: Fri Aug 27 2021 - 08:41:48 EST


From: Niklas Cassel <niklas.cassel@xxxxxxx>

This reverts commit b6d2b054e8baaee53fd2d4854c63cbf0f2c6262a.

There is no longer any need for the private data workaround that tracked
whether a request had been inserted into the scheduler or not.

blk-mq will no longer call .finish_request() for requests that were
never inserted into the scheduler.
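
For reference, a minimal sketch of the kind of guard this revert relies on
is shown below. It is illustrative only and not taken from the previous
patch in this series: the function name is invented, and RQF_ELVPRIV is
merely used here as an example of an "went through the scheduler" marker.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>

/*
 * Sketch only: invoke the elevator's .finish_request() callback only for
 * requests that actually went through the I/O scheduler, so that
 * dd_finish_request() no longer needs its own "was inserted" tracking.
 */
static void sched_finish_request_sketch(struct request *rq)
{
	struct elevator_queue *e = rq->q->elevator;

	/* Requests that bypassed the scheduler never get the callback. */
	if (!(rq->rq_flags & RQF_ELVPRIV))
		return;

	if (e->type->ops.finish_request)
		e->type->ops.finish_request(rq);
}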

The reverted commit also did not handle requeues properly, so it would
have required additional private data tracking had it not been reverted.

Signed-off-by: Niklas Cassel <niklas.cassel@xxxxxxx>
---
block/mq-deadline.c | 16 +++++-----------
1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 36920670dccc..a5d171b54f8e 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -682,7 +682,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,

prio = ioprio_class_to_prio[ioprio_class];
dd_count(dd, inserted, prio);
- rq->elv.priv[0] = (void *)(uintptr_t)1;

if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
blk_mq_free_requests(&free);
@@ -731,10 +730,12 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
spin_unlock(&dd->lock);
}

-/* Callback from inside blk_mq_rq_ctx_init(). */
+/*
+ * Nothing to do here. This is defined only to ensure that .finish_request
+ * method is called upon request completion.
+ */
static void dd_prepare_request(struct request *rq)
{
- rq->elv.priv[0] = NULL;
}

/*
@@ -761,14 +762,7 @@ static void dd_finish_request(struct request *rq)
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
struct dd_per_prio *per_prio = &dd->per_prio[prio];

- /*
- * The block layer core may call dd_finish_request() without having
- * called dd_insert_requests(). Hence only update statistics for
- * requests for which dd_insert_requests() has been called. See also
- * blk_mq_request_bypass_insert().
- */
- if (rq->elv.priv[0])
- dd_count(dd, completed, prio);
+ dd_count(dd, completed, prio);

if (blk_queue_is_zoned(q)) {
unsigned long flags;
--
2.31.1