[PATCH 01/16] blk-mq-sched: add new parameter nr_requests in blk_mq_alloc_sched_tags()
From: Yu Kuai
Date: Wed Aug 13 2025 - 23:45:00 EST
From: Yu Kuai <yukuai3@xxxxxxxxxx>
This helper only supports allocating the default number of requests;
add a new parameter so callers can allocate a specific number of
requests.

Prepare to fix a tags double-free problem that occurs when nr_requests
is grown via the queue sysfs attribute nr_requests.
Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
---
block/blk-mq-sched.c | 11 +++++++----
block/blk-mq-sched.h | 2 +-
block/elevator.c | 2 +-
3 files changed, 9 insertions(+), 6 deletions(-)
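
For reference (not part of the patch itself): a minimal sketch of the
new calling convention, assuming the semantics described above, where
a non-zero nr_requests is used verbatim and 0 keeps the historical
default depth. The variable new_nr_requests is hypothetical, standing
in for whatever depth a later caller wants:

	/* existing callers pass 0 to keep the default per-hw-queue
	 * depth, i.e. 2 * min(set->queue_depth, BLKDEV_DEFAULT_RQ)
	 */
	et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues, 0);

	/* a later caller (e.g. an nr_requests sysfs update path)
	 * can request an exact per-hw-queue depth instead
	 */
	et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues,
				     new_nr_requests);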
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index e2ce4a28e6c9..9a8a0b5e04a9 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -454,7 +454,7 @@ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
}
struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
- unsigned int nr_hw_queues)
+ unsigned int nr_hw_queues, unsigned int nr_requests)
{
unsigned int nr_tags;
int i;
@@ -475,8 +475,11 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
* 128, since we don't split into sync/async like the old code
* did. Additionally, this is a per-hw queue depth.
*/
- et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
- BLKDEV_DEFAULT_RQ);
+ if (nr_requests)
+ et->nr_requests = nr_requests;
+ else
+ et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
+ BLKDEV_DEFAULT_RQ);
et->nr_hw_queues = nr_hw_queues;
if (blk_mq_is_shared_tags(set->flags)) {
@@ -521,7 +524,7 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
* concurrently.
*/
if (q->elevator) {
- et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
+ et = blk_mq_alloc_sched_tags(set, nr_hw_queues, 0);
if (!et)
goto out_unwind;
if (xa_insert(et_table, q->id, et, gfp))
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index b554e1d55950..0582d4bc3987 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -24,7 +24,7 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);
struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
- unsigned int nr_hw_queues);
+ unsigned int nr_hw_queues, unsigned int nr_requests);
int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
void blk_mq_free_sched_tags(struct elevator_tags *et,
diff --git a/block/elevator.c b/block/elevator.c
index fe96c6f4753c..f8a04f32cbcf 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -669,7 +669,7 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
lockdep_assert_held(&set->update_nr_hwq_lock);
if (strncmp(ctx->name, "none", 4)) {
- ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
+ ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues, 0);
if (!ctx->et)
return -ENOMEM;
}
--
2.39.2