On Fri, Sep 24, 2021 at 04:28:28PM +0800, John Garry wrote:
Refactor blk_mq_free_map_and_requests() such that it can be used at many
sites at which the tag map and rqs are freed.
Also rename to blk_mq_free_map_and_rqs(), which is shorter and matches the
alloc equivalent.
Suggested-by: Ming Lei <ming.lei@xxxxxxxxxx>
Signed-off-by: John Garry <john.garry@xxxxxxxxxx>
Reviewed-by: Hannes Reinecke <hare@xxxxxxx>
---
block/blk-mq-tag.c |  3 +--
block/blk-mq.c     | 40 ++++++++++++++++++++++++----------------
block/blk-mq.h     |  4 +++-
3 files changed, 28 insertions(+), 19 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index db99f1246795..a0ecc6d88f84 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -607,8 +607,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (!new)
return -ENOMEM;
- blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
- blk_mq_free_rq_map(*tagsptr, set->flags);
+ blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
*tagsptr = new;
} else {
/*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 46772773b9c4..464ea20b9bcb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2878,15 +2878,15 @@ static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
return set->tags[hctx_idx];
}
-static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
- unsigned int hctx_idx)
+void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
+ struct blk_mq_tags *tags,
+ unsigned int hctx_idx)
{
unsigned int flags = set->flags;
- if (set->tags && set->tags[hctx_idx]) {
- blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
- blk_mq_free_rq_map(set->tags[hctx_idx], flags);
- set->tags[hctx_idx] = NULL;
+ if (tags) {
+ blk_mq_free_rqs(set, tags, hctx_idx);
+ blk_mq_free_rq_map(tags, flags);
}
}
@@ -2967,8 +2967,10 @@ static void blk_mq_map_swqueue(struct request_queue *q)
* fallback in case of a new remap fails
* allocation
*/
- if (i && set->tags[i])
- blk_mq_free_map_and_requests(set, i);
+ if (i && set->tags[i]) {
+ blk_mq_free_map_and_rqs(set, set->tags[i], i);
+ set->tags[i] = NULL;
+ }
hctx->tags = NULL;
continue;
@@ -3264,8 +3266,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx = hctxs[j];
if (hctx) {
- if (hctx->tags)
- blk_mq_free_map_and_requests(set, j);
+ blk_mq_free_map_and_rqs(set, set->tags[j], j);
+ set->tags[j] = NULL;
blk_mq_exit_hctx(q, set, hctx, j);
hctxs[j] = NULL;
}
@@ -3361,8 +3363,10 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
return 0;
out_unwind:
- while (--i >= 0)
- blk_mq_free_map_and_requests(set, i);
+ while (--i >= 0) {
+ blk_mq_free_map_and_rqs(set, set->tags[i], i);
+ set->tags[i] = NULL;
+ }
return -ENOMEM;
}
@@ -3557,8 +3561,10 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
return 0;
out_free_mq_rq_maps:
- for (i = 0; i < set->nr_hw_queues; i++)
- blk_mq_free_map_and_requests(set, i);
+ for (i = 0; i < set->nr_hw_queues; i++) {
+ blk_mq_free_map_and_rqs(set, set->tags[i], i);
+ set->tags[i] = NULL;
+ }
out_free_mq_map:
for (i = 0; i < set->nr_maps; i++) {
kfree(set->map[i].mq_map);
@@ -3590,8 +3596,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
int i, j;
- for (i = 0; i < set->nr_hw_queues; i++)
- blk_mq_free_map_and_requests(set, i);
+ for (i = 0; i < set->nr_hw_queues; i++) {
+ blk_mq_free_map_and_rqs(set, set->tags[i], i);
+ set->tags[i] = NULL;
+ }
There are 5 callers in which 'set->tags[i]' is cleared, so I'm just
wondering why you don't clear set->tags[i] by default in
blk_mq_free_map_and_rqs(), and just call __blk_mq_free_map_and_rqs()
for the only other user?
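
Something like the below is what I have in mind - an untested sketch
only, and the __blk_mq_free_map_and_rqs() name and signature are just
my suggestion:

/*
 * Free the rqs and the tag map without touching set->tags[]; this is
 * what the blk-mq-tag.c caller needs, so this one would be the helper
 * declared in blk-mq.h instead of blk_mq_free_map_and_rqs().
 */
void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			       struct blk_mq_tags *tags,
			       unsigned int hctx_idx)
{
	if (tags) {
		blk_mq_free_rqs(set, tags, hctx_idx);
		blk_mq_free_rq_map(tags, set->flags);
	}
}

/*
 * Default helper also clears set->tags[hctx_idx], since all 5 callers
 * in blk-mq.c do exactly that after freeing.
 */
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     unsigned int hctx_idx)
{
	__blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
	set->tags[hctx_idx] = NULL;
}

With that, the five call sites in blk-mq.c go back to a plain
blk_mq_free_map_and_rqs(set, i), and blk_mq_tag_update_depth() would
call __blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num) instead.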