Hi,
On 2023/10/10 04:16, Sidhartha Kumar wrote:
Preallocate maple nodes before the call to mas_wr_store_entry(). If a new
node is not needed, go directly to mas_wr_store_entry(), otherwise
allocate the needed nodes and set the MA_STATE_PREALLOC flag.
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
---
lib/maple_tree.c | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index e239197a57fc..25ae66e585f4 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5478,17 +5478,33 @@ int mas_prealloc_calc(struct ma_wr_state *wr_mas)
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
MA_WR_STATE(wr_mas, mas, entry);
+ int request;
mas_wr_store_setup(&wr_mas);
- trace_ma_write(__func__, mas, 0, entry);
-retry:
+ wr_mas.content = mas_start(mas);
+
+ request = mas_prealloc_calc(&wr_mas);
mas_wr_store_entry() does something similar to mas_prealloc_calc(), so
making it do that work twice would incur additional overhead. We ran
into this issue while optimizing preallocation, but it has not been
resolved yet. Previously this problem only occurred when using
mas_preallocate(); this change would extend the impact to every write
operation on the maple tree. What do you think about it?
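
To make the concern concrete, here is a simplified model (plain C; all
of the names below are made up for illustration and are not maple tree
code). Both the preallocation estimate and the store itself have to
examine the target range, so with this change every store pays for that
examination twice, while the old retry loop only repeated work on the
rare allocation failure:

#include <stdbool.h>

/* Toy write state; a stand-in for struct ma_wr_state (illustration only). */
struct toy_wr_state {
	unsigned long index, last;
};

/* Stand-in for the range/leaf examination both steps need to do. */
static int examine_range(struct toy_wr_state *wr)
{
	/* pretend we walked the tree to see how [index, last] is laid out */
	return 1;
}

/* Stand-in for mas_prealloc_calc(): examines the range to size the request. */
static int toy_prealloc_calc(struct toy_wr_state *wr)
{
	return examine_range(wr);		/* walk #1 */
}

/* Stand-in for mas_wr_store_entry(): examines the range again to do the write. */
static bool toy_store_entry(struct toy_wr_state *wr)
{
	examine_range(wr);			/* walk #2 over the same range */
	return true;				/* pretend the store succeeded */
}

/* Shape of the patched path: every store performs both walks. */
static void store_with_prealloc(struct toy_wr_state *wr)
{
	int request = toy_prealloc_calc(wr);

	if (request) {
		/* ... preallocate 'request' nodes up front ... */
	}
	toy_store_entry(wr);
}

/* Shape of the old path: one walk in the common case, retry only on failure. */
static void store_with_retry(struct toy_wr_state *wr)
{
	while (!toy_store_entry(wr))
		;	/* allocate on failure and retry (rare path) */
}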
Thanks,
Peng
+ if (!request)
+ goto store_entry;
+
+ mas_node_count_gfp(mas, request, gfp);
+ if (unlikely(mas_is_err(mas))) {
+ mas_set_alloc_req(mas, 0);
+ mas_destroy(mas);
+ mas_reset(mas);
+ return xa_err(mas->node);
+ }
+ mas->mas_flags |= MA_STATE_PREALLOC;
+
+store_entry:
mas_wr_store_entry(&wr_mas);
if (unlikely(mas_nomem(mas, gfp)))
- goto retry;
+ goto store_entry;
if (unlikely(mas_is_err(mas)))
return xa_err(mas->node);
+ trace_ma_write(__func__, mas, 0, entry);
return 0;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);
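
For scale: every plain locked write through mas_store_gfp() now takes
the path above, i.e. callers of roughly the following shape (an
illustrative, hypothetical caller; example_tree and example_store_range
are made up, only the maple tree API calls are real):

#include <linux/gfp.h>
#include <linux/maple_tree.h>

/* Hypothetical internally locked tree, for illustration only. */
static DEFINE_MTREE(example_tree);

/* Store 'entry' over [first, last]; the extra walk is paid on every call. */
static int example_store_range(unsigned long first, unsigned long last,
			       void *entry)
{
	MA_STATE(mas, &example_tree, first, last);
	int ret;

	mas_lock(&mas);
	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
	mas_unlock(&mas);

	return ret;
}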