Re: [PATCH v2 10/16] maple_tree: convert mas_insert() to preallocate nodes

From: Liam R. Howlett
Date: Thu Jun 13 2024 - 10:41:40 EST


* Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx> [240607 14:53]:
> By setting the store type in mas_insert(), we no longer need to use
> mas_wr_modify() to determine the correct store function to use. Instead,
> set the store type and call mas_wr_store_entry(). Also, pass in the
> requested gfp flags to mas_insert() so they can be passed to the call to
> mas_wr_preallocate().

This could be done without passing in the gfp flags by keeping the flags
that were used before (GFP_KERNEL). However, the only caller already has
the gfp flags available, so passing them through makes sense.

We may need to add a _gfp() version in the future, but most users want
mas_store() rather than mas_insert(), so I'm fine with this change.
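
If a _gfp() variant does become necessary, I'd expect it to be a thin
wrapper around the internal helper, something like the sketch below.
This is hypothetical and not in the tree (mas_insert_gfp() is a made-up
name); it just mirrors the mas_nomem() retry pattern the existing
callers in this patch already use:

	/*
	 * Hypothetical sketch only, not part of this patch.
	 * Forward caller-supplied flags to the internal helper and
	 * retry on allocation failure via mas_nomem(), the same way
	 * mas_alloc_cyclic() and the mtree_*() callers do below.
	 */
	static inline void *mas_insert_gfp(struct ma_state *mas, void *entry,
					   gfp_t gfp)
	{
		void *content;

		do {
			content = mas_insert(mas, entry, gfp);
		} while (mas_nomem(mas, gfp));

		return content;
	}

Callers would still need to check the maple state for errors afterwards,
as mas_insert() reports failure through the state rather than the return
value.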

>
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
> ---
> lib/maple_tree.c | 33 ++++++++++++++++-----------------
> 1 file changed, 16 insertions(+), 17 deletions(-)
>
> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
> index 2c42e99c400c..c37bfac4f622 100644
> --- a/lib/maple_tree.c
> +++ b/lib/maple_tree.c
> @@ -4442,11 +4442,12 @@ static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry, g
> * mas_insert() - Internal call to insert a value
> * @mas: The maple state
> * @entry: The entry to store
> + * @gfp: The GFP_FLAGS to use for allocations
> *
> * Return: %NULL or the contents that already exists at the requested index
> * otherwise. The maple state needs to be checked for error conditions.
> */
> -static inline void *mas_insert(struct ma_state *mas, void *entry)
> +static inline void *mas_insert(struct ma_state *mas, void *entry, gfp_t gfp)
> {
> MA_WR_STATE(wr_mas, mas, entry);
>
> @@ -4468,26 +4469,24 @@ static inline void *mas_insert(struct ma_state *mas, void *entry)
> if (wr_mas.content)
> goto exists;
>
> - if (mas_is_none(mas) || mas_is_ptr(mas)) {
> - mas_store_root(mas, entry);
> + mas_wr_preallocate(&wr_mas, entry, gfp);
> + if (mas_is_err(mas))
> return NULL;
> - }
>
> /* spanning writes always overwrite something */
> - if (!mas_wr_walk(&wr_mas))
> + if (mas->store_type == wr_spanning_store)
> goto exists;
>
> /* At this point, we are at the leaf node that needs to be altered. */
> - wr_mas.offset_end = mas->offset;
> - wr_mas.end_piv = wr_mas.r_max;
> -
> - if (wr_mas.content || (mas->last > wr_mas.r_max))
> - goto exists;
> + if (mas->store_type != wr_new_root && mas->store_type != wr_store_root) {
> + wr_mas.offset_end = mas->offset;
> + wr_mas.end_piv = wr_mas.r_max;
>
> - if (!entry)
> - return NULL;
> + if (wr_mas.content || (mas->last > wr_mas.r_max))
> + goto exists;
> + }
>
> - mas_wr_modify(&wr_mas);
> + mas_wr_store_entry(&wr_mas);
> return wr_mas.content;
>
> exists:
> @@ -4532,7 +4531,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
> return ret;
>
> do {
> - mas_insert(mas, entry);
> + mas_insert(mas, entry, gfp);
> } while (mas_nomem(mas, gfp));
> if (mas_is_err(mas))
> return xa_err(mas->node);
> @@ -6536,7 +6535,7 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
>
> mtree_lock(mt);
> retry:
> - mas_insert(&ms, entry);
> + mas_insert(&ms, entry, gfp);
> if (mas_nomem(&ms, gfp))
> goto retry;
>
> @@ -6585,7 +6584,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
> if (ret)
> goto unlock;
>
> - mas_insert(&mas, entry);
> + mas_insert(&mas, entry, gfp);
> /*
> * mas_nomem() may release the lock, causing the allocated area
> * to be unavailable, so try to allocate a free area again.
> @@ -6667,7 +6666,7 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
> if (ret)
> goto unlock;
>
> - mas_insert(&mas, entry);
> + mas_insert(&mas, entry, gfp);
> /*
> * mas_nomem() may release the lock, causing the allocated area
> * to be unavailable, so try to allocate a free area again.
> --
> 2.45.2
>