[PATCH v4 08/10] maple_tree: Update check_forking() and bench_forking()

From: Peng Zhang
Date: Mon Oct 09 2023 - 05:05:30 EST


Update check_forking() and bench_forking() to use __mt_dup() to
duplicate the maple tree, instead of copying it entry by entry with
mas_expected_entries() and mas_store(). After duplication, the new
tree is walked and every entry is stored back in place, mirroring the
way fork() replaces each entry in the duplicated tree.
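
Condensed from the hunks below, the copy in both tests now follows
this pattern (test scaffolding omitted; mt/mas are the source tree and
its ma_state, newmt/newmas the new tree and its ma_state):

	mas_lock(&newmas);
	mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);

	ret = __mt_dup(mt, &newmt, GFP_NOWAIT | __GFP_NOWARN);
	if (ret) {
		pr_err("OOM!");
		BUG_ON(1);
	}

	/* Walk the duplicate and store every entry back in place. */
	mas_set(&newmas, 0);
	mas_for_each(&newmas, val, ULONG_MAX) {
		mas_store(&newmas, val);
	}

	mas_unlock(&mas);
	mas_unlock(&newmas);
	mas_destroy(&newmas);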

Signed-off-by: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
---
lib/test_maple_tree.c | 61 +++++++++++++++++++++----------------------
1 file changed, 30 insertions(+), 31 deletions(-)
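
A note on the locking and gfp choices in the new code: the source
tree's lock is taken with mas_lock_nested(&mas, SINGLE_DEPTH_NESTING)
so that lockdep accepts holding two maple tree spinlocks of the same
class at once, and __mt_dup() is called with GFP_NOWAIT | __GFP_NOWARN
because a sleeping allocation is not allowed while both spinlocks are
held.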

diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 27d424fad797..bcd07c220a13 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -1837,36 +1837,37 @@ static noinline void __init check_forking(struct maple_tree *mt)
{

struct maple_tree newmt;
- int i, nr_entries = 134;
+ int i, nr_entries = 134, ret;
void *val;
MA_STATE(mas, mt, 0, 0);
- MA_STATE(newmas, mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);

for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);

+
mt_set_non_kernel(99999);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
- newmas.tree = &newmt;
- mas_reset(&newmas);
- mas_reset(&mas);
mas_lock(&newmas);
- mas.index = 0;
- mas.last = 0;
- if (mas_expected_entries(&newmas, nr_entries)) {
+ mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
+
+ ret = __mt_dup(mt, &newmt, GFP_NOWAIT | __GFP_NOWARN);
+ if (ret) {
pr_err("OOM!");
BUG_ON(1);
}
- rcu_read_lock();
- mas_for_each(&mas, val, ULONG_MAX) {
- newmas.index = mas.index;
- newmas.last = mas.last;
+
+ mas_set(&newmas, 0);
+ mas_for_each(&newmas, val, ULONG_MAX) {
mas_store(&newmas, val);
}
- rcu_read_unlock();
- mas_destroy(&newmas);
+
+ mas_unlock(&mas);
mas_unlock(&newmas);
+
+ mas_destroy(&newmas);
mt_validate(&newmt);
mt_set_non_kernel(0);
mtree_destroy(&newmt);
@@ -1974,12 +1975,11 @@ static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
#if defined(BENCH_FORK)
static noinline void __init bench_forking(struct maple_tree *mt)
{
-
struct maple_tree newmt;
- int i, nr_entries = 134, nr_fork = 80000;
+ int i, nr_entries = 134, nr_fork = 80000, ret;
void *val;
MA_STATE(mas, mt, 0, 0);
- MA_STATE(newmas, mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);

for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
@@ -1988,25 +1988,24 @@ static noinline void __init bench_forking(struct maple_tree *mt)
for (i = 0; i < nr_fork; i++) {
mt_set_non_kernel(99999);
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
- newmas.tree = &newmt;
- mas_reset(&newmas);
- mas_reset(&mas);
- mas.index = 0;
- mas.last = 0;
- rcu_read_lock();
+
mas_lock(&newmas);
- if (mas_expected_entries(&newmas, nr_entries)) {
- printk("OOM!");
+ mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
+ ret = __mt_dup(mt, &newmt, GFP_NOWAIT | __GFP_NOWARN);
+ if (ret) {
+ pr_err("OOM!");
BUG_ON(1);
}
- mas_for_each(&mas, val, ULONG_MAX) {
- newmas.index = mas.index;
- newmas.last = mas.last;
+
+ mas_set(&newmas, 0);
+ mas_for_each(&newmas, val, ULONG_MAX) {
mas_store(&newmas, val);
}
- mas_destroy(&newmas);
+
+ mas_unlock(&mas);
mas_unlock(&newmas);
- rcu_read_unlock();
+
+ mas_destroy(&newmas);
mt_validate(&newmt);
mt_set_non_kernel(0);
mtree_destroy(&newmt);
--
2.20.1