[PATCH v5 08/10] maple_tree: Update check_forking() and bench_forking()
From: Peng Zhang
Date: Sun Oct 15 2023 - 23:25:52 EST
Update check_forking() and bench_forking() to use __mt_dup() to
duplicate the maple tree, instead of rebuilding the new tree one entry
at a time with mas_expected_entries() and mas_store(). The source tree
now uses an external lock as well, and the new tree's lock is taken
with down_write_nested(SINGLE_DEPTH_NESTING) while the source lock is
held. Add down_read_nested()/down_write_nested() stubs to the userspace
rwsem.h so the test code keeps building in tools/.
Signed-off-by: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
---
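Note for reviewers (dropped by git-am): after this patch, both tests
follow the same duplicate-then-replace outline sketched below. This is
a condensed illustration of the code in the diff, not a drop-in
function; fork_sketch() and its parameters are invented here for
clarity.

static void fork_sketch(struct maple_tree *mt, struct maple_tree *newmt,
			struct rw_semaphore *mt_lock,
			struct rw_semaphore *newmt_lock)
{
	MA_STATE(newmas, newmt, 0, 0);
	void *val;

	/* Hold both tree locks; the destination nests under the source. */
	down_write(mt_lock);
	down_write_nested(newmt_lock, SINGLE_DEPTH_NESTING);

	/* Duplicate the whole source tree in one pass, replacing the old
	 * per-entry rebuild via mas_expected_entries()/mas_store(). */
	if (__mt_dup(mt, newmt, GFP_KERNEL)) {
		pr_err("OOM!");
		BUG_ON(1);
	}

	/* Walk the duplicate and write each entry back in place, the way
	 * fork() would replace each old VMA with its new copy. */
	mas_set(&newmas, 0);
	mas_for_each(&newmas, val, ULONG_MAX)
		mas_store(&newmas, val);

	mas_destroy(&newmas);
	up_write(newmt_lock);
	up_write(mt_lock);
}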
 lib/test_maple_tree.c       | 117 ++++++++++++++++++------------------
 tools/include/linux/rwsem.h |   4 ++
 2 files changed, 62 insertions(+), 59 deletions(-)

diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index de470950714f..3e4597fb49d3 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -1834,47 +1834,48 @@ static noinline void __init bench_mas_prev(struct maple_tree *mt)
 }
 #endif
 
 /* check_forking - simulate the kernel forking sequence with the tree. */
-static noinline void __init check_forking(struct maple_tree *mt)
+static noinline void __init check_forking(void)
 {
-
-	struct maple_tree newmt;
-	int i, nr_entries = 134;
+	struct maple_tree mt, newmt;
+	int i, nr_entries = 134, ret;
 	void *val;
-	MA_STATE(mas, mt, 0, 0);
-	MA_STATE(newmas, mt, 0, 0);
-	struct rw_semaphore newmt_lock;
+	MA_STATE(mas, &mt, 0, 0);
+	MA_STATE(newmas, &newmt, 0, 0);
+	struct rw_semaphore mt_lock, newmt_lock;
+	init_rwsem(&mt_lock);
 	init_rwsem(&newmt_lock);
-	for (i = 0; i <= nr_entries; i++)
-		mtree_store_range(mt, i*10, i*10 + 5,
-				xa_mk_value(i), GFP_KERNEL);
+	mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+	mt_set_external_lock(&mt, &mt_lock);
-	mt_set_non_kernel(99999);
 	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
 	mt_set_external_lock(&newmt, &newmt_lock);
-	newmas.tree = &newmt;
-	mas_reset(&newmas);
-	mas_reset(&mas);
-	down_write(&newmt_lock);
-	mas.index = 0;
-	mas.last = 0;
-	if (mas_expected_entries(&newmas, nr_entries)) {
+
+	down_write(&mt_lock);
+	for (i = 0; i <= nr_entries; i++) {
+		mas_set_range(&mas, i*10, i*10 + 5);
+		mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+	}
+
+	down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+	ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+	if (ret) {
 		pr_err("OOM!");
 		BUG_ON(1);
 	}
-	rcu_read_lock();
-	mas_for_each(&mas, val, ULONG_MAX) {
-		newmas.index = mas.index;
-		newmas.last = mas.last;
+
+	mas_set(&newmas, 0);
+	mas_for_each(&newmas, val, ULONG_MAX)
 		mas_store(&newmas, val);
-	}
-	rcu_read_unlock();
+
 	mas_destroy(&newmas);
+	mas_destroy(&mas);
 	mt_validate(&newmt);
-	mt_set_non_kernel(0);
 	__mt_destroy(&newmt);
+	__mt_destroy(&mt);
 	up_write(&newmt_lock);
+	up_write(&mt_lock);
 }
 
 static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1977,49 +1978,51 @@ static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
 }
 
 #if defined(BENCH_FORK)
-static noinline void __init bench_forking(struct maple_tree *mt)
+static noinline void __init bench_forking(void)
 {
-
-	struct maple_tree newmt;
-	int i, nr_entries = 134, nr_fork = 80000;
+	struct maple_tree mt, newmt;
+	int i, nr_entries = 134, nr_fork = 80000, ret;
 	void *val;
-	MA_STATE(mas, mt, 0, 0);
-	MA_STATE(newmas, mt, 0, 0);
-	struct rw_semaphore newmt_lock;
+	MA_STATE(mas, &mt, 0, 0);
+	MA_STATE(newmas, &newmt, 0, 0);
+	struct rw_semaphore mt_lock, newmt_lock;
+	init_rwsem(&mt_lock);
 	init_rwsem(&newmt_lock);
-	mt_set_external_lock(&newmt, &newmt_lock);
-	for (i = 0; i <= nr_entries; i++)
-		mtree_store_range(mt, i*10, i*10 + 5,
-				xa_mk_value(i), GFP_KERNEL);
+	mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+	mt_set_external_lock(&mt, &mt_lock);
+
+	down_write(&mt_lock);
+	for (i = 0; i <= nr_entries; i++) {
+		mas_set_range(&mas, i*10, i*10 + 5);
+		mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+	}
 	for (i = 0; i < nr_fork; i++) {
-		mt_set_non_kernel(99999);
-		mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
-		newmas.tree = &newmt;
-		mas_reset(&newmas);
-		mas_reset(&mas);
-		mas.index = 0;
-		mas.last = 0;
-		rcu_read_lock();
-		down_write(&newmt_lock);
-		if (mas_expected_entries(&newmas, nr_entries)) {
-			printk("OOM!");
+		mt_init_flags(&newmt,
+			      MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+		mt_set_external_lock(&newmt, &newmt_lock);
+
+		down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+		ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+		if (ret) {
+			pr_err("OOM!");
 			BUG_ON(1);
 		}
-		mas_for_each(&mas, val, ULONG_MAX) {
-			newmas.index = mas.index;
-			newmas.last = mas.last;
+
+		mas_set(&newmas, 0);
+		mas_for_each(&newmas, val, ULONG_MAX)
 			mas_store(&newmas, val);
-		}
+
 		mas_destroy(&newmas);
-		rcu_read_unlock();
 		mt_validate(&newmt);
-		mt_set_non_kernel(0);
 		__mt_destroy(&newmt);
 		up_write(&newmt_lock);
 	}
+	mas_destroy(&mas);
+	__mt_destroy(&mt);
+	up_write(&mt_lock);
 }
 #endif
@@ -3615,9 +3618,7 @@ static int __init maple_tree_seed(void)
 #endif
 #if defined(BENCH_FORK)
 #define BENCH
-	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
-	bench_forking(&tree);
-	mtree_destroy(&tree);
+	bench_forking();
 	goto skip;
 #endif
 #if defined(BENCH_MT_FOR_EACH)
@@ -3650,9 +3651,7 @@ static int __init maple_tree_seed(void)
 	check_iteration(&tree);
 	mtree_destroy(&tree);
 
-	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
-	check_forking(&tree);
-	mtree_destroy(&tree);
+	check_forking();
 
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
 	check_mas_store_gfp(&tree);
diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
index 83971b3cbfce..f8bffd4a987c 100644
--- a/tools/include/linux/rwsem.h
+++ b/tools/include/linux/rwsem.h
@@ -37,4 +37,8 @@ static inline int up_write(struct rw_semaphore *sem)
 {
 	return pthread_rwlock_unlock(&sem->lock);
 }
+
+#define down_read_nested(sem, subclass) down_read(sem)
+#define down_write_nested(sem, subclass) down_write(sem)
+
 #endif /* _TOOLS_RWSEM_H */
--
2.20.1