[tip: core/rcu] rcutorture: Properly synchronize with OOM notifier

From: tip-bot2 for Paul E. McKenney
Date: Fri Oct 09 2020 - 02:35:31 EST


The following commit has been merged into the core/rcu branch of tip:

Commit-ID: 57f602022e82ee8fa6476d0e16ddbaf3eb86b245
Gitweb: https://git.kernel.org/tip/57f602022e82ee8fa6476d0e16ddbaf3eb86b245
Author: Paul E. McKenney <paulmck@xxxxxxxxxx>
AuthorDate: Mon, 20 Jul 2020 08:34:07 -07:00
Committer: Paul E. McKenney <paulmck@xxxxxxxxxx>
CommitterDate: Mon, 24 Aug 2020 18:45:34 -07:00

rcutorture: Properly synchronize with OOM notifier

The current rcutorture forward-progress code assumes that it is the
only cause of out-of-memory (OOM) events. For script-based rcutorture
testing, this assumption is in fact correct. However, testing based
on modprobe/rmmod might well encounter external OOM events, which could
happen at any time.

This commit therefore properly synchronizes the interaction between
rcutorture's forward-progress testing and its OOM notifier by adding a
global mutex.

Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxx>
---
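
The handshake the patch relies on is small enough to lift into a self-contained module. The sketch below is illustrative only, not rcutorture code: names such as struct demo_state, demo_oom_notify(), and demo_statep are invented for this example, and it assumes a kernel exposing register_oom_notifier()/unregister_oom_notifier(). It shows the same pattern as the patch: init publishes the state pointer under a mutex, the OOM notifier rereads that pointer under the same mutex and returns NOTIFY_OK early if nothing is published, and cleanup unpublishes the pointer before freeing it.

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/oom.h>
#include <linux/slab.h>

/* Per-test state, a stand-in for rcutorture's struct rcu_fwd. */
struct demo_state {
	unsigned long n_oom_notifications;
};

static DEFINE_MUTEX(demo_mutex);	/* Guards the demo_statep pointer. */
static struct demo_state *demo_statep;

/* OOM notifier: bail out cleanly if no state is currently published. */
static int demo_oom_notify(struct notifier_block *self,
			   unsigned long notused, void *nfreed)
{
	struct demo_state *dsp;

	mutex_lock(&demo_mutex);
	dsp = demo_statep;
	if (!dsp) {
		/* External OOM arrived while nothing was published. */
		mutex_unlock(&demo_mutex);
		return NOTIFY_OK;
	}
	dsp->n_oom_notifications++;
	/*
	 * A real handler would free memory it owns here and report the
	 * amount through *(unsigned long *)nfreed, as rcutorture does.
	 */
	mutex_unlock(&demo_mutex);
	return NOTIFY_OK;
}

static struct notifier_block demo_oom_nb = {
	.notifier_call = demo_oom_notify,
};

static int __init demo_init(void)
{
	struct demo_state *dsp;

	dsp = kzalloc(sizeof(*dsp), GFP_KERNEL);
	if (!dsp)
		return -ENOMEM;
	mutex_lock(&demo_mutex);
	demo_statep = dsp;		/* Publish under the mutex. */
	mutex_unlock(&demo_mutex);
	return register_oom_notifier(&demo_oom_nb);
}

static void __exit demo_exit(void)
{
	struct demo_state *dsp;

	unregister_oom_notifier(&demo_oom_nb);
	mutex_lock(&demo_mutex);
	dsp = demo_statep;
	demo_statep = NULL;		/* Unpublish before freeing. */
	mutex_unlock(&demo_mutex);
	kfree(dsp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The reason the notifier takes the mutex instead of doing a plain load is that an external OOM event can fire at any point during modprobe/rmmod; holding the mutex across both the pointer read and its subsequent use guarantees the state cannot be unpublished and freed out from under the notifier.
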
 kernel/rcu/rcutorture.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7942be4..2b3f04e 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1796,6 +1796,7 @@ struct rcu_fwd {
 	unsigned long rcu_launder_gp_seq_start;
 };
 
+static DEFINE_MUTEX(rcu_fwd_mutex);
 static struct rcu_fwd *rcu_fwds;
 static bool rcu_fwd_emergency_stop;
 
@@ -2062,8 +2063,14 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
 static int rcutorture_oom_notify(struct notifier_block *self,
 				 unsigned long notused, void *nfreed)
 {
-	struct rcu_fwd *rfp = rcu_fwds;
+	struct rcu_fwd *rfp;
 
+	mutex_lock(&rcu_fwd_mutex);
+	rfp = rcu_fwds;
+	if (!rfp) {
+		mutex_unlock(&rcu_fwd_mutex);
+		return NOTIFY_OK;
+	}
 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
 	     __func__);
 	rcu_torture_fwd_cb_hist(rfp);
@@ -2081,6 +2088,7 @@ static int rcutorture_oom_notify(struct notifier_block *self,
 	smp_mb(); /* Frees before return to avoid redoing OOM. */
 	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
 	pr_info("%s returning after OOM processing.\n", __func__);
+	mutex_unlock(&rcu_fwd_mutex);
 	return NOTIFY_OK;
 }
 
@@ -2148,7 +2156,9 @@ static int __init rcu_torture_fwd_prog_init(void)
 		return -ENOMEM;
 	spin_lock_init(&rfp->rcu_fwd_lock);
 	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
+	mutex_lock(&rcu_fwd_mutex);
 	rcu_fwds = rfp;
+	mutex_unlock(&rcu_fwd_mutex);
 	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
 }
 
@@ -2158,7 +2168,9 @@ static void rcu_torture_fwd_prog_cleanup(void)
 
 	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
 	rfp = rcu_fwds;
+	mutex_lock(&rcu_fwd_mutex);
 	rcu_fwds = NULL;
+	mutex_unlock(&rcu_fwd_mutex);
 	kfree(rfp);
 }
 