[PATCH RT v3 2/5] sched: Rename sleeping_lock to rt_invol_sleep

From: Scott Wood
Date: Wed Sep 11 2019 - 12:57:46 EST


The sleeping_lock counter is already used for one situation other than
acquiring a lock (the cpu_chill() delay), and the next patch will add
another, so rename it to avoid confusion.

Signed-off-by: Scott Wood <swood@xxxxxxxxxx>
---
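A note on usage (illustrative, not part of the patch itself): the helpers
are a balanced inc/dec pair wrapped around any RT-specific involuntary
sleep, as in the cpu_chill() hunk below. While the counter is nonzero,
the TASK_UNINTERRUPTIBLE test in softirq_check_pending_idle() and the
WARN_ON_ONCE() in rcu_note_context_switch() treat the sleeping task as
blocked involuntarily. A minimal sketch of the pattern, modeled on
cpu_chill(); the function name is hypothetical:

#include <linux/hrtimer.h>	/* schedule_hrtimeout() */
#include <linux/ktime.h>	/* ktime_set() */
#include <linux/sched.h>	/* rt_invol_sleep_inc()/_dec() */

/*
 * Hypothetical example, not from this patch: sleep for ~1ms as an
 * RT-specific involuntary delay. The counter must stay balanced, so
 * the dec always follows the sleep on the same task.
 */
static void example_rt_invol_wait(void)
{
	ktime_t delay = ktime_set(0, NSEC_PER_MSEC);

	set_current_state(TASK_UNINTERRUPTIBLE);
	rt_invol_sleep_inc();	/* entering an involuntary sleep */
	schedule_hrtimeout(&delay, HRTIMER_MODE_REL_HARD);
	rt_invol_sleep_dec();	/* balanced with the inc above */
}
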
 include/linux/sched.h      | 15 ++++++++-------
 kernel/locking/rtmutex.c   | 14 +++++++-------
 kernel/locking/rwlock-rt.c | 16 ++++++++--------
 kernel/rcu/tree_plugin.h   |  6 +++---
 kernel/softirq.c           |  2 +-
 kernel/time/hrtimer.c      |  4 ++--
 6 files changed, 29 insertions(+), 28 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7e892e727f12..edc93b74f7d8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -674,7 +674,8 @@ struct task_struct {
# endif
#endif
#ifdef CONFIG_PREEMPT_RT_FULL
- int sleeping_lock;
+ /* Task sleeps involuntarily due to an RT-specific mechanism */
+ int rt_invol_sleep;
#endif

#ifdef CONFIG_PREEMPT_RCU
@@ -1882,20 +1883,20 @@ static __always_inline bool need_resched(void)
}

#ifdef CONFIG_PREEMPT_RT_FULL
-static inline void sleeping_lock_inc(void)
+static inline void rt_invol_sleep_inc(void)
{
- current->sleeping_lock++;
+ current->rt_invol_sleep++;
}

-static inline void sleeping_lock_dec(void)
+static inline void rt_invol_sleep_dec(void)
{
- current->sleeping_lock--;
+ current->rt_invol_sleep--;
}

#else

-static inline void sleeping_lock_inc(void) { }
-static inline void sleeping_lock_dec(void) { }
+static inline void rt_invol_sleep_inc(void) { }
+static inline void rt_invol_sleep_dec(void) { }
#endif

/*
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 5ccbb45131e5..d7100586c597 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1135,7 +1135,7 @@ void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)

void __lockfunc rt_spin_lock(spinlock_t *lock)
{
- sleeping_lock_inc();
+ rt_invol_sleep_inc();
migrate_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -1150,7 +1150,7 @@ void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
- sleeping_lock_inc();
+ rt_invol_sleep_inc();
migrate_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -1164,7 +1164,7 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock)
spin_release(&lock->dep_map, 1, _RET_IP_);
rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
migrate_enable();
- sleeping_lock_dec();
+ rt_invol_sleep_dec();
}
EXPORT_SYMBOL(rt_spin_unlock);

@@ -1190,14 +1190,14 @@ int __lockfunc rt_spin_trylock(spinlock_t *lock)
{
int ret;

- sleeping_lock_inc();
+ rt_invol_sleep_inc();
migrate_disable();
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
} else {
migrate_enable();
- sleeping_lock_dec();
+ rt_invol_sleep_dec();
}
return ret;
}
@@ -1210,7 +1210,7 @@ int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
local_bh_disable();
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
- sleeping_lock_inc();
+ rt_invol_sleep_inc();
migrate_disable();
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
} else
@@ -1226,7 +1226,7 @@ int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
*flags = 0;
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
- sleeping_lock_inc();
+ rt_invol_sleep_inc();
migrate_disable();
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
}
diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c
index c3b91205161c..de025a7cc9c4 100644
--- a/kernel/locking/rwlock-rt.c
+++ b/kernel/locking/rwlock-rt.c
@@ -305,14 +305,14 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
{
int ret;

- sleeping_lock_inc();
+ rt_invol_sleep_inc();
migrate_disable();
ret = do_read_rt_trylock(rwlock);
if (ret) {
rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
} else {
migrate_enable();
- sleeping_lock_dec();
+ rt_invol_sleep_dec();
}
return ret;
}
@@ -322,14 +322,14 @@ int __lockfunc rt_write_trylock(rwlock_t *rwlock)
{
int ret;

- sleeping_lock_inc();
+ rt_invol_sleep_inc();
migrate_disable();
ret = do_write_rt_trylock(rwlock);
if (ret) {
rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
} else {
migrate_enable();
- sleeping_lock_dec();
+ rt_invol_sleep_dec();
}
return ret;
}
@@ -337,7 +337,7 @@ int __lockfunc rt_write_trylock(rwlock_t *rwlock)

void __lockfunc rt_read_lock(rwlock_t *rwlock)
{
- sleeping_lock_inc();
+ rt_invol_sleep_inc();
migrate_disable();
rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
do_read_rt_lock(rwlock);
@@ -346,7 +346,7 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock)

void __lockfunc rt_write_lock(rwlock_t *rwlock)
{
- sleeping_lock_inc();
+ rt_invol_sleep_inc();
migrate_disable();
rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
do_write_rt_lock(rwlock);
@@ -358,7 +358,7 @@ void __lockfunc rt_read_unlock(rwlock_t *rwlock)
rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
do_read_rt_unlock(rwlock);
migrate_enable();
- sleeping_lock_dec();
+ rt_invol_sleep_dec();
}
EXPORT_SYMBOL(rt_read_unlock);

@@ -367,7 +367,7 @@ void __lockfunc rt_write_unlock(rwlock_t *rwlock)
rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
do_write_rt_unlock(rwlock);
migrate_enable();
- sleeping_lock_dec();
+ rt_invol_sleep_dec();
}
EXPORT_SYMBOL(rt_write_unlock);

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 23a54e4b649c..0da4b975cd71 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -287,15 +287,15 @@ void rcu_note_context_switch(bool preempt)
struct task_struct *t = current;
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp;
- int sleeping_l = 0;
+ int rt_invol = 0;

barrier(); /* Avoid RCU read-side critical sections leaking down. */
trace_rcu_utilization(TPS("Start context switch"));
lockdep_assert_irqs_disabled();
#if defined(CONFIG_PREEMPT_RT_FULL)
- sleeping_l = t->sleeping_lock;
+ rt_invol = t->rt_invol_sleep;
#endif
- WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l);
+ WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !rt_invol);
if (t->rcu_read_lock_nesting > 0 &&
!t->rcu_read_unlock_special.b.blocked) {

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 6080c9328df1..daa21a87838a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -879,7 +879,7 @@ void softirq_check_pending_idle(void)
*/
raw_spin_lock(&tsk->pi_lock);
if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING ||
- (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) {
+ (tsk->state == TASK_UNINTERRUPTIBLE && tsk->rt_invol_sleep)) {
okay = true;
}
raw_spin_unlock(&tsk->pi_lock);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 5eb45a868de9..0e6e2dcf6fa4 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1863,9 +1863,9 @@ void cpu_chill(void)
chill_time = ktime_set(0, NSEC_PER_MSEC);

current->flags |= PF_NOFREEZE;
- sleeping_lock_inc();
+ rt_invol_sleep_inc();
schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD);
- sleeping_lock_dec();
+ rt_invol_sleep_dec();
if (!freeze_flag)
current->flags &= ~PF_NOFREEZE;

--
1.8.3.1