Replace the preempt_disable()/rcu_read_lock() pair in
rwsem_can_spin_on_owner() with rcu_read_lock_sched(). This simplifies
the code and saves a few cycles of RCU nesting-counter handling in
rcu_read_lock().
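
A simplified sketch of why this is a drop-in replacement (lockdep/sparse
annotations and debug checks omitted; the real definitions live in
include/linux/rcupdate.h):

  /* rcu_read_lock_sched() boils down to disabling preemption */
  static inline void rcu_read_lock_sched(void)
  {
  	preempt_disable();
  }

  static inline void rcu_read_unlock_sched(void)
  {
  	preempt_enable();
  }

So the open-coded preempt_disable()/rcu_read_lock() pair collapses into
a single call, and preemptible RCU's rcu_read_lock_nesting bookkeeping
in rcu_read_lock() is skipped entirely.
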
Signed-off-by: Yanfei Xu <yanfei.xu@xxxxxxxxxxxxx>
---
kernel/locking/rwsem.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 000e8d5a2884..7afadfe02286 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -616,8 +616,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 	}
 
-	preempt_disable();
-	rcu_read_lock();
+	rcu_read_lock_sched();
 	owner = rwsem_owner_flags(sem, &flags);
 	/*
 	 * Don't check the read-owner as the entry may be stale.
@@ -625,8 +624,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	rcu_read_unlock();
-	preempt_enable();
+	rcu_read_unlock_sched();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
 	return ret;