Re: [PATCH v4 14/16] locking/rwsem: Guard against making count negative

From: Peter Zijlstra
Date: Wed Apr 24 2019 - 03:10:07 EST


On Tue, Apr 23, 2019 at 03:12:16PM -0400, Waiman Long wrote:
> That is true in general, but doing preempt_disable/enable across a
> function boundary is ugly and prone to further problems down the road.

We do worse things in this code, and the thing Linus proposes is
actually quite simple, something like so:

---
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -912,7 +904,7 @@ rwsem_down_read_slowpath(struct rw_semap
 			raw_spin_unlock_irq(&sem->wait_lock);
 			break;
 		}
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_reader);
 	}
 
@@ -1121,6 +1113,7 @@ static struct rw_semaphore *rwsem_downgr
  */
 inline void __down_read(struct rw_semaphore *sem)
 {
+	preempt_disable();
 	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
 			&sem->count) & RWSEM_READ_FAILED_MASK)) {
 		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
@@ -1129,10 +1122,12 @@ inline void __down_read(struct rw_semaph
 	} else {
 		rwsem_set_reader_owned(sem);
 	}
+	preempt_enable();
 }
 
 static inline int __down_read_killable(struct rw_semaphore *sem)
 {
+	preempt_disable();
 	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
 			&sem->count) & RWSEM_READ_FAILED_MASK)) {
 		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
@@ -1142,6 +1137,7 @@ static inline int __down_read_killable(s
 	} else {
 		rwsem_set_reader_owned(sem);
 	}
+	preempt_enable();
 	return 0;
 }
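
Condensed into one place, the shape of the change is something like the
below (a sketch only, reusing the primitives already visible in the
patch; __down_read_sketch() is a made-up name standing in for
__down_read()/__down_read_killable(), not anything in the tree):

static inline void __down_read_sketch(struct rw_semaphore *sem)
{
	/*
	 * Disable preemption before the reader bias goes into ->count,
	 * so a reader that misses the fast path cannot sit preempted
	 * while its transient bias is still in the count.  The slowpath
	 * then runs with preemption disabled as well and sleeps via
	 * schedule_preempt_disabled(), which re-enables preemption only
	 * around the actual schedule().
	 */
	preempt_disable();
	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
			&sem->count) & RWSEM_READ_FAILED_MASK))
		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
	else
		rwsem_set_reader_owned(sem);
	preempt_enable();
}

That is, the preempt_disable()/preempt_enable() pair brackets the whole
fast path plus the slowpath call, which is why the slowpath's sleep has
to become schedule_preempt_disabled() rather than a bare schedule().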