[PATCH] eventfd: convert the global percpu eventfd_wake_count to a per-ctx percpu eventfd_wake_count
From: qiang.zhang
Date: Fri Jun 04 2021 - 03:42:08 EST
From: Zqiang <qiang.zhang@xxxxxxxxxxxxx>
In an RT system, spinlock_irq is replaced by an rt_mutex-based
sleeping lock, so a task calling eventfd_signal() can be preempted
after it has incremented the current CPU's eventfd_wake_count. If
another task then runs on this CPU and also calls eventfd_signal(),
it finds the CPU's eventfd_wake_count already non-zero, triggers the
WARN_ON_ONCE() and returns early, missing the wakeup.
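Roughly (taskA/taskB and ctx1/ctx2 are illustrative names; both
tasks run on the same CPU):

  taskA: eventfd_signal(ctx1)
           spin_lock_irqsave(&ctx1->wqh.lock, flags);  /* sleeping lock on RT */
           this_cpu_inc(eventfd_wake_count);           /* this CPU: 0 -> 1 */
           wake_up_locked_poll(&ctx1->wqh, EPOLLIN);   /* preempted here */
  taskB: eventfd_signal(ctx2)
           WARN_ON_ONCE(this_cpu_read(eventfd_wake_count));  /* sees 1 */
           return 0;                                   /* ctx2 wakeup is lost */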
In a non-RT system, even if eventfd_signal() calls nest, no deadlock
can occur as long as each nesting level operates on a different
eventfd_ctx object. So convert the global percpu eventfd_wake_count
into a per-eventfd_ctx percpu counter, making the recursion check
fire only for genuine recursion on the same eventfd_ctx.
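With the per-ctx counter, a nested signal of a different eventfd is
allowed again (the callback chain below is only an example):

  eventfd_signal(ctx1)
    this_cpu_inc(*ctx1->eventfd_wake_count);   /* ctx1: 0 -> 1 */
    wake_up_locked_poll(&ctx1->wqh, EPOLLIN);
      -> a wakeup callback signals a second eventfd:
         eventfd_signal(ctx2);                 /* *ctx2->eventfd_wake_count == 0, proceeds */

Only genuine recursion on the same ctx, which could deadlock on
ctx->wqh.lock, still triggers the WARN_ON_ONCE().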
Fixes: b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
Signed-off-by: Zqiang <qiang.zhang@xxxxxxxxxxxxx>
---
fs/aio.c | 2 +-
fs/eventfd.c | 23 +++++++++++++++++++----
include/linux/eventfd.h | 9 ++-------
3 files changed, 22 insertions(+), 12 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 76ce0cc3ee4e..b45983d5d35a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1695,7 +1695,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
list_del(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
req->done = true;
- if (iocb->ki_eventfd && eventfd_signal_count()) {
+ if (iocb->ki_eventfd && eventfd_signal_count(iocb->ki_eventfd)) {
iocb = NULL;
INIT_WORK(&req->work, aio_poll_put_work);
schedule_work(&req->work);
diff --git a/fs/eventfd.c b/fs/eventfd.c
index e265b6dd4f34..ef92d3dedde8 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -25,7 +25,6 @@
#include <linux/idr.h>
#include <linux/uio.h>
-DEFINE_PER_CPU(int, eventfd_wake_count);
static DEFINE_IDA(eventfd_ida);
@@ -43,8 +42,15 @@ struct eventfd_ctx {
__u64 count;
unsigned int flags;
int id;
+ int __percpu *eventfd_wake_count;
};
+bool eventfd_signal_count(struct eventfd_ctx *ctx)
+{
+ return this_cpu_read(*ctx->eventfd_wake_count);
+}
+EXPORT_SYMBOL_GPL(eventfd_signal_count);
+
/**
* eventfd_signal - Adds @n to the eventfd counter.
* @ctx: [in] Pointer to the eventfd context.
@@ -71,17 +77,17 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
* it returns true, the eventfd_signal() call should be deferred to a
* safe context.
*/
- if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+ if (WARN_ON_ONCE(this_cpu_read(*ctx->eventfd_wake_count)))
return 0;
spin_lock_irqsave(&ctx->wqh.lock, flags);
- this_cpu_inc(eventfd_wake_count);
+ this_cpu_inc(*ctx->eventfd_wake_count);
if (ULLONG_MAX - ctx->count < n)
n = ULLONG_MAX - ctx->count;
ctx->count += n;
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLIN);
- this_cpu_dec(eventfd_wake_count);
+ this_cpu_dec(*ctx->eventfd_wake_count);
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return n;
@@ -92,6 +98,9 @@ static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
if (ctx->id >= 0)
ida_simple_remove(&eventfd_ida, ctx->id);
+
+ if (ctx->eventfd_wake_count)
+ free_percpu(ctx->eventfd_wake_count);
kfree(ctx);
}
@@ -421,6 +430,12 @@ static int do_eventfd(unsigned int count, int flags)
if (!ctx)
return -ENOMEM;
+ ctx->eventfd_wake_count = alloc_percpu(int);
+ if (!ctx->eventfd_wake_count) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
+
kref_init(&ctx->kref);
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index fa0a524baed0..1deda815ef1b 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -43,12 +43,7 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
__u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
-DECLARE_PER_CPU(int, eventfd_wake_count);
-
-static inline bool eventfd_signal_count(void)
-{
- return this_cpu_read(eventfd_wake_count);
-}
+bool eventfd_signal_count(struct eventfd_ctx *ctx);
#else /* CONFIG_EVENTFD */
@@ -78,7 +73,7 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
return -ENOSYS;
}
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_count(struct eventfd_ctx *ctx)
{
return false;
}
--
2.17.1