[PATCH 2/2] fs/epoll: deal with wait_queue only once

From: Davidlohr Bueso
Date: Wed Nov 14 2018 - 13:25:56 EST


There is no reason to rearm the waitqueue upon every
fetch_events retry (i.e. when events are found yet send_events()
fails). If nothing else, this saves four lock operations per
retry and further reduces the scope of the lock.
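
For illustration only (not part of the patch): below is a minimal
userspace sketch of the retry pattern being changed, with a pthread
mutex standing in for ep->wq.lock and two helpers standing in for
adding/removing the wait queue entry. All names in it are made up
for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long lock_ops;

/* stand-in for taking ep->wq.lock and __add_wait_queue_exclusive() */
static void add_waiter(void)
{
	pthread_mutex_lock(&wq_lock);   lock_ops++;
	pthread_mutex_unlock(&wq_lock); lock_ops++;
}

/* stand-in for taking ep->wq.lock and __remove_wait_queue() */
static void remove_waiter(void)
{
	pthread_mutex_lock(&wq_lock);   lock_ops++;
	pthread_mutex_unlock(&wq_lock); lock_ops++;
}

int main(void)
{
	int passes = 4;		/* pretend send_events() failed a few times */
	bool waiter = false;

	/* old flow: add + remove (four lock ops) on every fetch_events pass */
	for (int i = 0; i < passes; i++) {
		add_waiter();
		/* ... sleep until woken ... */
		remove_waiter();
	}
	printf("old flow: %lu lock ops\n", lock_ops);

	/* new flow: arm the entry at most once, tear it down once at the end */
	lock_ops = 0;
	for (int i = 0; i < passes; i++) {
		if (!waiter) {
			waiter = true;
			add_waiter();
		}
		/* ... sleep until woken ... */
	}
	if (waiter)
		remove_waiter();
	printf("new flow: %lu lock ops\n", lock_ops);
	return 0;
}

In the old flow the add/remove pair repeats on every fetch_events
pass; in the new flow the wait queue entry is armed at most once and
removed once after the send_events retry loop, which is what the
'waiter' flag in the patch below tracks.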

Signed-off-by: Davidlohr Bueso <dbueso@xxxxxxx>
---
fs/eventpoll.c | 24 ++++++++++++++++--------
1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 7e2b5f3d6b3e..25b0c94cc091 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1749,6 +1749,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
{
int res = 0, eavail, timed_out = 0;
u64 slack = 0;
+ bool waiter = false;
wait_queue_entry_t wait;
ktime_t expires, *to = NULL;

@@ -1786,6 +1787,15 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
if (eavail)
goto send_events;

+ if (!waiter) {
+ waiter = true;
+ init_waitqueue_entry(&wait, current);
+
+ spin_lock_irq(&ep->wq.lock);
+ __add_wait_queue_exclusive(&ep->wq, &wait);
+ spin_unlock_irq(&ep->wq.lock);
+ }
+
/*
* Busy poll timed out. Drop NAPI ID for now, we can add
* it back in when we have moved a socket with a valid NAPI
@@ -1798,10 +1808,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
* We need to sleep here, and we will be wake up by
* ep_poll_callback() when events will become available.
*/
- init_waitqueue_entry(&wait, current);
- spin_lock_irq(&ep->wq.lock);
- __add_wait_queue_exclusive(&ep->wq, &wait);
- spin_unlock_irq(&ep->wq.lock);

for (;;) {
/*
@@ -1836,10 +1842,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,

__set_current_state(TASK_RUNNING);

- spin_lock_irq(&ep->wq.lock);
- __remove_wait_queue(&ep->wq, &wait);
- spin_unlock_irq(&ep->wq.lock);
-
send_events:
/*
* Try to transfer events to user space. In case we get 0 events and
@@ -1850,6 +1852,12 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
!(res = ep_send_events(ep, events, maxevents)) && !timed_out)
goto fetch_events;

+ if (waiter) {
+ spin_lock_irq(&ep->wq.lock);
+ __remove_wait_queue(&ep->wq, &wait);
+ spin_unlock_irq(&ep->wq.lock);
+ }
+
return res;
}

--
2.16.4