fs/epoll: deal with wait_queue only once
There is no reason to rearm the waitqueue upon every fetch_events
retry (for when events are found yet send_events() fails).  If nothing
else, this saves four lock operations per retry, and it further reduces
the scope of the lock.

[akpm@linux-foundation.org: restore code to original position, fix and reflow comment]
Link: http://lkml.kernel.org/r/20181114182532.27981-2-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Jason Baron <jbaron@akamai.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
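
To make the saving concrete, here is a hypothetical pseudo-C sketch of the
retry flow; it is not the kernel source. ep_poll_sketch(), ep_add_waiter(),
ep_del_waiter(), sleep_until_woken() and timed_out() are illustrative
stand-ins, and each ep_add_waiter()/ep_del_waiter() call hides a
spin_lock_irq()/spin_unlock_irq() pair, i.e. two lock operations:

/*
 * Hypothetical sketch, not fs/eventpoll.c. ep_add_waiter() and
 * ep_del_waiter() stand in for the locked __add_wait_queue_exclusive()
 * and __remove_wait_queue() sequences (two lock operations each).
 */
static int ep_poll_sketch(struct eventpoll *ep)
{
	bool waiter = false;	/* have we armed the waitqueue yet? */
	int res;

fetch_events:
	if (!waiter) {		/* arm the waitqueue at most once */
		waiter = true;
		ep_add_waiter(ep);		/* lock + unlock */
	}
	sleep_until_woken(ep);	/* the old code also called
				 * ep_del_waiter() here, re-arming
				 * on every retry */
send_events:
	res = ep_send_events(ep);
	if (!res && !timed_out(ep))
		goto fetch_events;		/* no waitqueue churn */

	if (waiter)
		ep_del_waiter(ep);		/* lock + unlock, once */
	return res;
}

Under the old scheme both the add and the remove sat inside the loop, so
every ep_send_events() failure that jumped back to fetch_events paid all
four lock operations again.
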
Davidlohr Bueso authored and Linus Torvalds committed Jan 4, 2019
1 parent 35cff1a commit 86c0517
Showing 1 changed file with 18 additions and 11 deletions.
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1749,6 +1749,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 {
 	int res = 0, eavail, timed_out = 0;
 	u64 slack = 0;
+	bool waiter = false;
 	wait_queue_entry_t wait;
 	ktime_t expires, *to = NULL;
 
@@ -1794,14 +1795,18 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	ep_reset_busy_poll_napi_id(ep);
 
 	/*
-	 * We don't have any available event to return to the caller.
-	 * We need to sleep here, and we will be wake up by
-	 * ep_poll_callback() when events will become available.
+	 * We don't have any available event to return to the caller. We need
+	 * to sleep here, and we will be woken by ep_poll_callback() when events
+	 * become available.
 	 */
-	init_waitqueue_entry(&wait, current);
-	spin_lock_irq(&ep->wq.lock);
-	__add_wait_queue_exclusive(&ep->wq, &wait);
-	spin_unlock_irq(&ep->wq.lock);
+	if (!waiter) {
+		waiter = true;
+		init_waitqueue_entry(&wait, current);
+
+		spin_lock_irq(&ep->wq.lock);
+		__add_wait_queue_exclusive(&ep->wq, &wait);
+		spin_unlock_irq(&ep->wq.lock);
+	}
 
 	for (;;) {
 		/*
@@ -1837,10 +1842,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 
 	__set_current_state(TASK_RUNNING);
 
-	spin_lock_irq(&ep->wq.lock);
-	__remove_wait_queue(&ep->wq, &wait);
-	spin_unlock_irq(&ep->wq.lock);
-
 send_events:
 	/*
 	 * Try to transfer events to user space. In case we get 0 events and
@@ -1851,6 +1852,12 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		!(res = ep_send_events(ep, events, maxevents)) && !timed_out)
 		goto fetch_events;
 
+	if (waiter) {
+		spin_lock_irq(&ep->wq.lock);
+		__remove_wait_queue(&ep->wq, &wait);
+		spin_unlock_irq(&ep->wq.lock);
+	}
+
 	return res;
 }