epoll: eliminate unnecessary lock for zero timeout
author		Soheil Hassas Yeganeh <soheil@google.com>
		Fri, 18 Dec 2020 22:02:06 +0000 (14:02 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Sat, 19 Dec 2020 19:18:38 +0000 (11:18 -0800)
We call ep_events_available() under the lock when the timeout is 0, and
then call it without the lock in the loop for the other cases.

Instead, call ep_events_available() without the lock in all cases.  For
non-zero timeouts, we will recheck after adding the thread to the wait
queue.  For zero-timeout cases, by definition, the user is
opportunistically polling and will have to call epoll_wait() again in
the future.
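
For illustration (not part of the patch), the zero-timeout pattern as
seen from userspace is sketched below; poll_pass() is a made-up name,
and epfd is assumed to be a valid epoll instance with fds already
registered:

	#include <stdio.h>
	#include <sys/epoll.h>

	/* Opportunistic poll: with timeout == 0, epoll_wait() returns
	 * immediately.  A return of 0 only means "nothing was visible at
	 * that instant"; an event queued concurrently may be missed, so
	 * the caller simply polls again on its next pass. */
	static int poll_pass(int epfd)
	{
		struct epoll_event events[64];
		int i, n = epoll_wait(epfd, events, 64, 0);

		for (i = 0; i < n; i++)
			printf("fd %d ready\n", events[i].data.fd);

		return n;	/* 0 is a hint, not a guarantee of idleness */
	}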

Note that this lock was kept in c5a282e9635e9 because the whole loop was
historically under lock.

This patch results in a 1% CPU/RPC reduction in RPC benchmarks.

Link: https://lkml.kernel.org/r/20201106231635.3528496-9-soheil.kdev@gmail.com
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Suggested-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Khazhismel Kumykov <khazhy@google.com>
Cc: Guantao Liu <guantaol@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/eventpoll.c

index 1e0030cb805ba559b29fa141e01435ee02ea3a5a..9efb553b2b2b0ccc310640644c5f07f15b12c986 100644
@@ -1743,7 +1743,7 @@ static inline struct timespec64 ep_set_mstimeout(long ms)
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                   int maxevents, long timeout)
 {
-       int res, eavail = 0, timed_out = 0;
+       int res, eavail, timed_out = 0;
        u64 slack = 0;
        wait_queue_entry_t wait;
        ktime_t expires, *to = NULL;
@@ -1759,18 +1759,21 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
        } else if (timeout == 0) {
                /*
                 * Avoid the unnecessary trip to the wait queue loop, if the
-                * caller specified a non blocking operation. We still need
-                * lock because we could race and not see an epi being added
-                * to the ready list while in irq callback. Thus incorrectly
-                * returning 0 back to userspace.
+                * caller specified a non-blocking operation.
                 */
                timed_out = 1;
-
-               write_lock_irq(&ep->lock);
-               eavail = ep_events_available(ep);
-               write_unlock_irq(&ep->lock);
        }
 
+       /*
+        * This call is racy: we may or may not see events that are being added
+        * to the ready list under the lock (e.g., in IRQ callbacks). For cases
+        * with a non-zero timeout, this thread will check the ready list under
+        * lock and will be added to the wait queue.  For cases with a zero
+        * timeout, the user by definition should not care and will have to
+        * recheck again.
+        */
+       eavail = ep_events_available(ep);
+
        while (1) {
                if (eavail) {
                        /*
@@ -1786,10 +1789,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                if (timed_out)
                        return 0;
 
-               eavail = ep_events_available(ep);
-               if (eavail)
-                       continue;
-
                eavail = ep_busy_loop(ep, timed_out);
                if (eavail)
                        continue;