fs/epoll: simplify ep_send_events_proc() ready-list loop
author	Davidlohr Bueso <dave@stgolabs.net>
	Thu, 3 Jan 2019 23:27:05 +0000 (15:27 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Fri, 4 Jan 2019 21:13:46 +0000 (13:13 -0800)
The current logic is a bit convoluted.  Let's simplify this with a
standard list_for_each_entry_safe() loop instead and just break out
after maxevents is reached.

While at it, remove an unnecessary indentation level in the loop when
there are in fact ready events.
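For readers less familiar with the kernel's intrusive-list helpers, the
standalone userspace C sketch below mimics only the shape of the new loop:
walk the ready list with a safe iterator, skip entries that report no
events (the removed indentation level), and break out once maxevents
entries have been delivered.  It is an illustration, not the eventpoll
code itself; the tiny list helpers stand in for <linux/list.h>, and the
demo_item type, the revents field values and the item counts are made up
for this example.

/*
 * Standalone sketch of the loop shape used by the patch (illustrative
 * only): minimal stand-ins for the kernel's <linux/list.h> helpers plus
 * a fake ready list.  All demo_* names are made up for this example.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Safe iteration: 'tmp' caches the next node so 'pos' may be unlinked. */
#define list_for_each_entry_safe(pos, tmp, head, member)                   \
	for (pos = list_entry((head)->next, __typeof__(*pos), member),     \
	     tmp = list_entry(pos->member.next, __typeof__(*pos), member); \
	     &pos->member != (head);                                       \
	     pos = tmp,                                                    \
	     tmp = list_entry(tmp->member.next, __typeof__(*tmp), member))

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = entry;
}

/* Illustrative item: a ready-list link plus a precomputed event mask. */
struct demo_item {
	struct list_head rdllink;
	unsigned int revents;
};

int main(void)
{
	struct list_head ready = LIST_HEAD_INIT(ready);
	struct demo_item items[5] = {
		{ .revents = 1 }, { .revents = 0 }, { .revents = 1 },
		{ .revents = 1 }, { .revents = 1 },
	};
	struct demo_item *epi, *tmp;
	int res = 0, maxevents = 3;
	int i;

	for (i = 0; i < 5; i++)
		list_add_tail(&items[i].rdllink, &ready);

	/* Same shape as the reworked ep_send_events_proc() loop. */
	list_for_each_entry_safe(epi, tmp, &ready, rdllink) {
		if (res >= maxevents)   /* caller's budget is exhausted */
			break;

		list_del_init(&epi->rdllink);

		if (!epi->revents)      /* nothing to report: skip the item */
			continue;

		printf("delivering item %d\n", (int)(epi - items));
		res++;
	}

	printf("delivered %d event(s)\n", res);
	return 0;
}

Built with gcc or clang (the iterator macro relies on the __typeof__
extension), the sketch delivers items 0, 2 and 3, skips item 1, and
leaves item 4 on the list once the three-event budget is spent, which is
the break-after-maxevents / continue-on-empty-mask structure the patch
introduces.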

Link: http://lkml.kernel.org/r/20181108051006.18751-3-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Jason Baron <jbaron@akamai.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/eventpoll.c

index be50799737f4c3638496e079cb08c5413367398a..aaf614ee08e4f1275f44b1cc15ff8c4d8c3af1ba 100644 (file)
@@ -1624,21 +1624,22 @@ static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head
 {
        struct ep_send_events_data *esed = priv;
        __poll_t revents;
-       struct epitem *epi;
-       struct epoll_event __user *uevent;
+       struct epitem *epi, *tmp;
+       struct epoll_event __user *uevent = esed->events;
        struct wakeup_source *ws;
        poll_table pt;
 
        init_poll_funcptr(&pt, NULL);
+       esed->res = 0;
 
        /*
         * We can loop without lock because we are passed a task private list.
         * Items cannot vanish during the loop because ep_scan_ready_list() is
         * holding "mtx" during this call.
         */
-       for (esed->res = 0, uevent = esed->events;
-            !list_empty(head) && esed->res < esed->maxevents;) {
-               epi = list_first_entry(head, struct epitem, rdllink);
+       list_for_each_entry_safe(epi, tmp, head, rdllink) {
+               if (esed->res >= esed->maxevents)
+                       break;
 
                /*
                 * Activate ep->ws before deactivating epi->ws to prevent
@@ -1658,42 +1659,42 @@ static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head
 
                list_del_init(&epi->rdllink);
 
-               revents = ep_item_poll(epi, &pt, 1);
-
                /*
                 * If the event mask intersect the caller-requested one,
                 * deliver the event to userspace. Again, ep_scan_ready_list()
-                * is holding "mtx", so no operations coming from userspace
+                * is holding ep->mtx, so no operations coming from userspace
                 * can change the item.
                 */
-               if (revents) {
-                       if (__put_user(revents, &uevent->events) ||
-                           __put_user(epi->event.data, &uevent->data)) {
-                               list_add(&epi->rdllink, head);
-                               ep_pm_stay_awake(epi);
-                               if (!esed->res)
-                                       esed->res = -EFAULT;
-                               return 0;
-                       }
-                       esed->res++;
-                       uevent++;
-                       if (epi->event.events & EPOLLONESHOT)
-                               epi->event.events &= EP_PRIVATE_BITS;
-                       else if (!(epi->event.events & EPOLLET)) {
-                               /*
-                                * If this file has been added with Level
-                                * Trigger mode, we need to insert back inside
-                                * the ready list, so that the next call to
-                                * epoll_wait() will check again the events
-                                * availability. At this point, no one can insert
-                                * into ep->rdllist besides us. The epoll_ctl()
-                                * callers are locked out by
-                                * ep_scan_ready_list() holding "mtx" and the
-                                * poll callback will queue them in ep->ovflist.
-                                */
-                               list_add_tail(&epi->rdllink, &ep->rdllist);
-                               ep_pm_stay_awake(epi);
-                       }
+               revents = ep_item_poll(epi, &pt, 1);
+               if (!revents)
+                       continue;
+
+               if (__put_user(revents, &uevent->events) ||
+                   __put_user(epi->event.data, &uevent->data)) {
+                       list_add(&epi->rdllink, head);
+                       ep_pm_stay_awake(epi);
+                       if (!esed->res)
+                               esed->res = -EFAULT;
+                       return 0;
+               }
+               esed->res++;
+               uevent++;
+               if (epi->event.events & EPOLLONESHOT)
+                       epi->event.events &= EP_PRIVATE_BITS;
+               else if (!(epi->event.events & EPOLLET)) {
+                       /*
+                        * If this file has been added with Level
+                        * Trigger mode, we need to insert back inside
+                        * the ready list, so that the next call to
+                        * epoll_wait() will check again the events
+                        * availability. At this point, no one can insert
+                        * into ep->rdllist besides us. The epoll_ctl()
+                        * callers are locked out by
+                        * ep_scan_ready_list() holding "mtx" and the
+                        * poll callback will queue them in ep->ovflist.
+                        */
+                       list_add_tail(&epi->rdllink, &ep->rdllist);
+                       ep_pm_stay_awake(epi);
                }
        }