locking/rwsem: Simplify the is-owner-spinnable checks
author    Oleg Nesterov <oleg@redhat.com>
          Fri, 18 May 2018 16:55:35 +0000 (18:55 +0200)
committer Ingo Molnar <mingo@kernel.org>
          Fri, 25 May 2018 06:11:47 +0000 (08:11 +0200)
Add the trivial owner_on_cpu() helper for rwsem_can_spin_on_owner() and
rwsem_spin_on_owner(); it also makes rwsem_can_spin_on_owner() a bit
clearer.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Cc: Amir Goldstein <amir73il@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Jan Kara <jack@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Theodore Y. Ts'o <tytso@mit.edu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180518165534.GA22348@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/rwsem-xadd.c

index a903367793758f3e1cc52ab34c18f1bfa78f38e3..3064c50e181e19ea8acb537fd2db9de015fe9e18 100644
@@ -347,6 +347,15 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
        }
 }
 
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+       /*
+        * As lock holder preemption issue, we both skip spinning if
+        * task is not on cpu or its cpu is preempted
+        */
+       return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+}
+
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
        struct task_struct *owner;
@@ -359,17 +368,10 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 
        rcu_read_lock();
        owner = READ_ONCE(sem->owner);
-       if (!owner || !is_rwsem_owner_spinnable(owner)) {
-               ret = !owner;   /* !owner is spinnable */
-               goto done;
+       if (owner) {
+               ret = is_rwsem_owner_spinnable(owner) &&
+                     owner_on_cpu(owner);
        }
-
-       /*
-        * As lock holder preemption issue, we both skip spinning if task is not
-        * on cpu or its cpu is preempted
-        */
-       ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-done:
        rcu_read_unlock();
        return ret;
 }
@@ -398,8 +400,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
                 * abort spinning when need_resched or owner is not running or
                 * owner's cpu is preempted.
                 */
-               if (!owner->on_cpu || need_resched() ||
-                               vcpu_is_preempted(task_cpu(owner))) {
+               if (need_resched() || !owner_on_cpu(owner)) {
                        rcu_read_unlock();
                        return false;
                }
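
For reference, a sketch of how the spinnable check reads once this patch is
applied, assembled from the hunks above. Lines outside the shown hunk context
are elided rather than guessed; the "bool ret = true" initialization is implied
by the new logic (a NULL owner must still count as spinnable) rather than shown
in the hunks.

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Skip spinning if the lock holder is not running on a CPU or
	 * its CPU has been preempted (lock holder preemption issue).
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;	/* implied: no owner means spinnable */

	/* ... function preamble outside the hunk context elided ... */

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (owner) {
		/* Spin only on a spinnable owner that is currently on a CPU. */
		ret = is_rwsem_owner_spinnable(owner) &&
		      owner_on_cpu(owner);
	}
	rcu_read_unlock();
	return ret;
}

The same helper replaces the open-coded on_cpu/vcpu_is_preempted test in the
rwsem_spin_on_owner() loop, so both call sites now share one check.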