git.baikalelectronics.ru Git - kernel.git/commitdiff
ftrace: Protect ftrace_graph_hash with ftrace_sync
author Steven Rostedt (VMware) <rostedt@goodmis.org>
Wed, 5 Feb 2020 14:20:32 +0000 (09:20 -0500)
committer Steven Rostedt (VMware) <rostedt@goodmis.org>
Wed, 5 Feb 2020 22:16:42 +0000 (17:16 -0500)
As the function_graph tracer can run when RCU is not "watching", it can not
be protected by synchronize_rcu(); it requires running a task on each CPU
before the old hash can be freed. schedule_on_each_cpu(ftrace_sync) needs to
be used instead.
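
For context (not part of this patch), a rough sketch of the updater-side
pattern this relies on; the body of ftrace_sync() shown here is approximate:

    #include <linux/workqueue.h>	/* schedule_on_each_cpu(), struct work_struct */

    static void ftrace_sync(struct work_struct *work)
    {
    	/*
    	 * Empty stub: schedule_on_each_cpu() queues it as a work item on
    	 * every CPU and waits for all of them to complete, so by the time
    	 * it returns every CPU has scheduled and no task can still be
    	 * inside a preempt-disabled reader of the old ftrace_graph_hash.
    	 */
    }

    /* updater side, after swapping in the new hash: */
    schedule_on_each_cpu(ftrace_sync);
    free_ftrace_hash(old_hash);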

Link: https://lore.kernel.org/r/20200205131110.GT2935@paulmck-ThinkPad-P72
Cc: stable@vger.kernel.org
Fixes: cf5c188e481b2 ("ftrace: Convert graph filter to use hash tables")
Reported-by: "Paul E. McKenney" <paulmck@kernel.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
kernel/trace/ftrace.c
kernel/trace/trace.h

index 481ede3eac131cfc1d64572e2fa1a16fb7e59508..3f7ee102868a21b2374735c8ea7492004d178ce7 100644 (file)
@@ -5867,8 +5867,15 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 
                mutex_unlock(&graph_lock);
 
-               /* Wait till all users are no longer using the old hash */
-               synchronize_rcu();
+               /*
+                * We need to do a hard force of sched synchronization.
+                * This is because we use preempt_disable() to do RCU, but
+                * the function tracers can be called where RCU is not watching
+                * (like before user_exit()). We can not rely on the RCU
+                * infrastructure to do the synchronization, thus we must do it
+                * ourselves.
+                */
+               schedule_on_each_cpu(ftrace_sync);
 
                free_ftrace_hash(old_hash);
        }
index 8c52f5de9384498fb9feab77c47197c03d2de25a..3c75d29bd861806f8292388867091280585c9bd2 100644 (file)
@@ -979,6 +979,7 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
         * Have to open code "rcu_dereference_sched()" because the
         * function graph tracer can be called when RCU is not
         * "watching".
+        * Protected with schedule_on_each_cpu(ftrace_sync)
         */
        hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
 
@@ -1031,6 +1032,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
         * Have to open code "rcu_dereference_sched()" because the
         * function graph tracer can be called when RCU is not
         * "watching".
+        * Protected with schedule_on_each_cpu(ftrace_sync)
         */
        notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
                                                 !preemptible());
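
On the reader side, the open-coded rcu_dereference_sched() above pairs with
that schedule_on_each_cpu(ftrace_sync) call: readers run with preemption
disabled, and the updater waits for every CPU to schedule before freeing the
old hash. A simplified sketch of ftrace_graph_addr() in this kernel, for
illustration only (the real function also sets the graph recursion bits):

    static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
    {
    	struct ftrace_hash *hash;
    	int ret = 0;

    	/* Readers rely on disabled preemption, not on RCU "watching". */
    	preempt_disable_notrace();

    	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

    	/* An empty hash means "trace everything". */
    	if (ftrace_hash_empty(hash) || ftrace_lookup_ip(hash, trace->func))
    		ret = 1;

    	preempt_enable_notrace();
    	return ret;
    }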