tracing: Disable interrupt or preemption before acquiring arch_spinlock_t
author     Waiman Long <longman@redhat.com>
           Thu, 22 Sep 2022 14:56:22 +0000 (10:56 -0400)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 26 Oct 2022 11:22:21 +0000 (13:22 +0200)
commit 1712d593d2c43dbe2bf22d2a4a74be790f20f4c5 upstream.

It was found that some tracing functions in kernel/trace/trace.c acquire
an arch_spinlock_t with preemption and irqs enabled. An example is the
tracing_saved_cmdlines_size_read() function which intermittently causes
a "BUG: using smp_processor_id() in preemptible" warning when the LTP
read_all_proc test is run.

That is problematic because, if the lock holder is preempted after
acquiring the lock, other CPUs can end up spinning on it for an
arbitrarily long time. Add the necessary preemption- or
interrupt-disabling code in the appropriate places before acquiring an
arch_spinlock_t.

The convention here is to disable preemption for trace_cmdline_lock and
interrupts for max_lock, as sketched below.

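For reference, the resulting pattern looks roughly like this (an
illustrative sketch of the convention, not code taken verbatim from the
patch; the identifiers are the ones used in kernel/trace/trace.c):

	/* trace_cmdline_lock: keep preemption disabled across the hold */
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	/* ... access savedcmd ... */
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();

	/* tr->max_lock: keep interrupts disabled across the hold */
	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	/* ... access tr->cond_snapshot ... */
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();
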
Link: https://lkml.kernel.org/r/20220922145622.1744826-1-longman@redhat.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: stable@vger.kernel.org
Fixes: 117601251602 ("tracing: Add conditional snapshot")
Fixes: ed5a83df142d ("tracing: Introduce saved_cmdlines_size file")
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 55da88f18342f8e87c9bd78de00fde5a44cd65e8..62fd8798b0c4fe0c62eeb99c44181afcbeab72a5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1015,12 +1015,14 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
 {
        void *cond_data = NULL;
 
+       local_irq_disable();
        arch_spin_lock(&tr->max_lock);
 
        if (tr->cond_snapshot)
                cond_data = tr->cond_snapshot->cond_data;
 
        arch_spin_unlock(&tr->max_lock);
+       local_irq_enable();
 
        return cond_data;
 }
@@ -1156,9 +1158,11 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
                goto fail_unlock;
        }
 
+       local_irq_disable();
        arch_spin_lock(&tr->max_lock);
        tr->cond_snapshot = cond_snapshot;
        arch_spin_unlock(&tr->max_lock);
+       local_irq_enable();
 
        mutex_unlock(&trace_types_lock);
 
@@ -1185,6 +1189,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
 {
        int ret = 0;
 
+       local_irq_disable();
        arch_spin_lock(&tr->max_lock);
 
        if (!tr->cond_snapshot)
@@ -1195,6 +1200,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
        }
 
        arch_spin_unlock(&tr->max_lock);
+       local_irq_enable();
 
        return ret;
 }
@@ -1951,6 +1957,11 @@ static size_t tgid_map_max;
 
 #define SAVED_CMDLINES_DEFAULT 128
 #define NO_CMDLINE_MAP UINT_MAX
+/*
+ * Preemption must be disabled before acquiring trace_cmdline_lock.
+ * The various trace_arrays' max_lock must be acquired in a context
+ * where interrupt is disabled.
+ */
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 struct saved_cmdlines_buffer {
        unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
@@ -2163,6 +2174,9 @@ static int trace_save_cmdline(struct task_struct *tsk)
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
+        *
+        * This is called within the scheduler and wake up, so interrupts
+        * had better been disabled and run queue lock been held.
         */
        if (!arch_spin_trylock(&trace_cmdline_lock))
                return 0;
@@ -5199,9 +5213,11 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
        char buf[64];
        int r;
 
+       preempt_disable();
        arch_spin_lock(&trace_cmdline_lock);
        r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
        arch_spin_unlock(&trace_cmdline_lock);
+       preempt_enable();
 
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
@@ -5226,10 +5242,12 @@ static int tracing_resize_saved_cmdlines(unsigned int val)
                return -ENOMEM;
        }
 
+       preempt_disable();
        arch_spin_lock(&trace_cmdline_lock);
        savedcmd_temp = savedcmd;
        savedcmd = s;
        arch_spin_unlock(&trace_cmdline_lock);
+       preempt_enable();
        free_saved_cmdlines_buffer(savedcmd_temp);
 
        return 0;
@@ -5684,10 +5702,12 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
 
 #ifdef CONFIG_TRACER_SNAPSHOT
        if (t->use_max_tr) {
+               local_irq_disable();
                arch_spin_lock(&tr->max_lock);
                if (tr->cond_snapshot)
                        ret = -EBUSY;
                arch_spin_unlock(&tr->max_lock);
+               local_irq_enable();
                if (ret)
                        goto out;
        }
@@ -6767,10 +6787,12 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                goto out;
        }
 
+       local_irq_disable();
        arch_spin_lock(&tr->max_lock);
        if (tr->cond_snapshot)
                ret = -EBUSY;
        arch_spin_unlock(&tr->max_lock);
+       local_irq_enable();
        if (ret)
                goto out;
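
Note that trace_save_cmdline() is intentionally left using
arch_spin_trylock() with no explicit preempt_disable(): as the comment
added by the patch explains, it is called from the scheduler and wakeup
paths, where interrupts are already disabled, so the convention already
holds there.

If the max_lock convention were factored into helpers, a minimal sketch
could look like this (hypothetical helpers, not part of the patch or of
the kernel tree):

	static inline void max_lock_acquire(struct trace_array *tr)
	{
		/* convention: interrupts off while spinning on / holding max_lock */
		local_irq_disable();
		arch_spin_lock(&tr->max_lock);
	}

	static inline void max_lock_release(struct trace_array *tr)
	{
		arch_spin_unlock(&tr->max_lock);
		local_irq_enable();
	}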