tracing: Replace deprecated CPU-hotplug functions.
author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
          Tue, 3 Aug 2021 14:16:19 +0000 (16:16 +0200)
committer Steven Rostedt (VMware) <rostedt@goodmis.org>
          Tue, 17 Aug 2021 19:47:14 +0000 (15:47 -0400)
The functions get_online_cpus() and put_online_cpus() have been
deprecated during the CPU hotplug rework. They map directly to
cpus_read_lock() and cpus_read_unlock().

Replace deprecated CPU-hotplug functions with the official version.
The behavior remains unchanged.
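
For context, a minimal sketch (not part of this patch) of the pattern the hunks below follow; the walk_online_cpus() helper is hypothetical and only illustrates the old-to-new mapping:

    #include <linux/cpu.h>
    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Hypothetical helper: hold the CPU-hotplug read lock while walking online CPUs. */
    static void walk_online_cpus(void)
    {
            unsigned int cpu;

            cpus_read_lock();               /* was: get_online_cpus() */
            for_each_online_cpu(cpu)
                    pr_info("CPU %u is online\n", cpu);
            cpus_read_unlock();             /* was: put_online_cpus() */
    }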

Link: https://lkml.kernel.org/r/20210803141621.780504-37-bigeasy@linutronix.de
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Acked-by: Daniel Bristot de Oliveira <bristot@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
kernel/trace/ring_buffer.c
kernel/trace/trace_hwlat.c
kernel/trace/trace_osnoise.c

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e592d1df6f888a3da7f7b75d58957f2d332eb18d..c5a3fbf19617eded741d93bb815da66e44c65156 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2111,7 +2111,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
                        }
                }
 
-               get_online_cpus();
+               cpus_read_lock();
                /*
                 * Fire off all the required work handlers
                 * We can't schedule on offline CPUs, but it's not necessary
@@ -2143,7 +2143,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
                        cpu_buffer->nr_pages_to_update = 0;
                }
 
-               put_online_cpus();
+               cpus_read_unlock();
        } else {
                cpu_buffer = buffer->buffers[cpu_id];
 
@@ -2171,7 +2171,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
                        goto out_err;
                }
 
-               get_online_cpus();
+               cpus_read_lock();
 
                /* Can't run something on an offline CPU. */
                if (!cpu_online(cpu_id))
@@ -2183,7 +2183,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
                }
 
                cpu_buffer->nr_pages_to_update = 0;
-               put_online_cpus();
+               cpus_read_unlock();
        }
 
  out:
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 14f46aae1981f8ef61be8bcb0ff59e6e5e5c525e..1b83d75eb103b7734b4cd3ba48550b3280d67510 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -325,10 +325,10 @@ static void move_to_next_cpu(void)
        if (!cpumask_equal(current_mask, current->cpus_ptr))
                goto change_mode;
 
-       get_online_cpus();
+       cpus_read_lock();
        cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
        next_cpu = cpumask_next(raw_smp_processor_id(), current_mask);
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (next_cpu >= nr_cpu_ids)
                next_cpu = cpumask_first(current_mask);
@@ -398,7 +398,7 @@ static void stop_single_kthread(void)
        struct hwlat_kthread_data *kdata = get_cpu_data();
        struct task_struct *kthread;
 
-       get_online_cpus();
+       cpus_read_lock();
        kthread = kdata->kthread;
 
        if (!kthread)
@@ -408,7 +408,7 @@ static void stop_single_kthread(void)
        kdata->kthread = NULL;
 
 out_put_cpus:
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 
@@ -425,14 +425,14 @@ static int start_single_kthread(struct trace_array *tr)
        struct task_struct *kthread;
        int next_cpu;
 
-       get_online_cpus();
+       cpus_read_lock();
        if (kdata->kthread)
                goto out_put_cpus;
 
        kthread = kthread_create(kthread_fn, NULL, "hwlatd");
        if (IS_ERR(kthread)) {
                pr_err(BANNER "could not start sampling thread\n");
-               put_online_cpus();
+               cpus_read_unlock();
                return -ENOMEM;
        }
 
@@ -452,7 +452,7 @@ static int start_single_kthread(struct trace_array *tr)
        wake_up_process(kthread);
 
 out_put_cpus:
-       put_online_cpus();
+       cpus_read_unlock();
        return 0;
 }
 
@@ -479,10 +479,10 @@ static void stop_per_cpu_kthreads(void)
 {
        unsigned int cpu;
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu)
                stop_cpu_kthread(cpu);
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 /*
@@ -515,7 +515,7 @@ static void hwlat_hotplug_workfn(struct work_struct *dummy)
 
        mutex_lock(&trace_types_lock);
        mutex_lock(&hwlat_data.lock);
-       get_online_cpus();
+       cpus_read_lock();
 
        if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU)
                goto out_unlock;
@@ -526,7 +526,7 @@ static void hwlat_hotplug_workfn(struct work_struct *dummy)
        start_cpu_kthread(cpu);
 
 out_unlock:
-       put_online_cpus();
+       cpus_read_unlock();
        mutex_unlock(&hwlat_data.lock);
        mutex_unlock(&trace_types_lock);
 }
@@ -582,7 +582,7 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
        unsigned int cpu;
        int retval;
 
-       get_online_cpus();
+       cpus_read_lock();
        /*
         * Run only on CPUs in which hwlat is allowed to run.
         */
@@ -596,12 +596,12 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
                if (retval)
                        goto out_error;
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        return 0;
 
 out_error:
-       put_online_cpus();
+       cpus_read_unlock();
        stop_per_cpu_kthreads();
        return retval;
 }
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index b61eefe5ccf53134044821f09940cd58cc96b8de..65b08b8e5bf8f55ef163e90df1cc6c6a6df27a49 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -1498,12 +1498,12 @@ static void stop_per_cpu_kthreads(void)
 {
        int cpu;
 
-       get_online_cpus();
+       cpus_read_lock();
 
        for_each_online_cpu(cpu)
                stop_kthread(cpu);
 
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 /*
@@ -1551,7 +1551,7 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
        int retval;
        int cpu;
 
-       get_online_cpus();
+       cpus_read_lock();
        /*
         * Run only on CPUs in which trace and osnoise are allowed to run.
         */
@@ -1572,7 +1572,7 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
                }
        }
 
-       put_online_cpus();
+       cpus_read_unlock();
 
        return 0;
 }
@@ -1590,7 +1590,7 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy)
                goto out_unlock_trace;
 
        mutex_lock(&interface_lock);
-       get_online_cpus();
+       cpus_read_lock();
 
        if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
                goto out_unlock;
@@ -1601,7 +1601,7 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy)
        start_kthread(cpu);
 
 out_unlock:
-       put_online_cpus();
+       cpus_read_unlock();
        mutex_unlock(&interface_lock);
 out_unlock_trace:
        mutex_unlock(&trace_types_lock);
@@ -1743,11 +1743,11 @@ osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count,
        /*
         * osnoise_cpumask is read by CPU hotplug operations.
         */
-       get_online_cpus();
+       cpus_read_lock();
 
        cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);
 
-       put_online_cpus();
+       cpus_read_unlock();
        mutex_unlock(&interface_lock);
 
        if (running)