git.baikalelectronics.ru Git - kernel.git/commitdiff
powerpc: Replace deprecated CPU-hotplug functions.
author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tue, 3 Aug 2021 14:15:46 +0000 (16:15 +0200)
committer: Michael Ellerman <mpe@ellerman.id.au>
Tue, 10 Aug 2021 13:14:56 +0000 (23:14 +1000)
The functions get_online_cpus() and put_online_cpus() have been
deprecated during the CPU hotplug rework. They map directly to
cpus_read_lock() and cpus_read_unlock().

Replace deprecated CPU-hotplug functions with the official version.
The behavior remains unchanged.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210803141621.780504-4-bigeasy@linutronix.de
arch/powerpc/kernel/rtasd.c
arch/powerpc/kvm/book3s_hv_builtin.c
arch/powerpc/platforms/powernv/idle.c
arch/powerpc/platforms/powernv/opal-imc.c

index 8561dfb33f241c7a44187341dff3e9368f929264..32ee17753eb4abd8b57e35564068d83b50714b54 100644 (file)
@@ -429,7 +429,7 @@ static void rtas_event_scan(struct work_struct *w)
 
        do_event_scan();
 
-       get_online_cpus();
+       cpus_read_lock();
 
        /* raw_ OK because just using CPU as starting point. */
        cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
@@ -451,7 +451,7 @@ static void rtas_event_scan(struct work_struct *w)
        schedule_delayed_work_on(cpu, &event_scan_work,
                __round_jiffies_relative(event_scan_delay, cpu));
 
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 #ifdef CONFIG_PPC64
index be8ef1c5b1bfb187cf3b06ac914889c737985491..fcf4760a3a0ea27ae04b12819fd7fa9b10bad749 100644 (file)
@@ -137,23 +137,23 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
  * exist in the system. We use a counter of VMs to track this.
  *
  * One of the operations we need to block is onlining of secondaries, so we
- * protect hv_vm_count with get/put_online_cpus().
+ * protect hv_vm_count with cpus_read_lock/unlock().
  */
 static atomic_t hv_vm_count;
 
 void kvm_hv_vm_activated(void)
 {
-       get_online_cpus();
+       cpus_read_lock();
        atomic_inc(&hv_vm_count);
-       put_online_cpus();
+       cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);
 
 void kvm_hv_vm_deactivated(void)
 {
-       get_online_cpus();
+       cpus_read_lock();
        atomic_dec(&hv_vm_count);
-       put_online_cpus();
+       cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);
 
index 528a7e0cf83aac9961242fb07f3fc349a11b301d..aa27689b832dbddb18d8cee9fdc996b74e06fa1d 100644 (file)
@@ -199,12 +199,12 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
         */
        power7_fastsleep_workaround_exit = false;
 
-       get_online_cpus();
+       cpus_read_lock();
        primary_thread_mask = cpu_online_cores_map();
        on_each_cpu_mask(&primary_thread_mask,
                                pnv_fastsleep_workaround_apply,
                                &err, 1);
-       put_online_cpus();
+       cpus_read_unlock();
        if (err) {
                pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
                goto fail;
index 7824cc364bc408b2e9af9ffceddb5aa911f79c81..ba02a75c1410225201fbbe935cdadf10682bee35 100644 (file)
@@ -186,7 +186,7 @@ static void disable_nest_pmu_counters(void)
        int nid, cpu;
        const struct cpumask *l_cpumask;
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_node_with_cpus(nid) {
                l_cpumask = cpumask_of_node(nid);
                cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
@@ -195,7 +195,7 @@ static void disable_nest_pmu_counters(void)
                opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
                                       get_hard_smp_processor_id(cpu));
        }
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 static void disable_core_pmu_counters(void)
@@ -203,7 +203,7 @@ static void disable_core_pmu_counters(void)
        cpumask_t cores_map;
        int cpu, rc;
 
-       get_online_cpus();
+       cpus_read_lock();
        /* Disable the IMC Core functions */
        cores_map = cpu_online_cores_map();
        for_each_cpu(cpu, &cores_map) {
@@ -213,7 +213,7 @@ static void disable_core_pmu_counters(void)
                        pr_err("%s: Failed to stop Core (cpu = %d)\n",
                                __FUNCTION__, cpu);
        }
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 int get_max_nest_dev(void)