arm64: patch_text: Fixup last cpu should be master
author    Guo Ren <guoren@linux.alibaba.com>
          Thu, 7 Apr 2022 07:33:20 +0000 (15:33 +0800)
committer Will Deacon <will@kernel.org>
          Fri, 8 Apr 2022 10:43:46 +0000 (11:43 +0100)
These patch_text implementations use the stop_machine_cpuslocked
infrastructure with an atomic cpu_count. The original idea: while the
master CPU patches the text, the other CPUs should be waiting for it.
But the current implementation takes the first CPU to arrive as the
master, which cannot guarantee that the remaining CPUs are already
waiting. This patch makes the last CPU to arrive the master instead,
eliminating that potential risk.
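For reference, a minimal sketch of the call path the message refers to,
assuming the usual arm64 layout around this callback; the struct fields
beyond those visible in the hunk below and the caller body are
approximations, not taken from this commit:

    /*
     * Sketch only: every online CPU is funneled into the same callback via
     * stop_machine_cpuslocked(), and the atomic cpu_count decides which CPU
     * performs the patching while the rest wait.
     */
    struct aarch64_insn_patch {
            void            **text_addrs;  /* addresses being patched */
            u32             *new_insns;    /* replacement instructions */
            int             insn_cnt;      /* number of entries */
            atomic_t        cpu_count;     /* rendezvous counter */
    };

    static int __kprobes aarch64_insn_patch_text_cb(void *arg);

    int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
    {
            struct aarch64_insn_patch patch = {
                    .text_addrs = addrs,
                    .new_insns  = insns,
                    .insn_cnt   = cnt,
                    .cpu_count  = ATOMIC_INIT(0),
            };

            return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
                                           cpu_online_mask);
    }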

Fixes: e547c45eafa1 ("arm64: introduce interfaces to hotpatch kernel and module code")
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: <stable@vger.kernel.org>
Link: https://lore.kernel.org/r/20220407073323.743224-2-guoren@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/kernel/patching.c

index 771f543464e060729740949d5edf7f63762e43a9..33e0fabc0b79b7ba7794c9b3522d3cb194f65876 100644
@@ -117,8 +117,8 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
        int i, ret = 0;
        struct aarch64_insn_patch *pp = arg;
 
-       /* The first CPU becomes master */
-       if (atomic_inc_return(&pp->cpu_count) == 1) {
+       /* The last CPU becomes master */
+       if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
                for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                        ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                             pp->new_insns[i]);
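For context, a sketch of the whole callback after this change; the
non-master branch and the final increment are not part of the hunk above
and are reproduced here as an assumption about the surrounding function:

    static int __kprobes aarch64_insn_patch_text_cb(void *arg)
    {
            int i, ret = 0;
            struct aarch64_insn_patch *pp = arg;

            /* The last CPU to increment cpu_count becomes the master. */
            if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
                    for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                            ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                                 pp->new_insns[i]);
                    /* One more increment releases the spinning CPUs below. */
                    atomic_inc(&pp->cpu_count);
            } else {
                    /*
                     * Non-master CPUs have already bumped cpu_count and wait
                     * here until the master's extra increment signals that
                     * the new instructions are in place.
                     */
                    while (atomic_read(&pp->cpu_count) <= num_online_cpus())
                            cpu_relax();
                    isb();
            }

            return ret;
    }

With the first CPU as master, patching could begin before the remaining
CPUs were known to be waiting; with the last CPU as master, the check
against num_online_cpus() only succeeds once every other online CPU has
entered the callback and incremented the counter.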