arm64: Improve diagnostics when trapping BRK with FAULT_BRK_IMM
author	Will Deacon <will@kernel.org>
Tue, 15 Sep 2020 14:48:09 +0000 (15:48 +0100)
committer	Will Deacon <will@kernel.org>
Fri, 18 Sep 2020 15:35:54 +0000 (16:35 +0100)
When generating instructions at runtime, for example due to kernel text
patching or the BPF JIT, we can emit a trapping BRK instruction if we
are asked to encode an invalid instruction such as an out-of-range
branch. This is indicative of a bug in the caller, and will result in a
crash on executing the generated code. Unfortunately, the message from
the crash is really unhelpful, and mumbles something about ptrace:

  | Unexpected kernel BRK exception at EL1
  | Internal error: ptrace BRK handler: f2000100 [#1] SMP

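For background, the arm64 instruction encoders signal this kind of failure by
returning AARCH64_BREAK_FAULT (a BRK encoding carrying FAULT_BRK_IMM) instead
of a valid instruction, which is what ends up in the generated code. A
simplified, illustrative sketch of that pattern (the helper name is made up;
this is not the kernel's actual encoder) might look like:

  #include <linux/printk.h>
  #include <linux/sizes.h>
  #include <linux/types.h>
  #include <asm/debug-monitors.h>	/* AARCH64_BREAK_FAULT */

  /*
   * Sketch only: return a trapping BRK encoding when the requested
   * branch offset cannot be encoded, rather than a valid instruction.
   */
  static u32 sketch_gen_branch_imm(long offset)
  {
  	/* B/BL take a signed 26-bit word offset, i.e. roughly +/-128MB. */
  	if (offset < -SZ_128M || offset >= SZ_128M) {
  		pr_err("branch offset %ld out of range\n", offset);
  		return AARCH64_BREAK_FAULT;	/* BRK #FAULT_BRK_IMM */
  	}

  	/* B: opcode 0b000101 in bits [31:26], imm26 in bits [25:0]. */
  	return 0x14000000 | (((u32)(offset >> 2)) & 0x03ffffff);
  }
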
We can do better than this. Install a break handler for FAULT_BRK_IMM,
which is the immediate used to encode the "I've been asked to generate
an invalid instruction" error, and triage the faulting PC to determine
whether or not the failure occurred in the BPF JIT.

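With the new hook installed, the first line of the report names the offending
generator and the faulting PC; based on the pr_err() format added below, the
output becomes roughly (faulting PC shown as a placeholder):

  | BPF JIT generated an invalid instruction at <faulting PC>!
  | Internal error: BRK handler: f2000100 [#1] SMP

The handler still returns DBG_HOOK_ERROR, so the existing die() path runs and
the crash is preserved; only the diagnostics improve.
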
Link: https://lore.kernel.org/r/20200915141707.GB26439@willie-the-truck
Reported-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/extable.h
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/traps.c
arch/arm64/mm/extable.c

index 840a35ed92ec8dd54a8d772420e936e54fd7098c..b15eb4a3e6b20830e916720fb25503367f7818b9 100644 (file)
@@ -22,6 +22,15 @@ struct exception_table_entry
 
 #define ARCH_HAS_RELATIVE_EXTABLE
 
+static inline bool in_bpf_jit(struct pt_regs *regs)
+{
+       if (!IS_ENABLED(CONFIG_BPF_JIT))
+               return false;
+
+       return regs->pc >= BPF_JIT_REGION_START &&
+              regs->pc < BPF_JIT_REGION_END;
+}
+
 #ifdef CONFIG_BPF_JIT
 int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
                              struct pt_regs *regs);
index 7310a4f7f9931ab904e391740badbf109145859e..fa76151de6ff148372963f91521af354931fc115 100644 (file)
@@ -384,7 +384,7 @@ void __init debug_traps_init(void)
        hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
                              TRAP_TRACE, "single-step handler");
        hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
-                             TRAP_BRKPT, "ptrace BRK handler");
+                             TRAP_BRKPT, "BRK handler");
 }
 
 /* Re-enable single step for syscall restarting. */
index 13ebd5ca20706a0746c3a30b48343772e29171b7..00e92c9f338c05dae948582a252225e7add4ca1d 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/extable.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
 #include <asm/traps.h>
@@ -994,6 +995,21 @@ static struct break_hook bug_break_hook = {
        .imm = BUG_BRK_IMM,
 };
 
+static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
+{
+       pr_err("%s generated an invalid instruction at %pS!\n",
+               in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
+               (void *)instruction_pointer(regs));
+
+       /* We cannot handle this */
+       return DBG_HOOK_ERROR;
+}
+
+static struct break_hook fault_break_hook = {
+       .fn = reserved_fault_handler,
+       .imm = FAULT_BRK_IMM,
+};
+
 #ifdef CONFIG_KASAN_SW_TAGS
 
 #define KASAN_ESR_RECOVER      0x20
@@ -1059,6 +1075,7 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
 void __init trap_init(void)
 {
        register_kernel_break_hook(&bug_break_hook);
+       register_kernel_break_hook(&fault_break_hook);
 #ifdef CONFIG_KASAN_SW_TAGS
        register_kernel_break_hook(&kasan_break_hook);
 #endif
index eee1732ab6cd38e3cdf61a0f139d95b959941e6d..aa0060178343a88a4d25f362ec8b0db2d2bda878 100644 (file)
@@ -14,9 +14,7 @@ int fixup_exception(struct pt_regs *regs)
        if (!fixup)
                return 0;
 
-       if (IS_ENABLED(CONFIG_BPF_JIT) &&
-           regs->pc >= BPF_JIT_REGION_START &&
-           regs->pc < BPF_JIT_REGION_END)
+       if (in_bpf_jit(regs))
                return arm64_bpf_fixup_exception(fixup, regs);
 
        regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;