selftests/bpf: Tests for state pruning with u32 spill/fill
author    Paul Chaignon <paul@isovalent.com>    Thu, 9 Dec 2021 23:47:00 +0000 (00:47 +0100)
committer Alexei Starovoitov <ast@kernel.org>   Fri, 10 Dec 2021 17:13:21 +0000 (09:13 -0800)
This patch adds tests for the verifier's tracking of spilled <8B
registers. The first two test cases ensure the verifier doesn't
incorrectly prune states in case of <8B spill/fills. The last one simply
checks that a filled u64 register is marked unknown if the register
spilled in the same stack slot was less than 8B.
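
The core of that last check boils down to the following instruction
sequence (a distilled sketch of the pattern the tests exercise, not a
complete test program):

	/* Two u32 spills fill the 8-byte stack slot at fp-8. */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
	/* u64 fill: the 8 bytes read come from two separate u32 spills,
	 * so the verifier must mark r8 as unknown rather than reuse
	 * either spilled register's value or bounds.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8),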

The map value access at the end of the first program is only incorrect
on the R6=32 path. If the precision bit for register R8 isn't
backtracked through the u32 spill/fill, the R6=32 path is pruned at
instruction 9 and the program is incorrectly accepted. The second
program is a variation of the first, with two u32 spills and a single
u64 fill.
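
Concretely: the map's values are 8 bytes long (.fixup_map_hash_8b), and
R8 receives R6's value through the u32 spill/fill, so the final load
behaves as follows on the two paths (a worked trace, not extra test
code):

	r6 = 4:  r0 += r8 (= 4);  *(u32 *)(r0 + 0) reads bytes 4-7 of
	         the 8-byte map value: valid
	r6 = 32: r0 += r8 (= 32); *(u32 *)(r0 + 0) reads far past the
	         8-byte map value: rejected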

The additional instructions to introduce the first pruning point may be
a bit fragile as they depend on the heuristics for pruning points in the
verifier (currently at least 8 instructions and 2 jumps). If the
heuristics are changed, the pruning point may move (e.g., to the
subsequent jump) or disappear, which would cause the test to always pass.
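
For reference, that heuristic sits in is_state_visited() in
kernel/bpf/verifier.c and currently reads roughly as follows
(paraphrased; see the kernel source for the exact code):

	/* Do not add a new state for future pruning if the verifier
	 * hasn't seen at least 2 jumps and 8 instructions since the
	 * last added state.
	 */
	if (env->jmps_processed - env->prev_jmps_processed < 2 &&
	    env->insn_processed - env->prev_insn_processed < 8)
		add_new_state = false;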

Signed-off-by: Paul Chaignon <paul@isovalent.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/verifier/search_pruning.c
tools/testing/selftests/bpf/verifier/spill_fill.c

diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
index 7e50cb80873a5b7c08f607fe27006bda2afc6bf5..682519769fe3c0019e53b151e27c3c4c8d37d5e7 100644
--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
+++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_TRACEPOINT,
 },
+{
+       "precision tracking for u32 spill/fill",
+       .insns = {
+               BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_MOV32_IMM(BPF_REG_6, 32),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_MOV32_IMM(BPF_REG_6, 4),
+               /* Additional insns to introduce a pruning point. */
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_MOV64_IMM(BPF_REG_3, 0),
+               BPF_MOV64_IMM(BPF_REG_3, 0),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 0),
+               /* u32 spill/fill */
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8),
+               BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8),
+               /* out-of-bound map value access for r6=32 */
+               BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+               BPF_LD_MAP_FD(BPF_REG_1, 0),
+               BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+               BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+               BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 15 },
+       .result = REJECT,
+       .errstr = "R0 min value is outside of the allowed memory range",
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+       "precision tracking for u32 spills, u64 fill",
+       .insns = {
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+               BPF_MOV32_IMM(BPF_REG_7, 0xffffffff),
+               /* Additional insns to introduce a pruning point. */
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_3),
+               /* u32 spills, u64 fill */
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8),
+               /* if r8 != X goto pc+1  r8 known in fallthrough branch */
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               /* if r8 == X goto pc+1  condition always true on first
+                * traversal, so starts backtracking to mark r8 as requiring
+                * precision. r7 marked as needing precision. r6 not marked
+                * since it's not tracked.
+                */
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1),
+               /* fails if r8 correctly marked unknown after fill. */
+               BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = REJECT,
+       .errstr = "div by zero",
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
 {
        "allocated_stack",
        .insns = {
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index 7ab3de1087614663dcb6f3cd0f775f9762ba1681..6c907144311f81c4ab39a95e61941cce13ba418f 100644
--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
        .errstr = "invalid access to packet",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
+{
+       "Spill u32 const scalars.  Refill as u64.  Offset to skb->data",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct __sk_buff, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct __sk_buff, data_end)),
+       /* r6 = 0 */
+       BPF_MOV32_IMM(BPF_REG_6, 0),
+       /* r7 = 20 */
+       BPF_MOV32_IMM(BPF_REG_7, 20),
+       /* *(u32 *)(r10 -4) = r6 */
+       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+       /* *(u32 *)(r10 -8) = r7 */
+       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+       /* r4 = *(u64 *)(r10 -8) */
+       BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+       /* r0 = r2 */
+       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv */
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+       /* if (r0 > r3) R0=pkt R2=pkt R3=pkt_end R4=inv */
+       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+       /* r0 = *(u32 *)r2 rejected: r4 is unknown after the u64 fill,
+        * so the check above doesn't prove r2+4 <= data_end
+        */
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = REJECT,
+       .errstr = "invalid access to packet",
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
 {
        "Spill a u32 const scalar.  Refill as u16 from fp-6.  Offset to skb->data",
        .insns = {