bpf: add test cases for new BPF_J{LT, LE, SLT, SLE} instructions
author	Daniel Borkmann <daniel@iogearbox.net>
	Wed, 9 Aug 2017 23:40:03 +0000 (01:40 +0200)
committer	David S. Miller <davem@davemloft.net>
	Wed, 9 Aug 2017 23:53:57 +0000 (16:53 -0700)
Add test cases to the verifier selftest suite in order to verify that
i) direct packet access and ii) dynamic map value access are working
with the changes related to the new instructions.
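
For context, the direct packet access cases added below (test25..test28)
exercise the bound-check pattern that a restricted-C tc classifier writes
as a plain '<' or '<=' comparison on the packet pointers; with the new
instructions the compiler can emit BPF_JLT/BPF_JLE for it directly instead
of swapping operands for BPF_JGT/BPF_JGE. A minimal sketch of that pattern
(not part of this commit; program and section names are illustrative):

  /* Restricted-C counterpart of the "marking on <" cases: on the true
   * branch the verifier must know data_end > data + 8, so the one-byte
   * read is in bounds; without such a check it must be rejected.
   */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("tc")
  int mark_on_lt(struct __sk_buff *skb)
  {
  	void *data     = (void *)(long)skb->data;
  	void *data_end = (void *)(long)skb->data_end;

  	if (data + 8 < data_end)
  		return *(unsigned char *)data;

  	return 0;
  }

  char _license[] SEC("license") = "GPL";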

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
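
The "helper access to map" cases further down follow the same idea for map
values: an index loaded out of the value is range-checked before being used
as an offset back into it. The unsigned '<'/'<=' forms bound the access on
their own, while the signed 's<'/'s<=' forms additionally need a lower
bound, which is what the accepted variants encode and the rejected ones
omit. A rough restricted-C sketch, with a hypothetical map, value layout
and section name standing in for what the selftest harness wires up via
fixup_map2:

  /* Illustrative only: map, value layout and names are assumptions,
   * not the selftest's actual setup.
   */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct val {
  	unsigned int index;
  	char buf[60];
  };

  struct {
  	__uint(type, BPF_MAP_TYPE_ARRAY);
  	__uint(max_entries, 1);
  	__type(key, __u32);
  	__type(value, struct val);
  } m SEC(".maps");

  SEC("tracepoint")
  int bound_check(void *ctx)
  {
  	__u32 key = 0;
  	struct val *v = bpf_map_lookup_elem(&m, &key);

  	if (!v)
  		return 0;

  	__u32 uidx = v->index;
  	int   sidx = (int)v->index;

  	/* Unsigned '<': the upper bound alone suffices, since an
  	 * unsigned value cannot be negative.
  	 */
  	if (uidx < 32)
  		((char *)v)[uidx] = 0;

  	/* Signed 's<': the upper bound must be paired with a lower
  	 * bound, otherwise the offset may still be negative ("R1 min
  	 * value is negative" in the rejected cases).
  	 */
  	if (sidx < 32 && sidx >= 0)
  		((char *)v)[sidx] = 0;

  	return 0;
  }

  char _license[] SEC("license") = "GPL";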
tools/testing/selftests/bpf/test_verifier.c

index ab0cd11983265d6f4b3fea380e6cbce6dea055da..1b767127e141fae711992fa7d01c3262def662fc 100644 (file)
@@ -2830,6 +2830,79 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
        },
+       {
+               "direct packet access: test25 (marking on <, good access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "direct packet access: test26 (marking on <, bad access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+               },
+               .result = REJECT,
+               .errstr = "invalid access to packet",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "direct packet access: test27 (marking on <=, good access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "direct packet access: test28 (marking on <=, bad access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+               },
+               .result = REJECT,
+               .errstr = "invalid access to packet",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
        {
                "helper access to packet: test1, valid packet_ptr range",
                .insns = {
@@ -4488,6 +4561,246 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
+       {
+               "helper access to map: bounds check using <, good access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using <, bad access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = REJECT,
+               .errstr = "R1 unbounded memory access",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using <=, good access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using <=, bad access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = REJECT,
+               .errstr = "R1 unbounded memory access",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using s<, good access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using s<, good access 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using s<, bad access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = REJECT,
+               .errstr = "R1 min value is negative",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using s<=, good access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using s<=, good access 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using s<=, bad access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = REJECT,
+               .errstr = "R1 min value is negative",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
        {
                "map element value is preserved across register spilling",
                .insns = {