bpf: Remove bpf_lsm_file_mprotect from sleepable list.
author Alexei Starovoitov <ast@kernel.org>
Mon, 31 Aug 2020 20:16:51 +0000 (13:16 -0700)
committer Daniel Borkmann <daniel@iogearbox.net>
Mon, 31 Aug 2020 21:03:57 +0000 (23:03 +0200)
Technically, BPF programs can sleep while attached to bpf_lsm_file_mprotect, but such
programs need to access user memory, so they fall into the might_fault() category.
That means they cannot be called from the file_mprotect LSM hook, which is invoked
with the write lock on mm->mmap_lock held.
Adjust the test accordingly.
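
As context (not part of this commit), a minimal sketch of the kind of sleepable
program involved, assuming the vmlinux.h/bpf_helpers.h setup used by the BPF
selftests; program and variable names are illustrative:

/*
 * Sleepable ("lsm.s/") programs may call bpf_copy_from_user(), which can fault
 * pages in, so they must attach to hooks that are not called with
 * mm->mmap_lock held for write; bprm_committed_creds qualifies,
 * file_mprotect does not.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("lsm.s/bprm_committed_creds")
int BPF_PROG(sleepable_sketch, struct linux_binprm *bprm)
{
	char args[64];

	/* May sleep: the pages backing arg_start can be faulted in. */
	bpf_copy_from_user(args, sizeof(args), (void *)bprm->mm->arg_start);
	return 0;
}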

Also add might_fault() to __bpf_prog_enter_sleepable() to catch such deadlocks early.
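
For reference, and only as a rough, simplified paraphrase of the kernel's
mm/memory.c from this era (assuming CONFIG_DEBUG_ATOMIC_SLEEP; not part of this
diff), the annotation amounts to something like:

/* Simplified sketch of __might_fault(), for illustration only. */
void __might_fault(const char *file, int line)
{
	if (pagefault_disabled())
		return;
	__might_sleep(file, line, 0);
	/*
	 * Handling a user-space fault may need to take mmap_lock for read,
	 * so let lockdep complain if the task already holds it, e.g. for
	 * write as callers of the file_mprotect hook do.
	 */
	if (current->mm)
		might_lock_read(&current->mm->mmap_lock);
}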

Fixes: 4f9aef2a7f98 ("bpf: Introduce sleepable BPF programs")
Fixes: 4021d934d935 ("selftests/bpf: Add sleepable tests")
Reported-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200831201651.82447-1-alexei.starovoitov@gmail.com
kernel/bpf/trampoline.c
kernel/bpf/verifier.c
tools/testing/selftests/bpf/progs/lsm.c

index c2b76545153ced704d2c51d3c54e02e527bc3a8d..7dd523a7e32d65e954dbdc478666cc604eddf8ae 100644 (file)
@@ -409,6 +409,7 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
 void notrace __bpf_prog_enter_sleepable(void)
 {
        rcu_read_lock_trace();
+       might_fault();
 }
 
 void notrace __bpf_prog_exit_sleepable(void)
index b4c22b5ce5a2b0cc90ff55c0fb8d56705d65af78..b4e9c56b8b329a874d0f581eef19890edaf85b18 100644 (file)
@@ -11006,7 +11006,6 @@ static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr)
 /* non exhaustive list of sleepable bpf_lsm_*() functions */
 BTF_SET_START(btf_sleepable_lsm_hooks)
 #ifdef CONFIG_BPF_LSM
-BTF_ID(func, bpf_lsm_file_mprotect)
 BTF_ID(func, bpf_lsm_bprm_committed_creds)
 #else
 BTF_ID_UNUSED
index 49fa6ca99755aa24e9b7130944cf4fc9f21e201d..ff4d343b94b56d6a8e84bf0d9b7b9b498e4ef143 100644 (file)
@@ -36,14 +36,10 @@ int monitored_pid = 0;
 int mprotect_count = 0;
 int bprm_count = 0;
 
-SEC("lsm.s/file_mprotect")
+SEC("lsm/file_mprotect")
 int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
             unsigned long reqprot, unsigned long prot, int ret)
 {
-       char args[64];
-       __u32 key = 0;
-       __u64 *value;
-
        if (ret != 0)
                return ret;
 
@@ -53,18 +49,6 @@ int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
        is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
                    vma->vm_end >= vma->vm_mm->start_stack);
 
-       bpf_copy_from_user(args, sizeof(args), (void *)vma->vm_mm->arg_start);
-
-       value = bpf_map_lookup_elem(&array, &key);
-       if (value)
-               *value = 0;
-       value = bpf_map_lookup_elem(&hash, &key);
-       if (value)
-               *value = 0;
-       value = bpf_map_lookup_elem(&lru_hash, &key);
-       if (value)
-               *value = 0;
-
        if (is_stack && monitored_pid == pid) {
                mprotect_count++;
                ret = -EPERM;
@@ -77,10 +61,26 @@ SEC("lsm.s/bprm_committed_creds")
 int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
 {
        __u32 pid = bpf_get_current_pid_tgid() >> 32;
+       char args[64];
+       __u32 key = 0;
+       __u64 *value;
 
        if (monitored_pid == pid)
                bprm_count++;
 
+       bpf_copy_from_user(args, sizeof(args), (void *)bprm->vma->vm_mm->arg_start);
+       bpf_copy_from_user(args, sizeof(args), (void *)bprm->mm->arg_start);
+
+       value = bpf_map_lookup_elem(&array, &key);
+       if (value)
+               *value = 0;
+       value = bpf_map_lookup_elem(&hash, &key);
+       if (value)
+               *value = 0;
+       value = bpf_map_lookup_elem(&lru_hash, &key);
+       if (value)
+               *value = 0;
+
        return 0;
 }
 SEC("lsm/task_free") /* lsm/ is ok, lsm.s/ fails */