git.baikalelectronics.ru Git - kernel.git/commitdiff
sched, cpuset: Fix dl_cpu_busy() panic due to empty cs->cpus_allowed
authorWaiman Long <longman@redhat.com>
Wed, 3 Aug 2022 01:54:51 +0000 (21:54 -0400)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 17 Aug 2022 12:24:14 +0000 (14:24 +0200)
[ Upstream commit 4b193e6a7f9115d389d35c1e707975a03ee54ccf ]

With cgroup v2, the cpuset's cpus_allowed mask can be empty indicating
that the cpuset will just use the effective CPUs of its parent. So
cpuset_can_attach() can call task_can_attach() with an empty mask.
This can lead to cpumask_any_and() returning nr_cpu_ids, causing the call
to dl_bw_of() to crash due to percpu value access of an out of bound
CPU value. For example:

[80468.182258] BUG: unable to handle page fault for address: ffffffff8b6648b0
  :
[80468.191019] RIP: 0010:dl_cpu_busy+0x30/0x2b0
  :
[80468.207946] Call Trace:
[80468.208947]  cpuset_can_attach+0xa0/0x140
[80468.209953]  cgroup_migrate_execute+0x8c/0x490
[80468.210931]  cgroup_update_dfl_csses+0x254/0x270
[80468.211898]  cgroup_subtree_control_write+0x322/0x400
[80468.212854]  kernfs_fop_write_iter+0x11c/0x1b0
[80468.213777]  new_sync_write+0x11f/0x1b0
[80468.214689]  vfs_write+0x1eb/0x280
[80468.215592]  ksys_write+0x5f/0xe0
[80468.216463]  do_syscall_64+0x5c/0x80
[80468.224287]  entry_SYSCALL_64_after_hwframe+0x44/0xae

Fix that by using effective_cpus instead. For cgroup v1, effective_cpus
is the same as cpus_allowed. For v2, effective_cpus is the real cpumask
to be used by tasks within the cpuset anyway.

Also update task_can_attach()'s 2nd argument name to cs_effective_cpus to
reflect the change. In addition, a check is added to task_can_attach()
to guard against the possibility that cpumask_any_and() may return a
value >= nr_cpu_ids.

Fixes: d7c5c8a758f2 ("sched/deadline: Fix bandwidth check/update when migrating tasks between exclusive cpusets")
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lore.kernel.org/r/20220803015451.2219567-1-longman@redhat.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/linux/sched.h
kernel/cgroup/cpuset.c
kernel/sched/core.c

index ad7ff332a0ac8b19002a5f00587d93cb9a1eb09a..dcba347cbffa13d59f9000b2e332d55cab4d8bca 100644 (file)
@@ -1797,7 +1797,7 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
 }
 
 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
+extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
index 31f94c6ea0a551455bc052452b8478fcf7b3dc53..9c5b659db63f499f83f01c400c902418febbb62a 100644 (file)
@@ -2199,7 +2199,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
                goto out_unlock;
 
        cgroup_taskset_for_each(task, css, tset) {
-               ret = task_can_attach(task, cs->cpus_allowed);
+               ret = task_can_attach(task, cs->effective_cpus);
                if (ret)
                        goto out_unlock;
                ret = security_task_setscheduler(task);
index 154506e4d1f5f97ad1f135e05cf46023056e9da7..5c7937b504d2c874c098bcf680ea12960f73c74e 100644 (file)
@@ -8741,7 +8741,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
 }
 
 int task_can_attach(struct task_struct *p,
-                   const struct cpumask *cs_cpus_allowed)
+                   const struct cpumask *cs_effective_cpus)
 {
        int ret = 0;
 
@@ -8760,9 +8760,11 @@ int task_can_attach(struct task_struct *p,
        }
 
        if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
-                                             cs_cpus_allowed)) {
-               int cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
+                                             cs_effective_cpus)) {
+               int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
 
+               if (unlikely(cpu >= nr_cpu_ids))
+                       return -EINVAL;
                ret = dl_cpu_busy(cpu, p);
        }