x86/resctrl: Use task_curr() instead of task_struct->on_cpu to prevent unnecessary IPI
author     Reinette Chatre <reinette.chatre@intel.com>
Thu, 17 Dec 2020 22:31:20 +0000 (14:31 -0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 18 Jan 2023 10:42:05 +0000 (11:42 +0100)
[ Upstream commit ed4a0f2dfb7c2acf4b24dc5a3c649f8b386f24ac ]

James reported in [1] that there could be two tasks running on the same CPU
with task_struct->on_cpu set. Using task_struct->on_cpu to test whether a task
is running on a CPU may thus match the old task for that CPU while the
scheduler is mid-switch, and IPI it unnecessarily.

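(For context: during a context switch the scheduler sets the incoming task's
on_cpu before clearing the outgoing task's on_cpu, so both can briefly read
as 1 on the same CPU. A hedged sketch of the relevant helpers, paraphrased
from kernel/sched/core.c and not part of this patch:

    static inline void prepare_task(struct task_struct *next)
    {
            /* Claim the CPU for @next before the switch happens... */
            next->on_cpu = 1;
    }

    static inline void finish_task(struct task_struct *prev)
    {
            /* ...and release @prev only after the switch has completed. */
            smp_store_release(&prev->on_cpu, 0);
    }

In the window between these two writes, a t->on_cpu test matches two tasks.)
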
task_curr() is the correct helper to use. While doing so, move the #ifdef
check of the CONFIG_SMP symbol into a C conditional that decides whether this
helper should be consulted, so that the compiler always checks the code for
correctness.

[1] https://lore.kernel.org/lkml/a782d2f3-d2f6-795f-f4b1-9462205fd581@arm.com

Reported-by: James Morse <james.morse@arm.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/e9e68ce1441a73401e08b641cc3b9a3cf13fe6d4.1608243147.git.reinette.chatre@intel.com
Stable-dep-of: fe1f0714385f ("x86/resctrl: Fix task CLOSID/RMID update race")
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/kernel/cpu/resctrl/rdtgroup.c

index 28f786289fce4cc145dc8ae89890dc4e39804fb0..2c19f2ecfa0326103f9c5eae9f13b5d8178368a9 100644
@@ -2178,19 +2178,15 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
                        t->closid = to->closid;
                        t->rmid = to->mon.rmid;
 
-#ifdef CONFIG_SMP
                        /*
-                        * This is safe on x86 w/o barriers as the ordering
-                        * of writing to task_cpu() and t->on_cpu is
-                        * reverse to the reading here. The detection is
-                        * inaccurate as tasks might move or schedule
-                        * before the smp function call takes place. In
-                        * such a case the function call is pointless, but
+                        * If the task is on a CPU, set the CPU in the mask.
+                        * The detection is inaccurate as tasks might move or
+                        * schedule before the smp function call takes place.
+                        * In such a case the function call is pointless, but
                         * there is no other side effect.
                         */
-                       if (mask && t->on_cpu)
+                       if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
                                cpumask_set_cpu(task_cpu(t), mask);
-#endif
                }
        }
        read_unlock(&tasklist_lock);