git.baikalelectronics.ru Git - kernel.git/commitdiff
x86/resctrl: Apply READ_ONCE/WRITE_ONCE to task_struct.{rmid,closid}
author: Valentin Schneider <valentin.schneider@arm.com>
Thu, 17 Dec 2020 22:31:21 +0000 (14:31 -0800)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 11 Mar 2023 15:44:16 +0000 (16:44 +0100)
commit cc8279d2dbd9e67ed43306f9ac734bc68a91d94c upstream.

A CPU's current task can have its {closid, rmid} fields read locally
while they are being concurrently written to from another CPU.
This can happen anytime __resctrl_sched_in() races with either
__rdtgroup_move_task() or rdt_move_group_tasks().

Prevent load / store tearing for those accesses by giving them the
READ_ONCE() / WRITE_ONCE() treatment.

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/9921fda88ad81afb9885b517fbe864a2bc7c35a9.1608243147.git.reinette.chatre@intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/resctrl_sched.h
arch/x86/kernel/cpu/resctrl/rdtgroup.c

index f6b7fe2833cc72a78bed2c0cad3b59b8c6eb4f23..c1921b5e13cde6f4cdaee2ff90376fd84d1bb1db 100644 (file)
@@ -56,19 +56,22 @@ static void __resctrl_sched_in(void)
        struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
        u32 closid = state->default_closid;
        u32 rmid = state->default_rmid;
+       u32 tmp;
 
        /*
         * If this task has a closid/rmid assigned, use it.
         * Else use the closid/rmid assigned to this cpu.
         */
        if (static_branch_likely(&rdt_alloc_enable_key)) {
-               if (current->closid)
-                       closid = current->closid;
+               tmp = READ_ONCE(current->closid);
+               if (tmp)
+                       closid = tmp;
        }
 
        if (static_branch_likely(&rdt_mon_enable_key)) {
-               if (current->rmid)
-                       rmid = current->rmid;
+               tmp = READ_ONCE(current->rmid);
+               if (tmp)
+                       rmid = tmp;
        }
 
        if (closid != state->cur_closid || rmid != state->cur_rmid) {
index 8d6023e6ad9e389ae261438b739f8517094c0372..56b1f186e50930c56270b941ede87da67318b4cc 100644 (file)
@@ -563,11 +563,11 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
         */
 
        if (rdtgrp->type == RDTCTRL_GROUP) {
-               tsk->closid = rdtgrp->closid;
-               tsk->rmid = rdtgrp->mon.rmid;
+               WRITE_ONCE(tsk->closid, rdtgrp->closid);
+               WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
        } else if (rdtgrp->type == RDTMON_GROUP) {
                if (rdtgrp->mon.parent->closid == tsk->closid) {
-                       tsk->rmid = rdtgrp->mon.rmid;
+                       WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
                } else {
                        rdt_last_cmd_puts("Can't move task to different control group\n");
                        return -EINVAL;
@@ -2177,8 +2177,8 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
        for_each_process_thread(p, t) {
                if (!from || is_closid_match(t, from) ||
                    is_rmid_match(t, from)) {
-                       t->closid = to->closid;
-                       t->rmid = to->mon.rmid;
+                       WRITE_ONCE(t->closid, to->closid);
+                       WRITE_ONCE(t->rmid, to->mon.rmid);
 
                        /*
                         * Order the closid/rmid stores above before the loads