s390/smp: enforce lowcore protection on CPU restart
author Alexander Gordeev <agordeev@linux.ibm.com>
Wed, 20 Jul 2022 05:24:03 +0000 (07:24 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 17 Aug 2022 12:24:13 +0000 (14:24 +0200)
[ Upstream commit e3640cb262f0b13be4ad1610c51ef4d84f636c5f ]

As a result of commit 26ea42b4ff74 ("s390/smp: enable DAT before
CPU restart callback is called") the low-address protection bit
gets mistakenly unset in the control register 0 save area of
absolute zero memory. That area is used when a manual PSW restart
happens to hit an offline CPU; in that case low-address protection
for that CPU is dropped.

Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Fixes: 26ea42b4ff74 ("s390/smp: enable DAT before CPU restart callback is called")
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/s390/kernel/setup.c

index 6b1a8697fae8d5c115e8743b49192b10e7e4d8a2..c8e7b3db82e28043469f4d9bbca39f4a1d3651c4 100644 (file)
@@ -507,8 +507,8 @@ static void __init setup_lowcore_dat_on(void)
        S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
-       __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        __ctl_set_bit(0, 28);
+       __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        put_abs_lowcore(restart_flags, RESTART_FLAG_CTLREGS);
        put_abs_lowcore(program_new_psw, lc->program_new_psw);
        for (cr = 0; cr < ARRAY_SIZE(lc->cregs_save_area); cr++)
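The fix is purely an ordering change: CR0's low-address-protection bit must
already be set by __ctl_set_bit(0, 28) when __ctl_store() snapshots the
control registers into cregs_save_area, otherwise the copy later pushed to
absolute lowcore (and loaded by an offline CPU on PSW restart) lacks the
protection bit. Below is a minimal, stand-alone C sketch of that
snapshot-before-modify pitfall. It is an illustration only: ctl_set_bit(),
ctl_store(), cr0 and save_area are made-up stand-ins, not kernel APIs, and
the bit numbering is simplified (s390 counts control-register bits from the
left, so __ctl_set_bit(0, 28) does not map to a plain left shift by 28).

/* Stand-alone model of the snapshot-ordering bug fixed above.
 * "cr0" stands in for control register 0, "save_area" for the
 * absolute-lowcore cregs_save_area entry, and LAP_BIT for the
 * low-address-protection bit. Illustrative names only.
 */
#include <stdint.h>
#include <stdio.h>

#define LAP_BIT 28                        /* modeled low-address protection bit */

static uint64_t cr0;                      /* live "control register 0" */
static uint64_t save_area;                /* copy loaded on "CPU restart" */

static void ctl_set_bit(unsigned int bit) { cr0 |= (uint64_t)1 << bit; }
static void ctl_store(uint64_t *dst)      { *dst = cr0; }

int main(void)
{
	/* Buggy order: snapshot first, then enable protection.
	 * The restart path would load a CR0 copy without the bit. */
	cr0 = 0;
	ctl_store(&save_area);
	ctl_set_bit(LAP_BIT);
	printf("buggy: save_area has LAP = %d\n",
	       (int)((save_area >> LAP_BIT) & 1));   /* prints 0 */

	/* Fixed order (as in the diff): set the bit, then snapshot. */
	cr0 = 0;
	ctl_set_bit(LAP_BIT);
	ctl_store(&save_area);
	printf("fixed: save_area has LAP = %d\n",
	       (int)((save_area >> LAP_BIT) & 1));   /* prints 1 */

	return 0;
}

In the kernel the snapshot is then propagated with put_abs_lowcore() and the
per-register loop shown at the end of the hunk, which is why the store has to
come after every CR0 modification made in setup_lowcore_dat_on().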