git.baikalelectronics.ru Git - kernel.git/commitdiff
x86/cpu: Cleanup the untrain mess
author: Peter Zijlstra <peterz@infradead.org>
Mon, 14 Aug 2023 11:44:34 +0000 (13:44 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 26 Aug 2023 11:26:59 +0000 (13:26 +0200)
commit e7c25c441e9e0fa75b4c83e0b26306b702cfe90d upstream.

Since there can only be one active return_thunk, there only needs to be
one (matching) untrain_ret. It fundamentally doesn't make sense to
allow multiple untrain_ret at the same time.

Fold all the 3 different untrain methods into a single (temporary)
helper stub.

Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121149.042774962@infradead.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs.c
arch/x86/lib/retpoline.S

index 08262e6badc34ac5a7631fbc5373cb2d6121b71a..2f123d4fb85b5bf86c9953dc8bc4c75e44009417 100644 (file)
 .endm
 
 #ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET   "call retbleed_untrain_ret"
+#define CALL_UNTRAIN_RET       "call entry_untrain_ret"
 #else
-#define CALL_ZEN_UNTRAIN_RET   ""
+#define CALL_UNTRAIN_RET       ""
 #endif
 
 /*
        defined(CONFIG_CPU_SRSO)
        ANNOTATE_UNRET_END
        ALTERNATIVE_2 "",                                               \
-                     CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,          \
+                     CALL_UNTRAIN_RET, X86_FEATURE_UNRET,              \
                      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
 #endif
-
-#ifdef CONFIG_CPU_SRSO
-       ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-                         "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
-#endif
 .endm
 
 #else /* __ASSEMBLY__ */
@@ -224,6 +219,7 @@ extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_alias_untrain_ret(void);
 
+extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);
 
 #ifdef CONFIG_RETPOLINE
index 07eebfa39f5b6d43ef0367131816d16291d7dc09..c2f8f98f20d53e3560ba932a1f886757ea3a8289 100644 (file)
@@ -2429,6 +2429,7 @@ static void __init srso_select_mitigation(void)
                         * like ftrace, static_call, etc.
                         */
                        setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+                       setup_force_cpu_cap(X86_FEATURE_UNRET);
 
                        if (boot_cpu_data.x86 == 0x19) {
                                setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
index 4ba7739be027998b29dc19e4d7402f7b8d1ca70c..08a9faa220fe1f89c840a6d0beb14d6d8f84348b 100644 (file)
@@ -233,6 +233,13 @@ SYM_CODE_START(srso_return_thunk)
        ud2
 SYM_CODE_END(srso_return_thunk)
 
+SYM_FUNC_START(entry_untrain_ret)
+       ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+                     "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+                     "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+SYM_FUNC_END(entry_untrain_ret)
+__EXPORT_THUNK(entry_untrain_ret)
+
 SYM_CODE_START(__x86_return_thunk)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR