x86/cpu: Cleanup the untrain mess

Since there can only be one active return_thunk, there need only be
one (matching) untrain_ret. It fundamentally doesn't make sense to
allow multiple untrain_ret methods at the same time.

Fold all three different untrain methods into a single (temporary)
helper stub.

Fixes: fb3bd914b3 ("x86/srso: Add a Speculative RAS Overflow mitigation")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121149.042774962@infradead.org
commit e7c25c441e (parent 42be649dd1)
Author: Peter Zijlstra, 2023-08-14 13:44:34 +02:00
Committer: Borislav Petkov (AMD)
3 changed files with 13 additions and 14 deletions

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h

@@ -272,9 +272,9 @@
 .endm
 
 #ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET	"call retbleed_untrain_ret"
+#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
 #else
-#define CALL_ZEN_UNTRAIN_RET	""
+#define CALL_UNTRAIN_RET	""
 #endif
 
 /*
@ -293,15 +293,10 @@
defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
VALIDATE_UNRET_END VALIDATE_UNRET_END
ALTERNATIVE_3 "", \ ALTERNATIVE_3 "", \
CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
__stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif #endif
#ifdef CONFIG_CPU_SRSO
ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
"call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
#endif
.endm .endm
.macro UNTRAIN_RET_FROM_CALL .macro UNTRAIN_RET_FROM_CALL
@@ -309,15 +304,10 @@
 	defined(CONFIG_CALL_DEPTH_TRACKING)
 	VALIDATE_UNRET_END
 	ALTERNATIVE_3 "",						\
-		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
 		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
 		      __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
 #endif
-
-#ifdef CONFIG_CPU_SRSO
-	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-			  "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
-#endif
 .endm
@@ -355,6 +345,7 @@ extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_alias_untrain_ret(void);
 
+extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);
 
 extern void (*x86_return_thunk)(void);
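With CALL_UNTRAIN_RET now keyed solely on X86_FEATURE_UNRET, the per-variant
choice moves out of the entry macros and into entry_untrain_ret. Below is a
minimal C model (illustrative only; the enum and helper names are invented
for the sketch, and the real selection happens once at boot when the
alternatives rewriter patches the entry path) of what the single
ALTERNATIVE_3 slot ends up containing:

#include <stdbool.h>

/* Hypothetical model, not kernel code.  Alternatives are applied in
 * order, so a later matching feature overwrites an earlier one. */
enum untrain_site {
	SITE_NOP,		/* no feature set: original "" remains */
	SITE_UNTRAIN_RET,	/* "call entry_untrain_ret"            */
	SITE_ENTRY_IBPB,	/* "call entry_ibpb"                   */
	SITE_CALL_DEPTH,	/* __stringify(RESET_CALL_DEPTH)       */
};

static enum untrain_site pick_untrain_site(bool unret, bool ibpb, bool depth)
{
	enum untrain_site site = SITE_NOP;

	if (unret)
		site = SITE_UNTRAIN_RET;	/* X86_FEATURE_UNRET      */
	if (ibpb)
		site = SITE_ENTRY_IBPB;		/* X86_FEATURE_ENTRY_IBPB */
	if (depth)
		site = SITE_CALL_DEPTH;		/* X86_FEATURE_CALL_DEPTH */
	return site;
}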

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c

@@ -2460,6 +2460,7 @@ static void __init srso_select_mitigation(void)
 		 * like ftrace, static_call, etc.
 		 */
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		setup_force_cpu_cap(X86_FEATURE_UNRET);
 
 		if (boot_cpu_data.x86 == 0x19) {
 			setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
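The forced X86_FEATURE_UNRET is what keeps SRSO working after the fold: the
SRSO-specific ALTERNATIVE_2 sites are gone, so the srso untrain sequences
are only reachable through the UNRET-keyed call to entry_untrain_ret. A
sketch of that dependency (hypothetical helper name; the real code sets the
caps inline in srso_select_mitigation()):

/* Sketch only: without X86_FEATURE_UNRET the folded UNTRAIN_RET site
 * stays a NOP, entry_untrain_ret() is never called, and the SRSO
 * untrain path would be dead code. */
static void __init srso_force_untrain_caps(void)
{
	setup_force_cpu_cap(X86_FEATURE_RETHUNK);	/* patch returns       */
	setup_force_cpu_cap(X86_FEATURE_UNRET);		/* patch entry untrain */
}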

diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S

@@ -289,6 +289,13 @@ SYM_CODE_START(srso_return_thunk)
 	ud2
 SYM_CODE_END(srso_return_thunk)
 
+SYM_FUNC_START(entry_untrain_ret)
+	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+SYM_FUNC_END(entry_untrain_ret)
+__EXPORT_THUNK(entry_untrain_ret)
+
 SYM_CODE_START(__x86_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
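entry_untrain_ret is the temporary helper stub the commit message describes:
a single trampoline whose jmp target is rewritten once at patch time, so
exactly one untrain method, matching the active return thunk, can ever run.
Rendered as C for illustration (the real thing is the ALTERNATIVE_2 above:
once patched there is no runtime branch, and SRSO_ALIAS wins because later
alternatives override earlier ones):

/* Illustrative C equivalent of entry_untrain_ret's dispatch; this is a
 * model of the boot-time patching, not actual kernel code. */
void entry_untrain_ret(void)
{
	if (cpu_feature_enabled(X86_FEATURE_SRSO_ALIAS))
		srso_alias_untrain_ret();	/* Zen3/4 SRSO sequence */
	else if (cpu_feature_enabled(X86_FEATURE_SRSO))
		srso_untrain_ret();		/* Zen1/2 SRSO sequence */
	else
		retbleed_untrain_ret();		/* default: retbleed    */
}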