x86/alternative: Make custom return thunk unconditional
commit 095b8303f3 upstream.
There is infrastructure to rewrite return thunks to point to any
random thunk one desires; unwrap that from CALL_THUNKS, which up to
now was the sole user of it.
[ bp: Make the thunks visible on 32-bit and add ifdeffery for the
32-bit builds. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121148.775293785@infradead.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 53ebbe1c8c
parent 8bb1ed390d
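The hunks below make the return-thunk override generic: a boot-time-writable
function pointer selects which thunk rewritten return sites will be pointed
at, defaulting to __x86_return_thunk. The standalone C sketch below only
illustrates that pointer-based dispatch pattern; the names and the
select_return_thunk() helper are placeholders, not the kernel's actual code.

#include <stdio.h>

/* Stand-in for the default thunk; in the kernel this is the
 * __x86_return_thunk assembly stub. */
static void default_return_thunk(void)
{
	puts("default return thunk");
}

/* Stand-in for whatever custom thunk a mitigation selects instead. */
static void custom_return_thunk(void)
{
	puts("custom return thunk");
}

/* Retargetable pointer, analogous to x86_return_thunk in bugs.c:
 * initialized to the default and possibly rewritten once, early. */
static void (*return_thunk)(void) = default_return_thunk;

/* Hypothetical selection step standing in for mitigation setup. */
static void select_return_thunk(int want_custom)
{
	if (want_custom)
		return_thunk = custom_return_thunk;
}

int main(void)
{
	select_return_thunk(1);

	/* The code doing the rewriting only consults the pointer and
	 * never cares which concrete thunk was chosen. */
	return_thunk();
	return 0;
}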
arch/x86/include/asm/nospec-branch.h
@@ -210,7 +210,12 @@
 typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
 extern retpoline_thunk_t __x86_indirect_thunk_array[];
 
+#ifdef CONFIG_RETHUNK
 extern void __x86_return_thunk(void);
+#else
+static inline void __x86_return_thunk(void) {}
+#endif
+
 extern void zen_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_untrain_ret_alias(void);
arch/x86/kernel/cpu/bugs.c
@@ -62,6 +62,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
+void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
 {
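Making the pointer definition unconditional means __x86_return_thunk must
exist in every configuration, which is what the CONFIG_RETHUNK ifdeffery in
the header hunk provides: an empty static inline stub whose address can still
be taken. A minimal standalone compile check of the !CONFIG_RETHUNK branch of
that idea (plain userspace C, not the kernel build; CONFIG_RETHUNK here is
just an ordinary macro):

#include <stdio.h>

#ifdef CONFIG_RETHUNK
extern void __x86_return_thunk(void);
#else
/* Without the real assembly thunk, an empty static inline stub keeps
 * the symbol visible; taking its address forces an out-of-line copy. */
static inline void __x86_return_thunk(void) {}
#endif

/* Unconditional definition, mirroring the bugs.c hunk above
 * (__ro_after_init omitted in this userspace sketch). */
void (*x86_return_thunk)(void) = &__x86_return_thunk;

int main(void)
{
	/* With CONFIG_RETHUNK left undefined this dispatches to the stub. */
	x86_return_thunk();
	puts("linked against the inline stub");
	return 0;
}

The net effect, per the commit message, is that the pointer-based return
thunk rewriting no longer depends on CALL_THUNKS being enabled.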