x86/its: Add support for ITS-safe return thunk
RETs in the lower half of a cacheline may be affected by the ITS bug,
specifically when the RSB underflows. Use the ITS-safe return thunk for
such RETs.
RETs that are not patched:
- RET in a retpoline sequence does not need to be patched, because the
  sequence itself fills the RSB before the RET.
- RETs in the Call Depth Tracking (CDT) thunks __x86_indirect_{call|jump}_thunk
  and call_depth_return_thunk are not patched, because CDT by design
  prevents RSB underflow.
- RETs in the .init section are not reachable after init.
- RETs that are explicitly marked safe with ANNOTATE_UNRET_SAFE.
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
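
For illustration only, below is a minimal user-space C sketch of the address
test the patch applies when its_return_thunk is the selected return thunk:
with 64-byte cachelines, bit 0x20 of a RET's address says which half of the
cacheline it occupies, and only RETs in the lower half are redirected to the
ITS-safe thunk. The helper name and sample addresses are invented for the
example; the in-kernel check is cpu_wants_rethunk_at() in the diff below.

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the cacheline-half test (not kernel code).  With 64-byte
 * cachelines, offsets 0x00-0x1f are the lower half and 0x20-0x3f the
 * upper half, so bit 0x20 of the address distinguishes the two.
 */
static bool ret_wants_its_safe_thunk(unsigned long addr)
{
        /* The lower half (bit 0x20 clear) is the half affected by ITS. */
        return !(addr & 0x20);
}

int main(void)
{
        /* Hypothetical RET addresses, one in each half of a cacheline. */
        unsigned long rets[] = { 0xffffffff81000010UL, 0xffffffff81000030UL };

        for (unsigned int i = 0; i < sizeof(rets) / sizeof(rets[0]); i++)
                printf("RET at 0x%lx: %s\n", rets[i],
                       ret_wants_its_safe_thunk(rets[i]) ?
                       "patch with ITS-safe return thunk" : "leave as plain RET");
        return 0;
}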
parent 8754e67ad4
commit a75bf27fe4

8 changed files with 57 additions and 7 deletions

@@ -124,6 +124,20 @@ static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
 }
 #endif
 
+#if defined(CONFIG_MITIGATION_RETHUNK) && defined(CONFIG_OBJTOOL)
+extern bool cpu_wants_rethunk(void);
+extern bool cpu_wants_rethunk_at(void *addr);
+#else
+static __always_inline bool cpu_wants_rethunk(void)
+{
+        return false;
+}
+static __always_inline bool cpu_wants_rethunk_at(void *addr)
+{
+        return false;
+}
+#endif
+
 #ifdef CONFIG_SMP
 extern void alternatives_smp_module_add(struct module *mod, char *name,
                                         void *locks, void *locks_end,

@@ -367,6 +367,12 @@ static inline void srso_return_thunk(void) {}
 static inline void srso_alias_return_thunk(void) {}
 #endif
 
+#ifdef CONFIG_MITIGATION_ITS
+extern void its_return_thunk(void);
+#else
+static inline void its_return_thunk(void) {}
+#endif
+
 extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);

@@ -814,6 +814,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 
 #ifdef CONFIG_MITIGATION_RETHUNK
 
+bool cpu_wants_rethunk(void)
+{
+        return cpu_feature_enabled(X86_FEATURE_RETHUNK);
+}
+
+bool cpu_wants_rethunk_at(void *addr)
+{
+        if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
+                return false;
+        if (x86_return_thunk != its_return_thunk)
+                return true;
+
+        return !((unsigned long)addr & 0x20);
+}
+
 /*
  * Rewrite the compiler generated return thunk tail-calls.
  *
@@ -830,7 +845,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
         int i = 0;
 
         /* Patch the custom return thunks... */
-        if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+        if (cpu_wants_rethunk_at(addr)) {
                 i = JMP32_INSN_SIZE;
                 __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
         } else {
@@ -847,7 +862,7 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 {
         s32 *s;
 
-        if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+        if (cpu_wants_rethunk())
                 static_call_force_reinit();
 
         for (s = start; s < end; s++) {

@@ -354,7 +354,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
                 goto fail;
 
         ip = trampoline + size;
-        if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+        if (cpu_wants_rethunk_at(ip))
                 __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
         else
                 memcpy(ip, retq, sizeof(retq));

@@ -81,7 +81,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
                 break;
 
         case RET:
-                if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+                if (cpu_wants_rethunk_at(insn))
                         code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
                 else
                         code = &retinsn;
@@ -90,7 +90,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
         case JCC:
                 if (!func) {
                         func = __static_call_return;
-                        if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+                        if (cpu_wants_rethunk())
                                 func = x86_return_thunk;
                 }
 

@@ -503,6 +503,10 @@ PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
 . = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
 #endif
 
+#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
+. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
+#endif
+
 #endif /* CONFIG_X86_64 */
 
 /*

@@ -393,7 +393,18 @@ SYM_CODE_START(__x86_indirect_its_thunk_array)
         .align 64, 0xcc
 SYM_CODE_END(__x86_indirect_its_thunk_array)
 
-#endif
+.align 64, 0xcc
+.skip 32, 0xcc
+SYM_CODE_START(its_return_thunk)
+        UNWIND_HINT_FUNC
+        ANNOTATE_NOENDBR
+        ANNOTATE_UNRET_SAFE
+        ret
+        int3
+SYM_CODE_END(its_return_thunk)
+EXPORT_SYMBOL(its_return_thunk)
+
+#endif /* CONFIG_MITIGATION_ITS */
 
 /*
  * This function name is magical and is used by -mfunction-return=thunk-extern
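
As an aside, here is a small sketch (not part of the patch) of why the
".align 64, 0xcc" plus ".skip 32, 0xcc" placement above satisfies the
linker-script assertion "its_return_thunk & 0x20": aligning to a 64-byte
boundary and then skipping 32 bytes leaves the thunk at cacheline offset
0x20, i.e. in the upper half, which ITS does not affect. The base address
used below is made up for the example.

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long base = 0xffffffff82345678UL;     /* arbitrary link address */
        unsigned long aligned = (base + 63UL) & ~63UL; /* effect of .align 64    */
        unsigned long thunk = aligned + 32UL;          /* effect of .skip 32     */

        /*
         * Mirrors the vmlinux.lds.S ASSERT: the thunk must sit in the
         * upper half of its cacheline.
         */
        assert(thunk & 0x20);
        printf("its_return_thunk would start at cacheline offset %lu\n",
               thunk & 63UL);
        return 0;
}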

@@ -686,7 +686,7 @@ static void emit_return(u8 **pprog, u8 *ip)
 {
         u8 *prog = *pprog;
 
-        if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+        if (cpu_wants_rethunk()) {
                 emit_jump(&prog, x86_return_thunk, ip);
         } else {
                 EMIT1(0xC3); /* ret */