/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */

#ifndef _ASM_RISCV_FTRACE_H
#define _ASM_RISCV_FTRACE_H

/*
 * The graph frame test is not possible if CONFIG_FRAME_POINTER is not
 * enabled. Check arch/riscv/kernel/mcount.S for details.
 */
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER)
#define HAVE_FUNCTION_GRAPH_FP_TEST
#endif

#define ARCH_SUPPORTS_FTRACE_OPS 1

#ifndef __ASSEMBLY__

extern void *return_address(unsigned int level);

#define ftrace_return_address(n) return_address(n)

void _mcount(void);
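
/*
 * Note (summarizing the patching scheme described under
 * CONFIG_DYNAMIC_FTRACE below): the auipc at function entry stays fixed,
 * and what ftrace records and patches is the slot that follows it;
 * ftrace_call_adjust() is what shifts the compiler-reported address onto
 * that patchable slot.
 */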

unsigned long ftrace_call_adjust(unsigned long addr);
unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip);
#define ftrace_get_symaddr(fentry_ip) arch_ftrace_get_symaddr(fentry_ip)

/*
 * Like x86 and arm64, ignore the compat syscalls.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * All syscall functions carry the __riscv_ prefix, so skip its
	 * 8 characters. As noted above, compat syscalls are ignored, so
	 * the __riscv_compat_ prefix is of no concern here.
	 */
	return !strcmp(sym + 8, name);
}
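
/*
 * Worked example (illustrative only, not kernel API): since
 * strlen("__riscv_") == 8, a symbol such as "__riscv_sys_openat"
 * matches the event name "sys_openat":
 *
 *	!strcmp("__riscv_sys_openat" + 8, "sys_openat")	// true
 */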

struct dyn_arch_ftrace {
};
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * A general call in RISC-V is a pair of instructions:
 * 1) auipc: sets the high 20 bits of the PC-relative offset in the ra
 *    register
 * 2) jalr: adds the low 12 bits of the offset, jumps through ra, and
 *    sets ra to the return address (original pc + 4)
 *
 * The first 2 instructions of each traceable function are compiled to 2
 * nop instructions. Then, the kernel initializes the first instruction
 * to auipc at boot time (<ftrace disable>). The second instruction is
 * patched to jalr to start the trace.
 *
 *<Image>:
 * 0: nop
 * 4: nop
 *
 *<ftrace enable>:
 * 0: auipc  t0, 0x?
 * 4: jalr   t0, ?(t0)
 *
 *<ftrace disable>:
 * 0: auipc  t0, 0x?
 * 4: nop
 *
 * Dynamic ftrace generates probes to call sites, so we must deal with
 * both auipc and jalr at the same time.
 */
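
/*
 * With the atomic text patching done in arch/riscv/kernel/ftrace.c, only
 * the second instruction is ever rewritten at run time (nop <-> jalr);
 * the auipc written at boot never changes, and the word 8 bytes before
 * the patchable site holds the ftrace_ops pointer for that site
 * (&ftrace_nop_ops while tracing is disabled).
 */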

#define MCOUNT_ADDR		((unsigned long)_mcount)
#define JALR_SIGN_MASK		(0x00000800)
#define JALR_OFFSET_MASK	(0x00000fff)
#define AUIPC_OFFSET_MASK	(0xfffff000)
#define AUIPC_PAD		(0x00001000)
#define JALR_SHIFT		20
#define JALR_T0			(0x000282e7)
#define AUIPC_T0		(0x00000297)
#define JALR_RANGE		(JALR_SIGN_MASK - 1)

#define to_jalr_t0(offset)						\
	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)

#define to_auipc_t0(offset)						\
	((offset & JALR_SIGN_MASK) ?					\
	(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) :	\
	((offset & AUIPC_OFFSET_MASK) | AUIPC_T0))

#define make_call_t0(caller, callee, call)				\
do {									\
	unsigned int offset =						\
		(unsigned long) (callee) - (unsigned long) (caller);	\
	call[0] = to_auipc_t0(offset);					\
	call[1] = to_jalr_t0(offset);					\
} while (0)
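
/*
 * Worked example (a sketch; the addresses are made up): for a callee
 * 0x1800 bytes past the caller, bit 11 of the offset is set, so
 * AUIPC_PAD compensates for the sign extension jalr applies to its
 * immediate:
 *
 *	unsigned int call[2];
 *	make_call_t0(0x80000000UL, 0x80001800UL, call);
 *	// call[0] == 0x00002297:  auipc t0, 0x2
 *	// call[1] == 0x800282e7:  jalr  t0, -2048(t0)
 *	// 0x2000 + (-0x800) == 0x1800, the required displacement
 */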

/*
 * Only the jalr insn in the auipc+jalr pair is patched at run time, so
 * MCOUNT_INSN_SIZE is 4 bytes here even though the full call sequence
 * spans 8 bytes.
 */
#define MCOUNT_INSN_SIZE	4
#define MCOUNT_AUIPC_SIZE	4
#define MCOUNT_JALR_SIZE	4
#define MCOUNT_NOP4_SIZE	4

#ifndef __ASSEMBLY__
struct dyn_ftrace;
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define arch_ftrace_get_regs(regs) NULL
#define HAVE_ARCH_FTRACE_REGS
struct ftrace_ops;
struct ftrace_regs;
#define arch_ftrace_regs(fregs) ((struct __arch_ftrace_regs *)(fregs))

struct __arch_ftrace_regs {
	unsigned long epc;
	unsigned long ra;
	unsigned long sp;
	unsigned long s0;
	unsigned long t1;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	unsigned long direct_tramp;
#endif
	union {
		unsigned long args[8];
		struct {
			unsigned long a0;
			unsigned long a1;
			unsigned long a2;
			unsigned long a3;
			unsigned long a4;
			unsigned long a5;
			unsigned long a6;
			unsigned long a7;
#ifdef CONFIG_CC_IS_CLANG
			unsigned long t2;
			unsigned long t3;
			unsigned long t4;
			unsigned long t5;
			unsigned long t6;
#endif
		};
	};
};

static __always_inline unsigned long ftrace_regs_get_instruction_pointer(const struct ftrace_regs
									  *fregs)
{
	return arch_ftrace_regs(fregs)->epc;
}

static __always_inline void ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
								unsigned long pc)
{
	arch_ftrace_regs(fregs)->epc = pc;
}

static __always_inline unsigned long ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
{
	return arch_ftrace_regs(fregs)->sp;
}

static __always_inline unsigned long ftrace_regs_get_frame_pointer(const struct ftrace_regs *fregs)
{
	return arch_ftrace_regs(fregs)->s0;
}

static __always_inline unsigned long ftrace_regs_get_argument(struct ftrace_regs *fregs,
							       unsigned int n)
{
	if (n < 8)
		return arch_ftrace_regs(fregs)->args[n];
	return 0;
}
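
/*
 * Usage sketch (hypothetical callback, not part of this header): an
 * ftrace_ops handler can inspect the traced function's arguments via
 * these accessors:
 *
 *	static void my_tracer(unsigned long ip, unsigned long parent_ip,
 *			      struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);
 *		pr_debug("%ps(0x%lx)\n", (void *)ip, arg0);
 *	}
 */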

static __always_inline unsigned long ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
{
	return arch_ftrace_regs(fregs)->a0;
}

static __always_inline unsigned long ftrace_regs_get_return_address(const struct ftrace_regs *fregs)
{
	return arch_ftrace_regs(fregs)->ra;
}

static __always_inline void ftrace_regs_set_return_value(struct ftrace_regs *fregs,
							  unsigned long ret)
{
	arch_ftrace_regs(fregs)->a0 = ret;
}

static __always_inline void ftrace_override_function_with_return(struct ftrace_regs *fregs)
{
	arch_ftrace_regs(fregs)->epc = arch_ftrace_regs(fregs)->ra;
}
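
/*
 * Usage sketch (hypothetical): a handler can make the traced function
 * return immediately with a chosen value by combining the two helpers
 * above:
 *
 *	ftrace_regs_set_return_value(fregs, -EINVAL);
 *	ftrace_override_function_with_return(fregs);
 */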

static __always_inline struct pt_regs *
ftrace_partial_regs(const struct ftrace_regs *fregs, struct pt_regs *regs)
{
	struct __arch_ftrace_regs *afregs = arch_ftrace_regs(fregs);

	memcpy(&regs->a_regs, afregs->args, sizeof(afregs->args));
	regs->epc = afregs->epc;
	regs->ra = afregs->ra;
	regs->sp = afregs->sp;
	regs->s0 = afregs->s0;
	regs->t1 = afregs->t1;
	return regs;
}

int ftrace_regs_query_register_offset(const char *name);

void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
	arch_ftrace_regs(fregs)->t1 = addr;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

#endif /* __ASSEMBLY__ */

#endif /* CONFIG_DYNAMIC_FTRACE */

#endif /* _ASM_RISCV_FTRACE_H */