mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)

Commit 67361cf807
("powerpc/ftrace: Handle large kernel configs")
added ftrace support for ppc64 kernel images with a text section larger
than 32MB. The patch did two things:
1. Add stubs at the end of .text to branch into ftrace_[regs_]caller for
functions that were out of branch range.
2. Re-purpose linker-generated long branches to _mcount to instead branch
to ftrace_[regs_]caller.
Before that, we only supported kernel .text up to ~32MB. With the above,
we now support up to ~96MB:
- The first 32MB of kernel text can branch directly into
ftrace_[regs_]caller since that symbol is usually at the beginning.
- The modified long_branch from (2) above is used by the next 32MB of
kernel text.
- The next 32MB of kernel text can use the stub at the end of text to
branch back to ftrace_[regs_]caller.
While re-purposing the long branch works in practice, it still restricts
ftrace to kernel text up to ~96MB. The stub at the end of kernel text
from (1) already enables us to extend ftrace support for kernel text
up to 64MB, which fulfils the original requirement. Further, once we
switch to -fpatchable-function-entry, there will not be a long branch
that we can use.
Stop re-purposing the linker-generated long branches for ftrace to
simplify the code. If there are good reasons to support ftrace on
kernels beyond 64MB, we can consider adding support by using
-fpatchable-function-entry.
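For context, the ~32MB steps above come from the direct branch encoding: b/bl
carries a 24-bit word offset, i.e. a signed 26-bit byte offset, so a call site
can only reach +/- 32MB directly. A minimal userspace sketch of that limit
(b_offset_in_range() is a hypothetical stand-in for the kernel's
is_offset_in_branch_range(), shown only for illustration):

/* Illustrative only -- not part of this patch. Reach of a direct b/bl. */
#include <assert.h>
#include <stdbool.h>

/* b/bl carries a 24-bit word offset, i.e. a signed 26-bit byte offset */
static bool b_offset_in_range(long offset)
{
	return offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3);
}

int main(void)
{
	assert(b_offset_in_range(0x1fffffc));	/* +32MB - 4: reachable */
	assert(!b_offset_in_range(0x2000000));	/* +32MB: out of range */
	return 0;
}

Anything farther than that must go via one of the stubs or trampolines
described above.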
Signed-off-by: Naveen N Rao <naveen@kernel.org>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/33fa3be97f8e1f2171254ef2e1b0d5c8836c11fd.1687166935.git.naveen@kernel.org
745 lines
18 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>

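/* One trampoline lives in .text and one in .init.text; see ftrace_dyn_arch_init() */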
#define NUM_FTRACE_TRAMPS	2
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

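/* Build a 'b' or 'bl' from ip to addr, resolved to the function's entry point */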
static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	ppc_inst_t op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);

	return op;
}

static inline int ftrace_read_inst(unsigned long ip, ppc_inst_t *op)
{
	if (copy_inst_from_kernel_nofault(op, (void *)ip)) {
		pr_err("0x%lx: fetching instruction failed\n", ip);
		return -EFAULT;
	}

	return 0;
}

static inline int ftrace_validate_inst(unsigned long ip, ppc_inst_t inst)
{
	ppc_inst_t op;
	int ret;

	ret = ftrace_read_inst(ip, &op);
	if (!ret && !ppc_inst_equal(op, inst)) {
		pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
		       ip, ppc_inst_as_ulong(inst), ppc_inst_as_ulong(op));
		ret = -EINVAL;
	}

	return ret;
}

static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
	int ret = ftrace_validate_inst(ip, old);

	if (!ret)
		ret = patch_instruction((u32 *)ip, new);

	return ret;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	return is_offset_in_branch_range(addr - ip);
}

static int is_bl_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}

static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
{
	int offset;

	offset = PPC_LI(ppc_inst_val(op));
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}

#ifdef CONFIG_MODULES
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
			return -EFAULT;
		}

		/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
		if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
		    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
			pr_err("Unexpected instruction %08lx around bl _mcount\n",
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		/*
		 * Check what is in the next instruction. We can see ld r2,40(r1), but
		 * on first pass after boot we will see mflr r0.
		 */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
			pr_err("Fetching op failed.\n");
			return -EFAULT;
		}

		if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
			pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	}

	/*
	 * When using -mprofile-kernel or PPC32 there is no load to jump over.
	 *
	 * Otherwise our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 * XXX: could make PCREL depend on MPROFILE_KERNEL
	 * XXX: check PCREL && MPROFILE_KERNEL calling sequence
	 */
	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
		pop = ppc_inst(PPC_RAW_NOP());
	else
		pop = ppc_inst(PPC_RAW_BRANCH(8));	/* b +8 */

	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
#else
static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MODULES */

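/* Return a trampoline address within branch range of ip, or 0 if none is reachable */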
static unsigned long find_ftrace_tramp(unsigned long ip)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i])
			continue;
		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
			return ftrace_tramps[i];

	return 0;
}

static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Are ftrace trampolines reachable? */
	if (!find_ftrace_tramp(ip)) {
		pr_err("No ftrace trampolines reachable from %ps\n", (void *)ip);
		return -EINVAL;
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_nop_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
}

#ifdef CONFIG_MODULES
/*
 * Examine the existing instructions for __ftrace_make_call.
 * They should effectively be a NOP, and follow formal constraints,
 * depending on the ABI. Return false if they don't.
 */
static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
	else
		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
		       ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
}

static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op[2];
	void *ip = (void *)rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(op, ip))
		return -EFAULT;

	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
	    copy_inst_from_kernel_nofault(op + 1, ip + 4))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
		       ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
		return -EINVAL;
	}

	/* If we never set up ftrace trampoline(s), then bail */
	if (!mod->arch.tramp ||
	    (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);

	if (ptr != entry) {
		pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
		return -EINVAL;
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %08lx\n",
		       ip, ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_call_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#ifdef CONFIG_MODULES
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		     unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	return 0;
}
#endif

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
}
#endif

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	/* Also update the regs callback function */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func, 1);
		ret = ftrace_modify_code(ip, old, new);
	}

	return ret;
}

/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

void ftrace_free_init_tramp(void)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
			ftrace_tramps[i] = 0;
			return;
		}
}

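/* Record a trampoline address in the first free ftrace_tramps[] slot */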
static void __init add_ftrace_tramp(unsigned long tramp)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i]) {
			ftrace_tramps[i] = tramp;
			return;
		}
}

int __init ftrace_dyn_arch_init(void)
{
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	unsigned long addr = FTRACE_REGS_ADDR;
	long reladdr;
	int i;
	u32 stub_insns[] = {
#ifdef CONFIG_PPC_KERNEL_PCREL
		/* pla r12,addr */
		PPC_PREFIX_MLS | __PPC_PRFX_R(1),
		PPC_INST_PADDI | ___PPC_RT(_R12),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#elif defined(CONFIG_PPC64)
		PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#else
		PPC_RAW_LIS(_R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#endif
	};

	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
		for (i = 0; i < 2; i++) {
			reladdr = addr - (unsigned long)tramp[i];

			if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
				pr_err("Address of %ps out of range of pcrel address.\n",
				       (void *)addr);
				return -1;
			}

			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= IMM_H18(reladdr);
			tramp[i][1] |= IMM_L(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		reladdr = addr - kernel_toc_addr();

		if (reladdr >= (long)SZ_2G || reladdr < -(long long)SZ_2G) {
			pr_err("Address of %ps out of range of kernel_toc.\n",
			       (void *)addr);
			return -1;
		}

		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][1] |= PPC_HA(reladdr);
			tramp[i][2] |= PPC_LO(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else {
		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= PPC_HA(addr);
			tramp[i][1] |= PPC_LO(addr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	}

	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long sp = fregs->regs.gpr[1];
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		goto out;

	if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
		parent_ip = ppc_function_entry(return_to_handler);

	ftrace_test_recursion_unlock(bit);
out:
	fregs->regs.link = parent_ip;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */