fgraph: Get ftrace recursion lock in function_graph_enter

Take the ftrace recursion lock in the generic function_graph_enter()
instead of in each architecture's code.
This makes all function_graph tracer callbacks run in a non-preemptive
state. On x86 and powerpc this is already the default behavior, but on
the other architectures it is new.

Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: bpf <bpf@vger.kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Naveen N Rao <naveen@kernel.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: x86@kernel.org
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://lore.kernel.org/173379653720.973433.18438622234884980494.stgit@devnote2
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
commit d576aec24d (parent 1d95fd9d6b)
Author:    Masami Hiramatsu (Google)
Date:      2024-12-10 11:08:57 +09:00
Committer: Steven Rostedt (Google)

4 files changed, 7 insertions(+), 20 deletions(-)
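
As an illustration of the resulting pattern, below is a minimal sketch of the
generic entry path after this change. It is a simplified, hypothetical
rendering of function_graph_enter(), not the verbatim kernel source: the real
function does the return-stack bookkeeping where the elision comment is, and
it also drops the lock on its error path (see the last hunk of the diff).

/* Sketch only -- simplified illustration, not the verbatim kernel code. */
int function_graph_enter(unsigned long ret, unsigned long func,
                         unsigned long frame_pointer, unsigned long *retp)
{
        int bit;

        /*
         * Take the ftrace recursion lock in the generic entry path so the
         * architecture callers no longer have to. ftrace_test_recursion_trylock()
         * also disables preemption, which is why all function_graph callbacks
         * now run in a non-preemptive state.
         */
        bit = ftrace_test_recursion_trylock(func, ret);
        if (bit < 0)
                return -EBUSY;

        /* ... push the return trace and call the registered entry handlers ... */

        ftrace_test_recursion_unlock(bit);
        return 0;
}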

@@ -658,7 +658,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
         unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1];
-        int bit;
 
         if (unlikely(ftrace_graph_is_dead()))
                 goto out;
@@ -666,14 +665,9 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
         if (unlikely(atomic_read(&current->tracing_graph_pause)))
                 goto out;
 
-        bit = ftrace_test_recursion_trylock(ip, parent_ip);
-        if (bit < 0)
-                goto out;
-
         if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
                 parent_ip = ppc_function_entry(return_to_handler);
 
-        ftrace_test_recursion_unlock(bit);
 out:
         arch_ftrace_regs(fregs)->regs.link = parent_ip;
 }

@@ -790,7 +790,6 @@ static unsigned long
 __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
 {
         unsigned long return_hooker;
-        int bit;
 
         if (unlikely(ftrace_graph_is_dead()))
                 goto out;
@@ -798,16 +797,11 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp
         if (unlikely(atomic_read(&current->tracing_graph_pause)))
                 goto out;
 
-        bit = ftrace_test_recursion_trylock(ip, parent);
-        if (bit < 0)
-                goto out;
-
         return_hooker = ppc_function_entry(return_to_handler);
 
         if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
                 parent = return_hooker;
 
-        ftrace_test_recursion_unlock(bit);
 out:
         return parent;
 }

@@ -615,7 +615,6 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
                            unsigned long frame_pointer)
 {
         unsigned long return_hooker = (unsigned long)&return_to_handler;
-        int bit;
 
         /*
          * When resuming from suspend-to-ram, this function can be indirectly
@@ -635,14 +634,8 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
         if (unlikely(atomic_read(&current->tracing_graph_pause)))
                 return;
 
-        bit = ftrace_test_recursion_trylock(ip, *parent);
-        if (bit < 0)
-                return;
-
         if (!function_graph_enter(*parent, ip, frame_pointer, parent))
                 *parent = return_hooker;
-
-        ftrace_test_recursion_unlock(bit);
 }
 
 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

@@ -650,8 +650,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
         struct ftrace_graph_ent trace;
         unsigned long bitmap = 0;
         int offset;
+        int bit;
         int i;
 
+        bit = ftrace_test_recursion_trylock(func, ret);
+        if (bit < 0)
+                return -EBUSY;
+
         trace.func = func;
         trace.depth = ++current->curr_ret_depth;
 
@@ -697,12 +702,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
          * flag, set that bit always.
          */
         set_bitmap(current, offset, bitmap | BIT(0));
+        ftrace_test_recursion_unlock(bit);
         return 0;
 out_ret:
         current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
 out:
         current->curr_ret_depth--;
+        ftrace_test_recursion_unlock(bit);
         return -EBUSY;
 }