linux/tools/perf/util/ftrace.h
Changbin Du 129f70bd60 perf: ftrace: add graph tracer options args/retval/retval-hex/retaddr
This change adds support for the new funcgraph tracer options funcgraph-args,
funcgraph-retval, funcgraph-retval-hex and funcgraph-retaddr, exposed through
the --graph-opts argument of 'perf ftrace'.

The newly added options are:
  - args       : Show function arguments.
  - retval     : Show function return value.
  - retval-hex : Show function return value in hexadecimal format.
  - retaddr    : Show function return address.

 # ./perf ftrace -G vfs_write --graph-opts retval,retaddr
 # tracer: function_graph
 #
 # CPU  DURATION                  FUNCTION CALLS
 # |     |   |                     |   |   |   |
 5)               |  mutex_unlock() { /* <-rb_simple_write+0xda/0x150 */
 5)   0.188 us    |    local_clock(); /* <-lock_release+0x2ad/0x440 ret=0x3bf2a3cf90e */
 5)               |    rt_mutex_slowunlock() { /* <-rb_simple_write+0xda/0x150 */
 5)               |      _raw_spin_lock_irqsave() { /* <-rt_mutex_slowunlock+0x4f/0x200 */
 5)   0.123 us    |        preempt_count_add(); /* <-_raw_spin_lock_irqsave+0x23/0x90 ret=0x0 */
 5)   0.128 us    |        local_clock(); /* <-__lock_acquire.isra.0+0x17a/0x740 ret=0x3bf2a3cfc8b */
 5)   0.086 us    |        do_raw_spin_trylock(); /* <-_raw_spin_lock_irqsave+0x4a/0x90 ret=0x1 */
 5)   0.845 us    |      } /* _raw_spin_lock_irqsave ret=0x292 */
 5)               |      _raw_spin_unlock_irqrestore() { /* <-rt_mutex_slowunlock+0x191/0x200 */
 5)   0.097 us    |        local_clock(); /* <-lock_release+0x2ad/0x440 ret=0x3bf2a3cff1f */
 5)   0.086 us    |        do_raw_spin_unlock(); /* <-_raw_spin_unlock_irqrestore+0x23/0x60 ret=0x1 */
 5)   0.104 us    |        preempt_count_sub(); /* <-_raw_spin_unlock_irqrestore+0x35/0x60 ret=0x0 */
 5)   0.726 us    |      } /* _raw_spin_unlock_irqrestore ret=0x80000000 */
 5)   1.881 us    |    } /* rt_mutex_slowunlock ret=0x0 */
 5)   2.931 us    |  } /* mutex_unlock ret=0x0 */
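
The options can also be combined; for example, to additionally show function
arguments and print return values in hexadecimal (output omitted here):

 # ./perf ftrace -G vfs_write --graph-opts args,retval,retval-hex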

Signed-off-by: Changbin Du <changbin.du@huawei.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20250613114048.132336-1-changbin.du@huawei.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2025-07-22 17:47:22 -07:00

#ifndef __PERF_FTRACE_H__
#define __PERF_FTRACE_H__

#include <linux/list.h>

#include "target.h"

struct evlist;
struct hashmap;
struct stats;

struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	struct list_head	event_pair;
	struct hashmap		*profile_hash;
	unsigned long		percpu_buffer_size;
	bool			inherit;
	bool			use_nsec;
	unsigned int		bucket_range;
	unsigned int		min_latency;
	unsigned int		max_latency;
	unsigned int		bucket_num;
	bool			hide_empty;
	int			graph_depth;
	int			func_stack_trace;
	int			func_irq_info;
	/* function_graph tracer options, set via --graph-opts */
	int			graph_args;
	int			graph_retval;
	int			graph_retval_hex;
	int			graph_retaddr;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	int			graph_tail;
};

struct filter_entry {
	struct list_head	list;
	char			name[];
};

#define NUM_BUCKET  22	/* 20 + 2 (for outliers in both directions) */

#ifdef HAVE_BPF_SKEL

/* BPF-based latency histogram collection for 'perf ftrace latency' */
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
				  int buckets[], struct stats *stats);
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace);

#else /* !HAVE_BPF_SKEL */

/* Stubs for builds without BPF skeleton support; they always fail with -1. */
static inline int
perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
			      int buckets[] __maybe_unused,
			      struct stats *stats __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

#endif /* HAVE_BPF_SKEL */

#endif /* __PERF_FTRACE_H__ */
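
For reference, the latency functions above form a prepare/start/stop/read/cleanup
lifecycle. Below is a minimal sketch of a caller driving that API. It mirrors the
general control flow of tools/perf/builtin-ftrace.c, but the run_latency_bpf()
helper, its error handling, and the "stat.h" include for struct stats are
illustrative assumptions, not code from this tree.

#include "ftrace.h"
#include "stat.h"	/* assumed header providing struct stats */

static int run_latency_bpf(struct perf_ftrace *ftrace)
{
	int buckets[NUM_BUCKET] = { 0 };
	struct stats stats;
	int err;

	/* Load/attach the BPF skeleton; with the !HAVE_BPF_SKEL stubs
	 * above this always fails with -1. */
	err = perf_ftrace__latency_prepare_bpf(ftrace);
	if (err < 0)
		return err;

	err = perf_ftrace__latency_start_bpf(ftrace);
	if (!err) {
		/* ... run the measured workload here, then ... */
		perf_ftrace__latency_stop_bpf(ftrace);

		/* Fill the NUM_BUCKET-slot histogram and summary stats. */
		err = perf_ftrace__latency_read_bpf(ftrace, buckets, &stats);
	}

	perf_ftrace__latency_cleanup_bpf(ftrace);
	return err;
}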