linux/tools/perf/util/kwork.h
Yang Jihong 2f21f5e4b4 perf kwork top: Add statistics on hardirq event support
Calculate the runtime of the hardirq events and subtract it from
the corresponding task runtime to improve the precision.

Example usage:

  # perf kwork -k sched,irq record -- perf record -o perf_record.data -a sleep 10
  [ perf record: Woken up 2 times to write data ]
  [ perf record: Captured and wrote 1.054 MB perf_record.data (18019 samples) ]
  [ perf record: Woken up 1 times to write data ]
  [ perf record: Captured and wrote 1.798 MB perf.data (16334 samples) ]
  #
  # perf kwork top

  Total  : 139240.869 ms, 8 cpus
  %Cpu(s):  94.91% id,   0.05% hi
  %Cpu0   [                                 0.05%]
  %Cpu1   [|                                5.00%]
  %Cpu2   [                                 0.43%]
  %Cpu3   [                                 0.57%]
  %Cpu4   [                                 1.19%]
  %Cpu5   [||||||                          20.46%]
  %Cpu6   [                                 0.48%]
  %Cpu7   [|||                             12.10%]

        PID    %CPU           RUNTIME  COMMMAND
    ----------------------------------------------------
          0   99.54      17325.622 ms  swapper/2
          0   99.54      17327.527 ms  swapper/0
          0   99.51      17319.909 ms  swapper/6
          0   99.42      17304.934 ms  swapper/3
          0   98.80      17197.385 ms  swapper/4
          0   94.99      16534.991 ms  swapper/1
          0   87.89      15295.264 ms  swapper/7
          0   79.53      13843.182 ms  swapper/5
       4252   36.50       6361.768 ms  perf
       4256    1.17        205.215 ms  bash
        151    0.53         93.298 ms  systemd-resolve
       4254    0.39         69.468 ms  perf
        423    0.34         59.368 ms  bash
        412    0.29         51.204 ms  sshd
        249    0.20         35.288 ms  sd-resolve
         16    0.17         30.287 ms  rcu_preempt
        153    0.09         17.266 ms  systemd-timesyn
          1    0.09         17.078 ms  systemd
       4253    0.07         12.457 ms  perf
       4255    0.06         11.559 ms  perf
       4234    0.03          6.105 ms  kworker/u16:1
         69    0.03          6.259 ms  kworker/1:1H
       4251    0.02          4.615 ms  perf
       4095    0.02          4.890 ms  kworker/7:1
         61    0.02          4.005 ms  kcompactd0
         75    0.02          3.546 ms  kworker/2:1
         97    0.01          3.106 ms  kworker/7:1H
         98    0.01          1.995 ms  jbd2/sda-8
       4088    0.01          1.779 ms  kworker/u16:3
       2909    0.01          1.795 ms  kworker/0:2
       4246    0.00          1.117 ms  kworker/7:2
         51    0.00          0.327 ms  ksoftirqd/7
         50    0.00          0.369 ms  migration/7
        102    0.00          0.160 ms  kworker/6:1H
         76    0.00          0.609 ms  kworker/6:1
         45    0.00          0.779 ms  migration/6
         87    0.00          0.504 ms  kworker/5:1H
         73    0.00          1.130 ms  kworker/5:1
         41    0.00          0.152 ms  ksoftirqd/5
         40    0.00          0.702 ms  migration/5
         64    0.00          0.316 ms  kworker/4:1
         35    0.00          0.791 ms  migration/4
        353    0.00          2.211 ms  sshd
         74    0.00          0.272 ms  kworker/3:1
         30    0.00          0.819 ms  migration/3
         25    0.00          0.784 ms  migration/2
        397    0.00          0.539 ms  kworker/1:1
         21    0.00          1.600 ms  ksoftirqd/1
         20    0.00          0.773 ms  migration/1
         17    0.00          1.682 ms  migration/0
         15    0.00          0.076 ms  ksoftirqd/0

Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Yang Jihong <yangjihong1@huawei.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Link: https://lore.kernel.org/r/20230812084917.169338-12-yangjihong1@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2023-09-12 17:31:59 -03:00

287 lines
8.1 KiB
C

#ifndef PERF_UTIL_KWORK_H
#define PERF_UTIL_KWORK_H
#include "util/tool.h"
#include "util/time-utils.h"
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>
struct perf_sample;
struct perf_session;
/*
 * Event classes perf kwork can trace.  The values index per-class
 * arrays elsewhere, so they must stay dense starting at 0.
 */
enum kwork_class_type {
	KWORK_CLASS_IRQ       = 0,	/* hardirq handlers */
	KWORK_CLASS_SOFTIRQ   = 1,	/* softirq handlers */
	KWORK_CLASS_WORKQUEUE = 2,	/* workqueue works */
	KWORK_CLASS_SCHED     = 3,	/* task runtime via sched events */
	KWORK_CLASS_MAX       = 4,	/* number of classes, not a class */
};
/*
 * Which report `perf kwork` produces; selects the set of
 * trace_kwork_handler callbacks and the output format.
 */
enum kwork_report_type {
	KWORK_REPORT_RUNTIME  = 0,	/* aggregated runtime per work */
	KWORK_REPORT_LATENCY  = 1,	/* raise-to-entry latency per work */
	KWORK_REPORT_TIMEHIST = 2,	/* chronological per-event listing */
	KWORK_REPORT_TOP      = 3,	/* top(1)-like per-task cpu usage */
};
/*
 * Lifecycle stages of a work item.  These values index
 * kwork_work::atom_list[] and nr_skipped_events[], so they must
 * remain dense starting at 0 with KWORK_TRACE_MAX last.
 */
enum kwork_trace_type {
	KWORK_TRACE_RAISE = 0,	/* work was raised/queued */
	KWORK_TRACE_ENTRY = 1,	/* execution started */
	KWORK_TRACE_EXIT  = 2,	/* execution finished */
	KWORK_TRACE_MAX   = 3,	/* number of stages */
};
/*
* data structure:
*
* +==================+ +============+ +======================+
* | class | | work | | atom |
* +==================+ +============+ +======================+
* +------------+ | +-----+ | | +------+ | | +-------+ +-----+ |
* | perf_kwork | +-> | irq | --------|+-> | eth0 | --+-> | raise | - | ... | --+ +-----------+
* +-----+------+ || +-----+ ||| +------+ ||| +-------+ +-----+ | | | |
* | || ||| ||| | +-> | atom_page |
* | || ||| ||| +-------+ +-----+ | | |
* | class_list ||| |+-> | entry | - | ... | ----> | |
* | || ||| ||| +-------+ +-----+ | | |
* | || ||| ||| | +-> | |
* | || ||| ||| +-------+ +-----+ | | | |
* | || ||| |+-> | exit | - | ... | --+ +-----+-----+
* | || ||| | | +-------+ +-----+ | |
* | || ||| | | | |
* | || ||| +-----+ | | | |
* | || |+-> | ... | | | | |
* | || | | +-----+ | | | |
* | || | | | | | |
* | || +---------+ | | +-----+ | | +-------+ +-----+ | |
* | +-> | softirq | -------> | RCU | ---+-> | raise | - | ... | --+ +-----+-----+
* | || +---------+ | | +-----+ ||| +-------+ +-----+ | | | |
* | || | | ||| | +-> | atom_page |
* | || | | ||| +-------+ +-----+ | | |
* | || | | |+-> | entry | - | ... | ----> | |
* | || | | ||| +-------+ +-----+ | | |
* | || | | ||| | +-> | |
* | || | | ||| +-------+ +-----+ | | | |
* | || | | |+-> | exit | - | ... | --+ +-----+-----+
* | || | | | | +-------+ +-----+ | |
* | || | | | | | |
* | || +-----------+ | | +-----+ | | | |
* | +-> | workqueue | -----> | ... | | | | |
* | | +-----------+ | | +-----+ | | | |
* | +==================+ +============+ +======================+ |
* | |
* +----> atom_page_list ---------------------------------------------------------+
*
*/
/*
 * One recorded trace event (raise/entry/exit) for a work item.
 * Atoms are pool-allocated from struct kwork_atom_page rather than
 * malloc'd individually (see the data-structure diagram above).
 */
struct kwork_atom {
	struct list_head list;		/* link in kwork_work::atom_list[stage] */
	u64 time;			/* event timestamp */
	struct kwork_atom *prev;	/* presumably the matching earlier-stage atom
					 * (e.g. the raise paired with this entry) —
					 * TODO confirm against builtin-kwork.c */
	void *page_addr;		/* owning kwork_atom_page, needed to free the slot */
	unsigned long bit_inpage;	/* this atom's slot index in the page bitmap */
};
/* Number of atom slots carved out of each pool page. */
#define NR_ATOM_PER_PAGE 128
/*
 * Slab-like pool page for kwork_atom allocation: bitmap marks which
 * slots of atoms[] are in use.  Pages are chained on
 * perf_kwork::atom_page_list.
 */
struct kwork_atom_page {
	struct list_head list;
	struct kwork_atom atoms[NR_ATOM_PER_PAGE];
	DECLARE_BITMAP(bitmap, NR_ATOM_PER_PAGE);
};
struct perf_kwork;
struct kwork_class;
/*
 * One profiled kernel work item (a specific irq line, softirq vector,
 * workqueue work, or task), stored in its class's work_root rb-tree
 * and accumulating the per-report statistics below.
 */
struct kwork_work {
	/*
	 * class field
	 */
	struct rb_node node;		/* node in kwork_class::work_root */
	struct kwork_class *class;	/* owning class (irq/softirq/...) */
	/*
	 * work field
	 */
	u64 id;				/* class-specific identity (e.g. irq number
					 * or pid) — TODO confirm per class */
	int cpu;
	char *name;			/* display name used in reports */
	/*
	 * atom field
	 */
	u64 nr_atoms;			/* atoms currently attached to this work */
	struct list_head atom_list[KWORK_TRACE_MAX];	/* pending atoms per stage */
	/*
	 * runtime report
	 */
	u64 max_runtime;		/* longest single execution */
	u64 max_runtime_start;		/* timestamps bounding that execution */
	u64 max_runtime_end;
	u64 total_runtime;		/* sum over all executions */
	/*
	 * latency report
	 */
	u64 max_latency;		/* longest raise-to-entry delay */
	u64 max_latency_start;		/* timestamps bounding that delay */
	u64 max_latency_end;
	u64 total_latency;		/* sum over all raise-to-entry delays */
	/*
	 * top report
	 */
	u32 cpu_usage;			/* cpu usage value shown by `perf kwork top` —
					 * units/scale defined in builtin-kwork.c, confirm */
};
/*
 * One traced event class (irq, softirq, workqueue, sched): its
 * tracepoint table, the rb-tree of its work items, and the callbacks
 * each class implements.
 */
struct kwork_class {
	struct list_head list;				/* link in perf_kwork::class_list */
	const char *name;
	enum kwork_class_type type;
	unsigned int nr_tracepoints;			/* entries in tp_handlers[] */
	const struct evsel_str_handler *tp_handlers;	/* tracepoint-name -> handler table */
	struct rb_root_cached work_root;		/* all kwork_work items of this class */
	/* per-class setup against the session; returns int status,
	 * presumably 0 on success — confirm in class implementations */
	int (*class_init)(struct kwork_class *class,
			  struct perf_session *session);
	/* initialize @work's fields from @sample for trace stage @src_type */
	void (*work_init)(struct perf_kwork *kwork,
			  struct kwork_class *class,
			  struct kwork_work *work,
			  enum kwork_trace_type src_type,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine);
	/* format @work's display name into @buf (buffer size @len) */
	void (*work_name)(struct kwork_work *work,
			  char *buf, int len);
};
/*
 * Callbacks invoked as tracepoint samples are processed; each report
 * type installs the subset it needs (NOTE(review): unset members are
 * presumably left NULL and skipped — confirm in builtin-kwork.c).
 */
struct trace_kwork_handler {
	/* work raised/queued (e.g. irq raise) */
	int (*raise_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);
	/* work execution started */
	int (*entry_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);
	/* work execution finished */
	int (*exit_event)(struct perf_kwork *kwork,
			  struct kwork_class *class, struct evsel *evsel,
			  struct perf_sample *sample, struct machine *machine);
	/* task context switch (used by the sched class / top report) */
	int (*sched_switch_event)(struct perf_kwork *kwork,
				  struct kwork_class *class, struct evsel *evsel,
				  struct perf_sample *sample, struct machine *machine);
};
/*
 * Accumulated runtime buckets for one cpu in `perf kwork top`
 * (cf. the "%Cpu(s): ... id, ... hi" summary line).
 * NOTE(review): the leading "__" makes this tag a reserved identifier
 * in ISO C; renaming would touch users outside this header.
 */
struct __top_cpus_runtime {
	u64 load;	/* time running tasks */
	u64 idle;	/* time in the idle task ("id") */
	u64 irq;	/* time in hardirq handlers ("hi") */
	u64 total;	/* total observed time on this cpu */
};
/* Aggregate state for the `perf kwork top` report. */
struct kwork_top_stat {
	DECLARE_BITMAP(all_cpus_bitmap, MAX_NR_CPUS);	/* cpus seen in the trace */
	struct __top_cpus_runtime *cpus_runtime;	/* per-cpu buckets, presumably
							 * indexed by cpu — confirm */
};
/*
 * Global state for one `perf kwork` invocation: registered classes,
 * the atom allocation pool, user filters, per-subcommand options and
 * aggregate statistics.
 */
struct perf_kwork {
	/*
	 * metadata
	 */
	struct perf_tool tool;			/* perf sample-processing callbacks */
	struct list_head class_list;		/* registered kwork_class instances */
	struct list_head atom_page_list;	/* kwork_atom_page pool */
	struct list_head sort_list, cmp_id;	/* active sort keys / comparison ids */
	struct rb_root_cached sorted_work_root;	/* works re-sorted for report output */
	const struct trace_kwork_handler *tp_handler;	/* callbacks for the current report */
	/*
	 * profile filters
	 */
	const char *profile_name;		/* restrict to works matching this name */
	const char *cpu_list;			/* user-supplied cpu list string */
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);	/* parsed form of cpu_list */
	const char *time_str;			/* user-supplied time window string */
	struct perf_time_interval ptime;	/* parsed form of time_str */
	/*
	 * options for command
	 */
	bool force;
	const char *event_list_str;		/* -k: which classes to trace */
	enum kwork_report_type report;		/* which report to produce */
	/*
	 * options for subcommand
	 */
	bool summary;
	const char *sort_order;
	bool show_callchain;
	unsigned int max_stack;			/* callchain depth limit */
	bool use_bpf;				/* use the BPF-skeleton fast path */
	/*
	 * statistics
	 */
	u64 timestart;				/* bounds of the processed time range */
	u64 timeend;
	unsigned long nr_events;
	unsigned long nr_lost_chunks;
	unsigned long nr_lost_events;
	u64 all_runtime;
	u64 all_count;
	/* indexed by enum kwork_trace_type; the extra slot is presumably
	 * an "other/unknown" bucket — confirm in builtin-kwork.c */
	u64 nr_skipped_events[KWORK_TRACE_MAX + 1];
	/*
	 * perf kwork top data
	 */
	struct kwork_top_stat top_stat;
};
/*
 * Add @key's work item to @class's work_root tree; presumably a
 * find-or-create returning the tree-resident node — TODO confirm
 * against the definition in builtin-kwork.c.
 */
struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
				       struct kwork_class *class,
				       struct kwork_work *key);
#ifdef HAVE_BPF_SKEL
/* BPF-skeleton backed tracing/report entry points. */
int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork);
int perf_kwork__report_read_bpf(struct perf_kwork *kwork);
void perf_kwork__report_cleanup_bpf(void);
void perf_kwork__trace_start(void);
void perf_kwork__trace_finish(void);
#else  /* !HAVE_BPF_SKEL */
/*
 * Fallback stubs for builds without BPF skeleton support: the
 * prepare/read entry points fail with -1 so callers can report the
 * missing capability; the remaining hooks become no-ops.
 */
static inline int
perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}
static inline int
perf_kwork__report_read_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}
static inline void perf_kwork__report_cleanup_bpf(void) {}
static inline void perf_kwork__trace_start(void) {}
static inline void perf_kwork__trace_finish(void) {}
#endif  /* HAVE_BPF_SKEL */
#endif /* PERF_UTIL_KWORK_H */