// SPDX-License-Identifier: GPL-2.0
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/hashmap.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <inttypes.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

static struct lock_contention_bpf *skel;
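/* runtime state for the optional slab-cache (kmem_cache) BPF iterator */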
static bool has_slab_iter;
static struct hashmap slab_hash;

static size_t slab_cache_hash(long key, void *ctx __maybe_unused)
{
	return key;
}

static bool slab_cache_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return key1 == key2;
}
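
/*
 * Probe for the kmem_cache BPF iterator: look up its context type
 * ("bpf_iter__kmem_cache") in vmlinux BTF.  On kernels without the
 * iterator, disable the slab_cache_iter program so that the skeleton
 * can still be loaded.
 */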
static void check_slab_cache_iter(struct lock_contention *con)
{
	s32 ret;

	hashmap__init(&slab_hash, slab_cache_hash, slab_cache_equal, /*ctx=*/NULL);

	con->btf = btf__load_vmlinux_btf();
	if (con->btf == NULL) {
		pr_debug("BTF loading failed: %s\n", strerror(errno));
		return;
	}

	ret = btf__find_by_name_kind(con->btf, "bpf_iter__kmem_cache", BTF_KIND_STRUCT);
	if (ret < 0) {
		bpf_program__set_autoload(skel->progs.slab_cache_iter, false);
		pr_debug("slab cache iterator is not available: %d\n", ret);
		return;
	}

	has_slab_iter = true;

	bpf_map__set_max_entries(skel->maps.slab_caches, con->map_nr_entries);
}
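
/*
 * Trigger the slab cache iterator (if available): each read() on the
 * iterator fd runs the BPF program, which fills the slab_caches map.
 * Then copy that map into slab_hash, keyed by cache ID, for name
 * lookups at report time.
 */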
static void run_slab_cache_iter(void)
{
	int fd;
	char buf[256];
	long key, *prev_key;

	if (!has_slab_iter)
		return;

	fd = bpf_iter_create(bpf_link__fd(skel->links.slab_cache_iter));
	if (fd < 0) {
		pr_debug("cannot create slab cache iter: %d\n", fd);
		return;
	}

	/* This will run the bpf program */
	while (read(fd, buf, sizeof(buf)) > 0)
		continue;

	close(fd);

	/* Read the slab cache map and build a hash with IDs */
	fd = bpf_map__fd(skel->maps.slab_caches);
	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		struct slab_cache_data *data;

		data = malloc(sizeof(*data));
		if (data == NULL)
			break;

		if (bpf_map_lookup_elem(fd, &key, data) < 0)
			break;

		hashmap__add(&slab_hash, data->id, data);
		prev_key = &key;
	}
}

static void exit_slab_cache_iter(void)
{
	struct hashmap_entry *cur;
	unsigned bkt;

	hashmap__for_each_entry(&slab_hash, cur, bkt)
		free(cur->pvalue);

	hashmap__clear(&slab_hash);
}
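
/*
 * Resolve the data the BPF side needs to recognize zone->lock: the size
 * of struct zone from BTF, the address of contig_page_data (UMA) or
 * node_data[] (NUMA), and the number of online nodes from sysfs.
 */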
static void init_numa_data(struct lock_contention *con)
{
	struct symbol *sym;
	struct map *kmap;
	char *buf = NULL, *p;
	size_t len;
	long last = -1;
	int ret;

	/*
	 * 'struct zone' is embedded in 'struct pglist_data' as an array.
	 * As we may not have full information of the struct zone in the
	 * (fake) vmlinux.h, let's get the actual size from BTF.
	 */
	ret = btf__find_by_name_kind(con->btf, "zone", BTF_KIND_STRUCT);
	if (ret < 0) {
		pr_debug("cannot get type of struct zone: %d\n", ret);
		return;
	}

	ret = btf__resolve_size(con->btf, ret);
	if (ret < 0) {
		pr_debug("cannot get size of struct zone: %d\n", ret);
		return;
	}
	skel->rodata->sizeof_zone = ret;

	/* UMA system doesn't have 'node_data[]' - just use contig_page_data. */
	sym = machine__find_kernel_symbol_by_name(con->machine,
						  "contig_page_data",
						  &kmap);
	if (sym) {
		skel->rodata->contig_page_data_addr = map__unmap_ip(kmap, sym->start);
		map__put(kmap);
		return;
	}

	/*
	 * The 'node_data' is an array of pointers to struct pglist_data.
	 * It needs to follow the pointer for each node in BPF to get the
	 * address of struct pglist_data and its zones.
	 */
	sym = machine__find_kernel_symbol_by_name(con->machine,
						  "node_data",
						  &kmap);
	if (sym == NULL)
		return;

	skel->rodata->node_data_addr = map__unmap_ip(kmap, sym->start);
	map__put(kmap);

	/* get the number of online nodes using the last node number + 1 */
	ret = sysfs__read_str("devices/system/node/online", &buf, &len);
	if (ret < 0) {
		pr_debug("failed to read online node: %d\n", ret);
		return;
	}

	p = buf;
	while (p && *p) {
		last = strtol(p, &p, 0);

		if (p && (*p == ',' || *p == '-' || *p == '\n'))
			p++;
	}
	skel->rodata->nr_nodes = last + 1;
	free(buf);
}
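
/*
 * Set up everything before the profiling session: open the skeleton,
 * size the maps from the target and filters, set the const control
 * knobs, load and attach the programs, then fill the filter maps.
 * The expected call sequence from builtin-lock.c is roughly:
 *
 *   lock_contention_prepare(&con);
 *   lock_contention_start();
 *   ... run the workload ...
 *   lock_contention_stop();
 *   lock_contention_read(&con);
 *   lock_contention_finish(&con);
 */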
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1, nslabs = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack) {
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
		if (con->owner) {
			bpf_map__set_value_size(skel->maps.stack_buf, con->max_stack * sizeof(u64));
			bpf_map__set_key_size(skel->maps.owner_stacks,
					      con->max_stack * sizeof(u64));
			bpf_map__set_max_entries(skel->maps.owner_stacks, con->map_nr_entries);
			bpf_map__set_max_entries(skel->maps.owner_data, con->map_nr_entries);
			bpf_map__set_max_entries(skel->maps.owner_stat, con->map_nr_entries);
			skel->rodata->max_stack = con->max_stack;
		}
	} else {
		bpf_map__set_max_entries(skel->maps.stacks, 1);
	}
	if (target__has_cpu(target)) {
		skel->rodata->has_cpu = 1;
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	}
	if (target__has_task(target)) {
		skel->rodata->has_task = 1;
		ntasks = perf_thread_map__nr(evlist->core.threads);
	}
	if (con->filters->nr_types) {
		skel->rodata->has_type = 1;
		ntypes = con->filters->nr_types;
	}
	if (con->filters->nr_cgrps) {
		skel->rodata->has_cgroup = 1;
		ncgrps = con->filters->nr_cgrps;
	}

	/* resolve lock name filters to addr */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		naddrs = con->filters->nr_addrs;
		skel->rodata->has_addr = 1;
	}

	/* resolve lock name in delays */
	if (con->nr_delays) {
		struct symbol *sym;
		struct map *kmap;

		for (i = 0; i < con->nr_delays; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->delays[i].sym,
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->delays[i].sym);
				continue;
			}

			con->delays[i].addr = map__unmap_ip(kmap, sym->start);
		}
		skel->rodata->lock_delay = 1;
		bpf_map__set_max_entries(skel->maps.lock_delays, con->nr_delays);
	}

	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
	bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);
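
	/*
	 * Control knobs set before loading: they are declared 'const
	 * volatile' in the BPF program so that the BPF core can optimize
	 * the branches they guard.
	 */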
	skel->rodata->stack_skip = con->stack_skip;
	skel->rodata->aggr_mode = con->aggr_mode;
	skel->rodata->needs_callstack = con->save_callstack;
	skel->rodata->lock_owner = con->owner;

	if (con->aggr_mode == LOCK_AGGR_CGROUP || con->filters->nr_cgrps) {
		if (cgroup_is_v2("perf_event"))
			skel->rodata->use_cgroup_v2 = 1;
	}

	check_slab_cache_iter(con);

	if (con->filters->nr_slabs && has_slab_iter) {
		skel->rodata->has_slab = 1;
		nslabs = con->filters->nr_slabs;
	}

	bpf_map__set_max_entries(skel->maps.slab_filter, nslabs);

	init_numa_data(con);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	if (con->filters->nr_cgrps) {
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cgroup_filter);

		for (i = 0; i < con->filters->nr_cgrps; i++)
			bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
	}

	if (con->nr_delays) {
		fd = bpf_map__fd(skel->maps.lock_delays);

		for (i = 0; i < con->nr_delays; i++)
			bpf_map_update_elem(fd, &con->delays[i].addr, &con->delays[i].time, BPF_ANY);
	}
	if (con->aggr_mode == LOCK_AGGR_CGROUP)
		read_all_cgroups(&con->cgroups);

	bpf_program__set_autoload(skel->progs.collect_lock_syms, false);

	lock_contention_bpf__attach(skel);

	/* run the slab iterator after attaching */
	run_slab_cache_iter();

	if (con->filters->nr_slabs) {
		u8 val = 1;
		int cache_fd;
		long key, *prev_key;

		fd = bpf_map__fd(skel->maps.slab_filter);

		/* scan the slab cache map and mark matching caches in the filter */
		cache_fd = bpf_map__fd(skel->maps.slab_caches);
		prev_key = NULL;
		while (!bpf_map_get_next_key(cache_fd, prev_key, &key)) {
			struct slab_cache_data data;

			if (bpf_map_lookup_elem(cache_fd, &key, &data) < 0)
				break;

			for (i = 0; i < con->filters->nr_slabs; i++) {
				if (!strcmp(con->filters->slabs[i], data.name)) {
					bpf_map_update_elem(fd, &key, &val, BPF_ANY);
					break;
				}
			}
			prev_key = &key;
		}
	}

	return 0;
}

/*
 * Run the BPF program directly using BPF_PROG_TEST_RUN to update the end
 * timestamp in ktime so that it can calculate delta easily.
 */
static void mark_end_timestamp(void)
{
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.flags = BPF_F_TEST_RUN_ON_CPU,
	);
	int prog_fd = bpf_program__fd(skel->progs.end_timestamp);

	bpf_prog_test_run_opts(prog_fd, &opts);
}
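
/*
 * Fold a still-pending tstamp entry into lock_stat as if the contention
 * ended at end_ts.  The stat key depends on the aggregation mode.
 */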
static void update_lock_stat(int map_fd, int pid, u64 end_ts,
			     enum lock_aggr_mode aggr_mode,
			     struct tstamp_data *ts_data)
{
	u64 delta;
	struct contention_key stat_key = {};
	struct contention_data stat_data;

	if (ts_data->timestamp >= end_ts)
		return;

	delta = end_ts - ts_data->timestamp;

	switch (aggr_mode) {
	case LOCK_AGGR_CALLER:
		stat_key.stack_id = ts_data->stack_id;
		break;
	case LOCK_AGGR_TASK:
		stat_key.pid = pid;
		break;
	case LOCK_AGGR_ADDR:
		stat_key.lock_addr_or_cgroup = ts_data->lock;
		break;
	case LOCK_AGGR_CGROUP:
		/* TODO */
		return;
	default:
		return;
	}

	if (bpf_map_lookup_elem(map_fd, &stat_key, &stat_data) < 0)
		return;

	stat_data.total_time += delta;
	stat_data.count++;

	if (delta > stat_data.max_time)
		stat_data.max_time = delta;
	if (delta < stat_data.min_time)
		stat_data.min_time = delta;

	bpf_map_update_elem(map_fd, &stat_key, &stat_data, BPF_EXIST);
}

/*
 * Account entries in the tstamp map (which didn't see the corresponding
 * lock:contention_end tracepoint) using end_ts.
 */
static void account_end_timestamp(struct lock_contention *con)
{
	int ts_fd, stat_fd;
	int *prev_key, key;
	u64 end_ts = skel->bss->end_ts;
	int total_cpus;
	enum lock_aggr_mode aggr_mode = con->aggr_mode;
	struct tstamp_data ts_data, *cpu_data;

	/* Iterate per-task tstamp map (key = TID) */
	ts_fd = bpf_map__fd(skel->maps.tstamp);
	stat_fd = bpf_map__fd(skel->maps.lock_stat);

	prev_key = NULL;
	while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
		if (bpf_map_lookup_elem(ts_fd, &key, &ts_data) == 0) {
			int pid = key;

			if (aggr_mode == LOCK_AGGR_TASK && con->owner)
				pid = ts_data.flags;

			update_lock_stat(stat_fd, pid, end_ts, aggr_mode,
					 &ts_data);
		}

		prev_key = &key;
	}

	/* Now check the per-cpu tstamp map, which has no TID in the key. */
	if (aggr_mode == LOCK_AGGR_TASK || aggr_mode == LOCK_AGGR_CGROUP)
		return;

	total_cpus = cpu__max_cpu().cpu;
	ts_fd = bpf_map__fd(skel->maps.tstamp_cpu);

	cpu_data = calloc(total_cpus, sizeof(*cpu_data));
	if (cpu_data == NULL)
		return;

	prev_key = NULL;
	while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
		if (bpf_map_lookup_elem(ts_fd, &key, cpu_data) < 0)
			goto next;

		for (int i = 0; i < total_cpus; i++) {
			if (cpu_data[i].lock == 0)
				continue;

			update_lock_stat(stat_fd, -1, end_ts, aggr_mode,
					 &cpu_data[i]);
		}

next:
		prev_key = &key;
	}
	free(cpu_data);
}

int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	mark_end_timestamp();
	return 0;
}
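
/*
 * Pick a display name for an entry: the task comm, the lock symbol
 * (or slab cache / cgroup name), or the first caller outside the lock
 * internals, depending on the aggregation mode.
 */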
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace, u32 flags)
{
	int idx = 0;
	u64 addr;
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		int pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update idle comm which contains CPU number */
		if (pid) {
			struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t != NULL &&
			    !bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0)) {
				snprintf(name_buf, sizeof(name_buf), "%s", task.comm);
				return name_buf;
			}
		}
		return "";
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		int lock_fd = bpf_map__fd(skel->maps.lock_syms);
		struct slab_cache_data *slab_data;

		/* per-process locks set upper bits of the flags */
		if (flags & LCD_F_MMAP_LOCK)
			return "mmap_lock";
		if (flags & LCD_F_SIGHAND_LOCK)
			return "siglock";

		/* global locks with symbols */
		sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
		if (sym)
			return sym->name;

		/* try semi-global locks collected separately */
		if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
			if (flags == LOCK_CLASS_RQLOCK)
				return "rq_lock";
			if (flags == LOCK_CLASS_ZONE_LOCK)
				return "zone_lock";
		}

		/* look up slab_hash for dynamic locks in a slab object */
		if (hashmap__find(&slab_hash, flags & LCB_F_SLAB_ID_MASK, &slab_data)) {
			snprintf(name_buf, sizeof(name_buf), "&%s", slab_data->name);
			return name_buf;
		}

		return "";
	}

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		u64 cgrp_id = key->lock_addr_or_cgroup;
		struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);

		if (cgrp)
			return cgrp->name;

		snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64 "", cgrp_id);
		return name_buf;
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = map__map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}
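
/*
 * Pop one owner callstack and its stats from the owner maps and convert
 * them into a struct lock_stat.  Returns NULL when the maps are
 * exhausted or on error.
 */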
struct lock_stat *pop_owner_stack_trace(struct lock_contention *con)
{
	int stacks_fd, stat_fd;
	u64 *stack_trace = NULL;
	s32 stack_id;
	struct contention_key ckey = {};
	struct contention_data cdata = {};
	size_t stack_size = con->max_stack * sizeof(*stack_trace);
	struct lock_stat *st = NULL;

	stacks_fd = bpf_map__fd(skel->maps.owner_stacks);
	stat_fd = bpf_map__fd(skel->maps.owner_stat);
	if (!stacks_fd || !stat_fd)
		goto out_err;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		goto out_err;

	if (bpf_map_get_next_key(stacks_fd, NULL, stack_trace))
		goto out_err;

	bpf_map_lookup_elem(stacks_fd, stack_trace, &stack_id);
	ckey.stack_id = stack_id;
	bpf_map_lookup_elem(stat_fd, &ckey, &cdata);

	st = zalloc(sizeof(struct lock_stat));
	if (!st)
		goto out_err;

	st->name = strdup(stack_trace[0] ? lock_contention_get_name(con, NULL, stack_trace, 0) :
			  "unknown");
	if (!st->name)
		goto out_err;

	st->flags = cdata.flags;
	st->nr_contended = cdata.count;
	st->wait_time_total = cdata.total_time;
	st->wait_time_max = cdata.max_time;
	st->wait_time_min = cdata.min_time;
	st->callstack = stack_trace;

	if (cdata.count)
		st->avg_wait_time = cdata.total_time / cdata.count;

	bpf_map_delete_elem(stacks_fd, stack_trace);
	bpf_map_delete_elem(stat_fd, &ckey);

	return st;

out_err:
	free(stack_trace);
	free(st);

	return NULL;
}
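
/*
 * Drain the BPF lock_stat map into the tool's lock stats, resolving
 * names and callstacks on the way.  Called once after the session stops.
 */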
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key = {};
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->fails.task = skel->bss->task_fail;
	con->fails.stack = skel->bss->stack_fail;
	con->fails.time = skel->bss->time_fail;
	con->fails.data = skel->bss->data_fail;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

	account_end_timestamp(con);

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = machine__findnew_thread(machine,
							      /*pid=*/0,
							      /*tid=*/0);
		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			.flags = BPF_F_TEST_RUN_ON_CPU,
		);
		int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);

		bpf_prog_test_run_opts(prog_fd, &opts);
	}

	/* make sure it loads the kernel map */
	maps__load_first(machine->kmaps);

	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace, con->max_stack)) {
				con->nr_filtered += data.count;
				goto next;
			}
		}

		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
		case LOCK_AGGR_CGROUP:
			ls_key = key.lock_addr_or_cgroup;
			break;
		default:
			goto next;
		}

		st = lock_stat_find(ls_key);
		if (st != NULL) {
			st->wait_time_total += data.total_time;
			if (st->wait_time_max < data.max_time)
				st->wait_time_max = data.max_time;
			if (st->wait_time_min > data.min_time)
				st->wait_time_min = data.min_time;

			st->nr_contended += data.count;
			if (st->nr_contended)
				st->avg_wait_time = st->wait_time_total / st->nr_contended;
			goto next;
		}

		name = lock_contention_get_name(con, &key, stack_trace, data.flags);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);

	return err;
}
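
/*
 * Tear everything down: disable and destroy the skeleton, drop cgroup
 * names, the slab cache hash, and the vmlinux BTF handle.
 */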
int lock_contention_finish(struct lock_contention *con)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	while (!RB_EMPTY_ROOT(&con->cgroups)) {
		struct rb_node *node = rb_first(&con->cgroups);
		struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

		rb_erase(node, &con->cgroups);
		cgroup__put(cgrp);
	}

	exit_slab_cache_iter();
	btf__free(con->btf);

	return 0;
}