2019-05-29 07:18:02 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2016-04-04 13:32:20 -03:00
|
|
|
/*
|
|
|
|
* System call table mapper
|
|
|
|
*
|
|
|
|
* (C) 2016 Arnaldo Carvalho de Melo <acme@redhat.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "syscalltbl.h"
|
2016-04-04 17:52:18 -03:00
|
|
|
#include <stdlib.h>
|
2017-09-22 22:11:53 +00:00
|
|
|
#include <linux/compiler.h>
|
2020-05-28 17:19:17 -03:00
|
|
|
#include <linux/zalloc.h>
|
2016-04-04 17:52:18 -03:00
|
|
|
|
2018-04-09 18:26:48 +08:00
|
|
|
#ifdef HAVE_SYSCALL_TABLE_SUPPORT
|
2016-04-04 13:32:20 -03:00
|
|
|
#include <string.h>
|
2017-08-31 11:46:49 -03:00
|
|
|
#include "string2.h"
|
2016-04-04 17:52:18 -03:00
|
|
|
|
perf tools: Build syscall table .c header from kernel's syscall_64.tbl
We used libaudit to map ids to syscall names and vice-versa, but that
imposes a delay in supporting new syscalls, having to wait for libaudit
to get those new syscalls on its tables.
To remove that delay, for x86_64 initially, grab a copy of
arch/x86/entry/syscalls/syscall_64.tbl and use it to generate those
tables.
Syscalls currently not available in audit-libs:
# trace -e copy_file_range,membarrier,mlock2,pread64,pwrite64,timerfd_create,userfaultfd
Error: Invalid syscall copy_file_range, membarrier, mlock2, pread64, pwrite64, timerfd_create, userfaultfd
Hint: try 'perf list syscalls:sys_enter_*'
Hint: and: 'man syscalls'
#
With this patch:
# trace -e copy_file_range,membarrier,mlock2,pread64,pwrite64,timerfd_create,userfaultfd
8505.733 ( 0.010 ms): gnome-shell/2519 timerfd_create(flags: 524288) = 36
8506.688 ( 0.005 ms): gnome-shell/2519 timerfd_create(flags: 524288) = 40
30023.097 ( 0.025 ms): qemu-system-x8/24629 pwrite64(fd: 18, buf: 0x7f63ae382000, count: 4096, pos: 529592320) = 4096
31268.712 ( 0.028 ms): qemu-system-x8/24629 pwrite64(fd: 18, buf: 0x7f63afd8b000, count: 4096, pos: 2314133504) = 4096
31268.854 ( 0.016 ms): qemu-system-x8/24629 pwrite64(fd: 18, buf: 0x7f63afda2000, count: 4096, pos: 2314137600) = 4096
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-51xfjbxevdsucmnbc4ka5r88@git.kernel.org
[ Added make dep for 'prepare' in 'LIBPERF_IN', fix by Wang Nan to fix parallell build ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-04-04 19:05:36 -03:00
|
|
|
/*
 * Pick the generated syscall id -> name table for the architecture perf is
 * built for.  Each supported arch generates, from the kernel's syscall .tbl
 * files, a syscalls*.c providing:
 *
 *   SYSCALLTBL_<ARCH>_MAX_ID - the highest syscall id in the table, and
 *   syscalltbl_<arch>[]      - a sparse id -> name string array.
 */
#if defined(__x86_64__)
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_x86_64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_x86_64;
#elif defined(__i386__)
#include <asm/syscalls_32.c>
const int syscalltbl_native_max_id = SYSCALLTBL_x86_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_x86;
#elif defined(__s390x__)
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_S390_64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_s390_64;
#elif defined(__powerpc64__)
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_powerpc_64;
#elif defined(__powerpc__)
#include <asm/syscalls_32.c>
const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_32_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_powerpc_32;
#elif defined(__aarch64__)
#include <asm/syscalls.c>
const int syscalltbl_native_max_id = SYSCALLTBL_ARM64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_arm64;
#elif defined(__mips__)
#include <asm/syscalls_n64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_MIPS_N64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_mips_n64;
#elif defined(__loongarch__)
#include <asm/syscalls.c>
const int syscalltbl_native_max_id = SYSCALLTBL_LOONGARCH_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_loongarch;
#elif defined(__riscv)
#include <asm/syscalls.c>
const int syscalltbl_native_max_id = SYSCALLTBL_RISCV_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_riscv;
#else
/* No generated table for this architecture: provide a one-entry stub. */
const int syscalltbl_native_max_id = 0;
static const char *const syscalltbl_native[] = {
	[0] = "unknown",
};
#endif
|
|
|
|
|
2016-04-04 17:52:18 -03:00
|
|
|
/*
 * One entry of the flattened, name-sorted syscall table: the numeric id as
 * used by the kernel and the syscall's name string (points into the
 * generated syscalltbl_native[] table, so it is never freed per entry).
 */
struct syscall {
	int id;
	const char *name;
};

/*
 * bsearch() comparator: the key is a bare name string, the entry is a
 * struct syscall from the sorted table.
 */
static int syscallcmpname(const void *vkey, const void *ventry)
{
	const struct syscall *sc = ventry;

	return strcmp((const char *)vkey, sc->name);
}

/* qsort() comparator: order two table entries alphabetically by name. */
static int syscallcmp(const void *va, const void *vb)
{
	const struct syscall *lhs = va;
	const struct syscall *rhs = vb;

	return strcmp(lhs->name, rhs->name);
}
|
|
|
|
|
|
|
|
static int syscalltbl__init_native(struct syscalltbl *tbl)
|
|
|
|
{
|
|
|
|
int nr_entries = 0, i, j;
|
|
|
|
struct syscall *entries;
|
|
|
|
|
|
|
|
for (i = 0; i <= syscalltbl_native_max_id; ++i)
|
|
|
|
if (syscalltbl_native[i])
|
|
|
|
++nr_entries;
|
|
|
|
|
|
|
|
entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);
|
|
|
|
if (tbl->syscalls.entries == NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
for (i = 0, j = 0; i <= syscalltbl_native_max_id; ++i) {
|
|
|
|
if (syscalltbl_native[i]) {
|
|
|
|
entries[j].name = syscalltbl_native[i];
|
|
|
|
entries[j].id = i;
|
|
|
|
++j;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);
|
|
|
|
tbl->syscalls.nr_entries = nr_entries;
|
2019-07-18 20:19:30 -03:00
|
|
|
tbl->syscalls.max_id = syscalltbl_native_max_id;
|
2016-04-04 17:52:18 -03:00
|
|
|
return 0;
|
|
|
|
}
|
2016-04-04 13:32:20 -03:00
|
|
|
|
|
|
|
struct syscalltbl *syscalltbl__new(void)
|
|
|
|
{
|
|
|
|
struct syscalltbl *tbl = malloc(sizeof(*tbl));
|
|
|
|
if (tbl) {
|
2016-04-04 17:52:18 -03:00
|
|
|
if (syscalltbl__init_native(tbl)) {
|
|
|
|
free(tbl);
|
|
|
|
return NULL;
|
|
|
|
}
|
2016-04-04 13:32:20 -03:00
|
|
|
}
|
|
|
|
return tbl;
|
|
|
|
}
|
|
|
|
|
2016-04-04 17:52:18 -03:00
|
|
|
void syscalltbl__delete(struct syscalltbl *tbl)
|
|
|
|
{
|
|
|
|
zfree(&tbl->syscalls.entries);
|
|
|
|
free(tbl);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Map a syscall id to its name, or NULL if the id is out of range or has
 * no entry in the table (the generated table is sparse, so in-range holes
 * are NULL too).
 *
 * The original only checked the upper bound: a negative id indexed before
 * syscalltbl_native[], which is undefined behavior.
 */
const char *syscalltbl__name(const struct syscalltbl *tbl __maybe_unused, int id)
{
	if (id < 0 || id > syscalltbl_native_max_id)
		return NULL;

	return syscalltbl_native[id];
}
|
|
|
|
|
|
|
|
int syscalltbl__id(struct syscalltbl *tbl, const char *name)
|
|
|
|
{
|
|
|
|
struct syscall *sc = bsearch(name, tbl->syscalls.entries,
|
|
|
|
tbl->syscalls.nr_entries, sizeof(*sc),
|
|
|
|
syscallcmpname);
|
|
|
|
|
|
|
|
return sc ? sc->id : -1;
|
|
|
|
}
|
|
|
|
|
perf trace: Fix iteration of syscall ids in syscalltbl->entries
This is a bug found when implementing pretty-printing for the
landlock_add_rule system call, I decided to send this patch separately
because this is a serious bug that should be fixed fast.
I wrote a test program to do landlock_add_rule syscall in a loop,
yet perf trace -e landlock_add_rule freezes, giving no output.
This bug is introduced by the false understanding of the variable "key"
below:
```
for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
struct syscall *sc = trace__syscall_info(trace, NULL, key);
...
}
```
The code above seems right at the beginning, but when looking at
syscalltbl.c, I found these lines:
```
for (i = 0; i <= syscalltbl_native_max_id; ++i)
if (syscalltbl_native[i])
++nr_entries;
entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);
...
for (i = 0, j = 0; i <= syscalltbl_native_max_id; ++i) {
if (syscalltbl_native[i]) {
entries[j].name = syscalltbl_native[i];
entries[j].id = i;
++j;
}
}
```
meaning the key is merely an index to traverse the syscall table,
instead of the actual syscall id for this particular syscall.
So if one uses key to do trace__syscall_info(trace, NULL, key), because
key only goes up to trace->sctbl->syscalls.nr_entries, for example, on
my X86_64 machine, this number is 373, it will end up neglecting all
the rest of the syscall, in my case, everything after `rseq`, because
the traversal will stop at 373, and `rseq` is the last syscall whose id
is lower than 373
in tools/perf/arch/x86/include/generated/asm/syscalls_64.c:
```
...
[334] = "rseq",
[424] = "pidfd_send_signal",
...
```
The reason why the key is scrambled but perf trace works well is that
key is used in trace__syscall_info(trace, NULL, key) to do
trace->syscalls.table[id], this makes sure that the struct syscall returned
actually has an id the same value as key, making the later bpf_prog
matching all correct.
After fixing this bug, I can do perf trace on 38 more syscalls, and
because more syscalls are visible, we get 8 more syscalls that can be
augmented.
before:
perf $ perf trace -vv --max-events=1 |& grep Reusing
Reusing "open" BPF sys_enter augmenter for "stat"
Reusing "open" BPF sys_enter augmenter for "lstat"
Reusing "open" BPF sys_enter augmenter for "access"
Reusing "connect" BPF sys_enter augmenter for "accept"
Reusing "sendto" BPF sys_enter augmenter for "recvfrom"
Reusing "connect" BPF sys_enter augmenter for "bind"
Reusing "connect" BPF sys_enter augmenter for "getsockname"
Reusing "connect" BPF sys_enter augmenter for "getpeername"
Reusing "open" BPF sys_enter augmenter for "execve"
Reusing "open" BPF sys_enter augmenter for "truncate"
Reusing "open" BPF sys_enter augmenter for "chdir"
Reusing "open" BPF sys_enter augmenter for "mkdir"
Reusing "open" BPF sys_enter augmenter for "rmdir"
Reusing "open" BPF sys_enter augmenter for "creat"
Reusing "open" BPF sys_enter augmenter for "link"
Reusing "open" BPF sys_enter augmenter for "unlink"
Reusing "open" BPF sys_enter augmenter for "symlink"
Reusing "open" BPF sys_enter augmenter for "readlink"
Reusing "open" BPF sys_enter augmenter for "chmod"
Reusing "open" BPF sys_enter augmenter for "chown"
Reusing "open" BPF sys_enter augmenter for "lchown"
Reusing "open" BPF sys_enter augmenter for "mknod"
Reusing "open" BPF sys_enter augmenter for "statfs"
Reusing "open" BPF sys_enter augmenter for "pivot_root"
Reusing "open" BPF sys_enter augmenter for "chroot"
Reusing "open" BPF sys_enter augmenter for "acct"
Reusing "open" BPF sys_enter augmenter for "swapon"
Reusing "open" BPF sys_enter augmenter for "swapoff"
Reusing "open" BPF sys_enter augmenter for "delete_module"
Reusing "open" BPF sys_enter augmenter for "setxattr"
Reusing "open" BPF sys_enter augmenter for "lsetxattr"
Reusing "openat" BPF sys_enter augmenter for "fsetxattr"
Reusing "open" BPF sys_enter augmenter for "getxattr"
Reusing "open" BPF sys_enter augmenter for "lgetxattr"
Reusing "openat" BPF sys_enter augmenter for "fgetxattr"
Reusing "open" BPF sys_enter augmenter for "listxattr"
Reusing "open" BPF sys_enter augmenter for "llistxattr"
Reusing "open" BPF sys_enter augmenter for "removexattr"
Reusing "open" BPF sys_enter augmenter for "lremovexattr"
Reusing "fsetxattr" BPF sys_enter augmenter for "fremovexattr"
Reusing "open" BPF sys_enter augmenter for "mq_open"
Reusing "open" BPF sys_enter augmenter for "mq_unlink"
Reusing "fsetxattr" BPF sys_enter augmenter for "add_key"
Reusing "fremovexattr" BPF sys_enter augmenter for "request_key"
Reusing "fremovexattr" BPF sys_enter augmenter for "inotify_add_watch"
Reusing "fremovexattr" BPF sys_enter augmenter for "mkdirat"
Reusing "fremovexattr" BPF sys_enter augmenter for "mknodat"
Reusing "fremovexattr" BPF sys_enter augmenter for "fchownat"
Reusing "fremovexattr" BPF sys_enter augmenter for "futimesat"
Reusing "fremovexattr" BPF sys_enter augmenter for "newfstatat"
Reusing "fremovexattr" BPF sys_enter augmenter for "unlinkat"
Reusing "fremovexattr" BPF sys_enter augmenter for "linkat"
Reusing "open" BPF sys_enter augmenter for "symlinkat"
Reusing "fremovexattr" BPF sys_enter augmenter for "readlinkat"
Reusing "fremovexattr" BPF sys_enter augmenter for "fchmodat"
Reusing "fremovexattr" BPF sys_enter augmenter for "faccessat"
Reusing "fremovexattr" BPF sys_enter augmenter for "utimensat"
Reusing "connect" BPF sys_enter augmenter for "accept4"
Reusing "fremovexattr" BPF sys_enter augmenter for "name_to_handle_at"
Reusing "fremovexattr" BPF sys_enter augmenter for "renameat2"
Reusing "open" BPF sys_enter augmenter for "memfd_create"
Reusing "fremovexattr" BPF sys_enter augmenter for "execveat"
Reusing "fremovexattr" BPF sys_enter augmenter for "statx"
after
perf $ perf trace -vv --max-events=1 |& grep Reusing
Reusing "open" BPF sys_enter augmenter for "stat"
Reusing "open" BPF sys_enter augmenter for "lstat"
Reusing "open" BPF sys_enter augmenter for "access"
Reusing "connect" BPF sys_enter augmenter for "accept"
Reusing "sendto" BPF sys_enter augmenter for "recvfrom"
Reusing "connect" BPF sys_enter augmenter for "bind"
Reusing "connect" BPF sys_enter augmenter for "getsockname"
Reusing "connect" BPF sys_enter augmenter for "getpeername"
Reusing "open" BPF sys_enter augmenter for "execve"
Reusing "open" BPF sys_enter augmenter for "truncate"
Reusing "open" BPF sys_enter augmenter for "chdir"
Reusing "open" BPF sys_enter augmenter for "mkdir"
Reusing "open" BPF sys_enter augmenter for "rmdir"
Reusing "open" BPF sys_enter augmenter for "creat"
Reusing "open" BPF sys_enter augmenter for "link"
Reusing "open" BPF sys_enter augmenter for "unlink"
Reusing "open" BPF sys_enter augmenter for "symlink"
Reusing "open" BPF sys_enter augmenter for "readlink"
Reusing "open" BPF sys_enter augmenter for "chmod"
Reusing "open" BPF sys_enter augmenter for "chown"
Reusing "open" BPF sys_enter augmenter for "lchown"
Reusing "open" BPF sys_enter augmenter for "mknod"
Reusing "open" BPF sys_enter augmenter for "statfs"
Reusing "open" BPF sys_enter augmenter for "pivot_root"
Reusing "open" BPF sys_enter augmenter for "chroot"
Reusing "open" BPF sys_enter augmenter for "acct"
Reusing "open" BPF sys_enter augmenter for "swapon"
Reusing "open" BPF sys_enter augmenter for "swapoff"
Reusing "open" BPF sys_enter augmenter for "delete_module"
Reusing "open" BPF sys_enter augmenter for "setxattr"
Reusing "open" BPF sys_enter augmenter for "lsetxattr"
Reusing "openat" BPF sys_enter augmenter for "fsetxattr"
Reusing "open" BPF sys_enter augmenter for "getxattr"
Reusing "open" BPF sys_enter augmenter for "lgetxattr"
Reusing "openat" BPF sys_enter augmenter for "fgetxattr"
Reusing "open" BPF sys_enter augmenter for "listxattr"
Reusing "open" BPF sys_enter augmenter for "llistxattr"
Reusing "open" BPF sys_enter augmenter for "removexattr"
Reusing "open" BPF sys_enter augmenter for "lremovexattr"
Reusing "fsetxattr" BPF sys_enter augmenter for "fremovexattr"
Reusing "open" BPF sys_enter augmenter for "mq_open"
Reusing "open" BPF sys_enter augmenter for "mq_unlink"
Reusing "fsetxattr" BPF sys_enter augmenter for "add_key"
Reusing "fremovexattr" BPF sys_enter augmenter for "request_key"
Reusing "fremovexattr" BPF sys_enter augmenter for "inotify_add_watch"
Reusing "fremovexattr" BPF sys_enter augmenter for "mkdirat"
Reusing "fremovexattr" BPF sys_enter augmenter for "mknodat"
Reusing "fremovexattr" BPF sys_enter augmenter for "fchownat"
Reusing "fremovexattr" BPF sys_enter augmenter for "futimesat"
Reusing "fremovexattr" BPF sys_enter augmenter for "newfstatat"
Reusing "fremovexattr" BPF sys_enter augmenter for "unlinkat"
Reusing "fremovexattr" BPF sys_enter augmenter for "linkat"
Reusing "open" BPF sys_enter augmenter for "symlinkat"
Reusing "fremovexattr" BPF sys_enter augmenter for "readlinkat"
Reusing "fremovexattr" BPF sys_enter augmenter for "fchmodat"
Reusing "fremovexattr" BPF sys_enter augmenter for "faccessat"
Reusing "fremovexattr" BPF sys_enter augmenter for "utimensat"
Reusing "connect" BPF sys_enter augmenter for "accept4"
Reusing "fremovexattr" BPF sys_enter augmenter for "name_to_handle_at"
Reusing "fremovexattr" BPF sys_enter augmenter for "renameat2"
Reusing "open" BPF sys_enter augmenter for "memfd_create"
Reusing "fremovexattr" BPF sys_enter augmenter for "execveat"
Reusing "fremovexattr" BPF sys_enter augmenter for "statx"
TL;DR:
These are the new syscalls that can be augmented
Reusing "openat" BPF sys_enter augmenter for "open_tree"
Reusing "openat" BPF sys_enter augmenter for "openat2"
Reusing "openat" BPF sys_enter augmenter for "mount_setattr"
Reusing "openat" BPF sys_enter augmenter for "move_mount"
Reusing "open" BPF sys_enter augmenter for "fsopen"
Reusing "openat" BPF sys_enter augmenter for "fspick"
Reusing "openat" BPF sys_enter augmenter for "faccessat2"
Reusing "openat" BPF sys_enter augmenter for "fchmodat2"
as for the perf trace output:
before
perf $ perf trace -e faccessat2 --max-events=1
[no output]
after
perf $ ./perf trace -e faccessat2 --max-events=1
0.000 ( 0.037 ms): waybar/958 faccessat2(dfd: 40, filename: "uevent") = 0
P.S. The reason why this bug was not found in the past five years is
probably because it only happens to the newer syscalls whose id is
greater, for instance, faccessat2 of id 439, which not a lot of people
care about when using perf trace.
[Arnaldo]: notes
That and the fact that the BPF code was hidden before having to use -e,
that got changed kinda recently when we switched to using BPF skels for
augmenting syscalls in 'perf trace':
⬢[acme@toolbox perf-tools-next]$ git log --oneline tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
a9f4c6c999008c92 perf trace: Collect sys_nanosleep first argument
29d16de26df17e94 perf augmented_raw_syscalls.bpf: Move 'struct timespec64' to vmlinux.h
5069211e2f0b47e7 perf trace: Use the right bpf_probe_read(_str) variant for reading user data
33b725ce7b988756 perf trace: Avoid compile error wrt redefining bool
7d9642311b6d9d31 perf bpf augmented_raw_syscalls: Add an assert to make sure sizeof(augmented_arg->value) is a power of two.
262b54b6c9396823 perf bpf augmented_raw_syscalls: Add an assert to make sure sizeof(saddr) is a power of two.
1836480429d173c0 perf bpf_skel augmented_raw_syscalls: Cap the socklen parameter using &= sizeof(saddr)
cd2cece61ac5f900 perf trace: Tidy comments related to BPF + syscall augmentation
5e6da6be3082f77b perf trace: Migrate BPF augmentation to use a skeleton
⬢[acme@toolbox perf-tools-next]$
⬢[acme@toolbox perf-tools-next]$ git show --oneline --pretty=reference 5e6da6be3082f77b | head -1
5e6da6be3082f77b (perf trace: Migrate BPF augmentation to use a skeleton, 2023-08-10)
⬢[acme@toolbox perf-tools-next]$
I.e. from August, 2023.
One had as well to ask for BUILD_BPF_SKEL=1, which now is default if all
it needs is available on the system.
I simplified the code to not expose the 'struct syscall' outside of
tools/perf/util/syscalltbl.c, instead providing a function to go from
the index to the syscall id:
int syscalltbl__id_at_idx(struct syscalltbl *tbl, int idx);
Signed-off-by: Howard Chu <howardchu95@gmail.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Link: https://lore.kernel.org/lkml/ZmhlAxbVcAKoPTg8@x1
Link: https://lore.kernel.org/r/20240705132059.853205-2-howardchu95@gmail.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2024-07-05 21:20:51 +08:00
|
|
|
int syscalltbl__id_at_idx(struct syscalltbl *tbl, int idx)
|
|
|
|
{
|
|
|
|
struct syscall *syscalls = tbl->syscalls.entries;
|
|
|
|
|
|
|
|
return idx < tbl->syscalls.nr_entries ? syscalls[idx].id : -1;
|
|
|
|
}
|
|
|
|
|
2017-08-31 11:46:49 -03:00
|
|
|
int syscalltbl__strglobmatch_next(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct syscall *syscalls = tbl->syscalls.entries;
|
|
|
|
|
|
|
|
for (i = *idx + 1; i < tbl->syscalls.nr_entries; ++i) {
|
|
|
|
if (strglobmatch(syscalls[i].name, syscall_glob)) {
|
|
|
|
*idx = i;
|
|
|
|
return syscalls[i].id;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Start a glob search over the syscall table: reset the iterator to
 * "before the first entry" and return the first matching syscall id,
 * or -1 if nothing matches.
 */
int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
{
	*idx = -1;
	return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
}
|
|
|
|
|
2018-04-09 18:26:48 +08:00
|
|
|
#else /* HAVE_SYSCALL_TABLE_SUPPORT */
|
2016-04-04 17:52:18 -03:00
|
|
|
|
|
|
|
#include <libaudit.h>
|
|
|
|
|
|
|
|
/*
 * libaudit fallback constructor: zero the table and record the audit
 * machine type, which the audit_* id<->name translation calls below need.
 * Returns NULL on allocation failure.
 */
struct syscalltbl *syscalltbl__new(void)
{
	struct syscalltbl *tbl = zalloc(sizeof(*tbl));
	if (tbl)
		tbl->audit_machine = audit_detect_machine();
	return tbl;
}
|
|
|
|
|
2016-04-04 13:32:20 -03:00
|
|
|
/*
 * Release a syscall table.  In the libaudit fallback there is no entries
 * array, just the table itself; free(3) tolerates NULL, so a NULL tbl is
 * safe here.
 */
void syscalltbl__delete(struct syscalltbl *tbl)
{
	free(tbl);
}
|
|
|
|
|
|
|
|
/*
 * Map a syscall id to its name via libaudit, using the audit machine type
 * detected at syscalltbl__new() time.  Returns NULL for unknown ids
 * (libaudit contract -- TODO confirm against audit_syscall_to_name(3)).
 */
const char *syscalltbl__name(const struct syscalltbl *tbl, int id)
{
	return audit_syscall_to_name(id, tbl->audit_machine);
}
|
|
|
|
|
|
|
|
/*
 * Map a syscall name to its id via libaudit.  Returns a negative value
 * for unknown names (libaudit contract -- TODO confirm against
 * audit_name_to_syscall(3)).
 */
int syscalltbl__id(struct syscalltbl *tbl, const char *name)
{
	return audit_name_to_syscall(name, tbl->audit_machine);
}
|
2017-08-31 11:46:49 -03:00
|
|
|
|
2024-10-22 17:22:36 -03:00
|
|
|
/*
 * Without the generated table there is no compacted entries array, so the
 * iteration-index space is taken to be the id space itself: echo idx back.
 * NOTE(review): no upper bound is known here, so unlike the native
 * variant this never returns -1 to terminate iteration.
 */
int syscalltbl__id_at_idx(struct syscalltbl *tbl __maybe_unused, int idx)
{
	return idx;
}
|
|
|
|
|
2017-08-31 11:46:49 -03:00
|
|
|
/*
 * Glob matching needs the flattened name table, which the libaudit
 * fallback does not have: always report "no match".
 */
int syscalltbl__strglobmatch_next(struct syscalltbl *tbl __maybe_unused,
				  const char *syscall_glob __maybe_unused, int *idx __maybe_unused)
{
	return -1;
}
|
|
|
|
|
|
|
|
/*
 * Delegate to syscalltbl__strglobmatch_next(), which in this fallback
 * always returns -1: glob matching is unsupported without the generated
 * syscall table.
 */
int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
{
	return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
}
|
2018-04-09 18:26:48 +08:00
|
|
|
#endif /* HAVE_SYSCALL_TABLE_SUPPORT */
|