// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <asm/unistd.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
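
/* tv_nsec value the userspace side of the test is expected to pass to
 * nanosleep(); the programs below use it to recognize the test's call.
 */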
#define MY_TV_NSEC 1337
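
/* One flag per attach type; set when the corresponding program observes
 * the test's nanosleep call, and read back by userspace to verify that
 * each hook fired.
 */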
bool tp_called = false;
bool raw_tp_called = false;
bool tp_btf_called = false;
bool kprobe_called = false;
bool fentry_called = false;
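
/* Plain tracepoint on nanosleep entry. The context layout matches the
 * kernel's struct syscall_trace_enter (kernel/trace/trace_syscalls.c),
 * so the syscall number is read from args->nr.
 */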
SEC("tp/syscalls/sys_enter_nanosleep")
int handle__tp(struct syscall_trace_enter *args)
{
	struct __kernel_timespec *ts;
	long tv_nsec;

	if (args->nr != __NR_nanosleep)
		return 0;

	ts = (void *)args->args[0];
	if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
	    tv_nsec != MY_TV_NSEC)
		return 0;

	tp_called = true;
	return 0;
}
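
/* Raw tracepoint on sys_enter: arguments arrive as (pt_regs, syscall id);
 * the first syscall argument is recovered via a CO-RE helper.
 */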
SEC("raw_tp/sys_enter")
int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
{
	struct __kernel_timespec *ts;
	long tv_nsec;

	if (id != __NR_nanosleep)
		return 0;

	ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
	if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
	    tv_nsec != MY_TV_NSEC)
		return 0;

	raw_tp_called = true;
	return 0;
}
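
/* BTF-typed tracepoint: same hook and checks as the raw tracepoint
 * above, but attached via BTF.
 */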
SEC("tp_btf/sys_enter")
int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
{
	struct __kernel_timespec *ts;
	long tv_nsec;

	if (id != __NR_nanosleep)
		return 0;

	ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
	if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
	    tv_nsec != MY_TV_NSEC)
		return 0;

	tp_btf_called = true;
	return 0;
}
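
/* Kprobe on hrtimer_start_range_ns(), an EXPORT_SYMBOL function that
 * should not be inlined (hrtimer_nanosleep can be inlined by clang and
 * would then never trigger the probe); tim carries the requested sleep
 * time, so it can be checked directly.
 */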
SEC("kprobe/hrtimer_start_range_ns")
int BPF_KPROBE(handle__kprobe, struct hrtimer *timer, ktime_t tim, u64 delta_ns,
	       const enum hrtimer_mode mode)
{
	if (tim == MY_TV_NSEC)
		kprobe_called = true;
	return 0;
}
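
/* Fentry on the same function as the kprobe above, with BTF-typed
 * argument access instead of pt_regs.
 */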
SEC("fentry/hrtimer_start_range_ns")
int BPF_PROG(handle__fentry, struct hrtimer *timer, ktime_t tim, u64 delta_ns,
	     const enum hrtimer_mode mode)
{
	if (tim == MY_TV_NSEC)
		fentry_called = true;
	return 0;
}

char _license[] SEC("license") = "GPL";