2019-05-21 20:14:21 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
// Copyright (c) 2019 Facebook
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/ptrace.h>
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdbool.h>
|
|
|
|
#include <linux/bpf.h>
|
2020-01-20 14:06:45 +01:00
|
|
|
#include <bpf/bpf_helpers.h>
|
selftests/bpf: add bpf_for_each(), bpf_for(), and bpf_repeat() macros
Add bpf_for_each(), bpf_for(), and bpf_repeat() macros that make writing
open-coded iterator-based loops much more convenient and natural. These
macros utilize cleanup attribute to ensure proper destruction of the
iterator and thanks to that manage to provide the ergonomics that is
very close to C language's for() construct. Typical loop would look like:
int i;
int arr[N];
bpf_for(i, 0, N) {
/* verifier will know that i >= 0 && i < N, so could be used to
* directly access array elements with no extra checks
*/
arr[i] = i;
}
bpf_repeat() is very similar, but it doesn't expose iteration number and
is meant as a simple "repeat action N times" loop:
bpf_repeat(N) { /* whatever, N times */ }
Note that `break` and `continue` statements inside the {} block work as
expected.
bpf_for_each() is a generalization over any kind of BPF open-coded
iterator allowing to use for-each-like approach instead of calling
low-level bpf_iter_<type>_{new,next,destroy}() APIs explicitly. E.g.:
struct cgroup *cg;
bpf_for_each(cgroup, cg, some, input, args) {
/* do something with each cg */
}
would call (not-yet-implemented) bpf_iter_cgroup_{new,next,destroy}()
functions to form a loop over cgroups, where `some, input, args` are
passed verbatim into constructor as
bpf_iter_cgroup_new(&it, some, input, args).
As a first demonstration, add pyperf variant based on the bpf_for() loop.
Also clean up a few tests that either included bpf_misc.h header
unnecessarily from the user-space, which is unsupported, or included it
before any common types are defined (and thus leading to unnecessary
compilation warnings, potentially).
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230308184121.1165081-6-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2023-03-08 10:41:18 -08:00
|
|
|
#include "bpf_misc.h"
|
2024-02-08 21:36:12 +01:00
|
|
|
#include "bpf_compiler.h"
|
2019-05-21 20:14:21 -07:00
|
|
|
|
|
|
|
/* Buffer sizes for symbolized Python names copied from user memory. */
#define FUNCTION_NAME_LEN 64	/* max bytes of a Python function name */
#define FILE_NAME_LEN 128	/* max bytes of a Python source-file path */
#define TASK_COMM_LEN 16	/* matches the kernel's task_struct comm[] size */
|
|
|
|
|
|
|
|
/* Byte offsets of fields inside CPython's interpreter structs
 * (PyThreadState, PyFrameObject, PyCodeObject, string objects).
 * They vary by Python version, so they are supplied at runtime via
 * PidData rather than hard-coded.
 */
typedef struct {
	int PyThreadState_frame;	/* offset of the innermost frame pointer */
	int PyThreadState_thread;	/* offset of the owning pthread id */
	int PyFrameObject_back;		/* offset of the previous-frame link */
	int PyFrameObject_code;		/* offset of the PyCodeObject pointer */
	int PyFrameObject_lineno;	/* unused in the visible code paths */
	int PyCodeObject_filename;	/* offset of the filename string object */
	int PyCodeObject_name;		/* offset of the function-name string object */
	int String_data;		/* offset of the raw chars inside a string object */
	int String_size;		/* unused in the visible code paths */
} OffsetConfig;
|
|
|
|
|
|
|
|
/* Per-process profiling configuration, looked up by pid in pidmap.
 * Presumably populated by the user-space side of the test harness.
 */
typedef struct {
	uintptr_t current_state_addr;	/* user-space address of CPython's current PyThreadState pointer */
	uintptr_t tls_key_addr;		/* user-space address of the pthread TLS key holding the thread state */
	OffsetConfig offsets;		/* struct-field offsets for this Python version */
	bool use_tls;			/* if set, resolve the thread state via TLS instead of current_state_addr */
} PidData;
|
|
|
|
|
|
|
|
/* Run counters exported through statsmap. */
typedef struct {
	uint32_t success;	/* number of __on_event() invocations that reached the end */
} Stats;
|
|
|
|
|
|
|
|
/* Key type of symbolmap: a symbolized Python frame (function + file).
 * The all-zero Symbol is reserved as the symbol-id counter entry.
 */
typedef struct {
	char name[FUNCTION_NAME_LEN];	/* Python function name */
	char file[FILE_NAME_LEN];	/* Python source-file path */
} Symbol;
|
|
|
|
|
|
|
|
/* Sample emitted to user space via perfmap. Only the bytes up to
 * offsetof(Event, metadata) are sent (see the bpf_perf_event_output()
 * call in __on_event()), so 'metadata' and 'dummy_safeguard' are never
 * transmitted. STACK_MAX_LEN must be #defined by the including .c file.
 */
typedef struct {
	uint32_t pid;			/* tgid of the sampled task */
	uint32_t tid;			/* thread id (low 32 bits of pid_tgid) */
	char comm[TASK_COMM_LEN];	/* task command name */
	int32_t kernel_stack_id;	/* from bpf_get_stackid() into stackmap */
	int32_t user_stack_id;		/* same, with BPF_F_USER_STACK */
	bool thread_current;		/* resolved thread state == interpreter's current one */
	bool pthread_match;		/* TLS pthread id matched PyThreadState's owner */
	bool stack_complete;		/* last observed frame pointer was NULL */
	int16_t stack_len;		/* number of valid entries in stack[] */
	int32_t stack[STACK_MAX_LEN];	/* symbol ids (see symbolmap) */

	int has_meta;			/* always 0 here; last field included in the output */
	int metadata;			/* not transmitted (output cut at offsetof(Event, metadata)) */
	char dummy_safeguard;		/* NOTE(review): purpose unclear -- presumably a tail guard; confirm */
} Event;
|
|
|
|
|
|
|
|
|
|
|
|
/* Local pid_t: none of the headers included above provides it. */
typedef int pid_t;
|
|
|
|
|
|
|
|
/* Pointers read out of one PyFrameObject and its PyCodeObject,
 * cached here between the probe reads in get_frame_data().
 */
typedef struct {
	void* f_back;		// PyFrameObject.f_back, previous frame
	void* f_code;		// PyFrameObject.f_code, pointer to PyCodeObject
	void* co_filename;	// PyCodeObject.co_filename
	void* co_name;		// PyCodeObject.co_name
} FrameData;
|
|
|
|
|
2020-09-03 13:35:40 -07:00
|
|
|
/* Resolve the current thread's PyThreadState through pthread TLS.
 * Reads the TLS key from user memory, then indexes the thread's
 * pthread-specific-data array relative to tls_base.
 *
 * The 0x310 / 0x10 / 0x08 constants hard-code a glibc struct pthread
 * layout (presumably the specific_1stblock array: 0x10-byte slots with
 * the data pointer at +0x08) -- NOTE(review): not portable across libc
 * builds; confirm against the target glibc.
 *
 * Probe-read failures are deliberately ignored: this is best-effort
 * test/profiling code.
 */
#ifdef SUBPROGS
__noinline	/* build as a separate BPF subprogram when testing subprog support */
#else
__always_inline
#endif
static void *get_thread_state(void *tls_base, PidData *pidData)
{
	void* thread_state;
	int key;

	bpf_probe_read_user(&key, sizeof(key), (void*)(long)pidData->tls_key_addr);
	bpf_probe_read_user(&thread_state, sizeof(thread_state),
			    tls_base + 0x310 + key * 0x10 + 0x08);
	return thread_state;
}
|
|
|
|
|
2019-07-02 20:26:51 +02:00
|
|
|
/* Copy one PyFrameObject's links into *frame and symbolize it into *symbol.
 * All reads use the Python-version-specific offsets from pidData and are
 * best-effort (bpf_probe_read_user* return values are ignored).
 * Returns false only when the frame has no code object; a true return does
 * not guarantee that symbol->name/file were filled (NULL string pointers
 * leave the previous contents of *symbol in place).
 */
static __always_inline bool get_frame_data(void *frame_ptr, PidData *pidData,
					   FrameData *frame, Symbol *symbol)
{
	// read data from PyFrameObject
	bpf_probe_read_user(&frame->f_back,
			    sizeof(frame->f_back),
			    frame_ptr + pidData->offsets.PyFrameObject_back);
	bpf_probe_read_user(&frame->f_code,
			    sizeof(frame->f_code),
			    frame_ptr + pidData->offsets.PyFrameObject_code);

	// read data from PyCodeObject
	if (!frame->f_code)
		return false;
	bpf_probe_read_user(&frame->co_filename,
			    sizeof(frame->co_filename),
			    frame->f_code + pidData->offsets.PyCodeObject_filename);
	bpf_probe_read_user(&frame->co_name,
			    sizeof(frame->co_name),
			    frame->f_code + pidData->offsets.PyCodeObject_name);
	// read actual names into symbol
	if (frame->co_filename)
		bpf_probe_read_user_str(&symbol->file,
					sizeof(symbol->file),
					frame->co_filename +
					pidData->offsets.String_data);
	if (frame->co_name)
		bpf_probe_read_user_str(&symbol->name,
					sizeof(symbol->name),
					frame->co_name +
					pidData->offsets.String_data);
	return true;
}
|
|
|
|
|
2019-07-05 08:50:12 -07:00
|
|
|
/* pid -> PidData: which processes to profile and how to introspect them. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, PidData);
} pidmap SEC(".maps");
|
|
|
|
|
|
|
|
/* Single scratch Event slot (key 0) built up by __on_event() before it is
 * emitted through perfmap -- presumably kept in a map because Event (with
 * its STACK_MAX_LEN array) is too large for the BPF stack.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, Event);
} eventmap SEC(".maps");
|
|
|
|
|
|
|
|
/* Symbol -> symbol id. The entry keyed by the all-zero Symbol doubles as
 * the id counter and must be pre-seeded (its absence aborts __on_event()).
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, Symbol);
	__type(value, int);
} symbolmap SEC(".maps");
|
|
|
|
|
|
|
|
/* Success counter read by user space (slot 0). */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, Stats);
} statsmap SEC(".maps");
|
|
|
|
|
|
|
|
/* Per-CPU perf ring buffer through which Event samples are emitted. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, 32);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} perfmap SEC(".maps");
|
|
|
|
|
|
|
|
/* Native kernel/user stack traces captured with bpf_get_stackid();
 * up to 127 frames of 8 bytes per entry.
 */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1000);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(long long) * 127);
} stackmap SEC(".maps");
|
2019-05-21 20:14:21 -07:00
|
|
|
|
2021-11-29 19:06:21 -08:00
|
|
|
#ifdef USE_BPF_LOOP
/* State shared between __on_event() and the bpf_loop() callback below. */
struct process_frame_ctx {
	int cur_cpu;
	int32_t *symbol_counter;
	void *frame_ptr;	/* current PyFrameObject; advanced each iteration */
	FrameData *frame;
	PidData *pidData;
	Symbol *sym;
	Event *event;
	bool done;		/* set on unrecoverable error so __on_event() can bail */
};

/* bpf_loop() callback: symbolize one Python stack frame per iteration.
 * Returns 1 to stop iterating (lookup failure or stack bound reached),
 * 0 to continue with the next frame.
 */
static int process_frame_callback(__u32 i, struct process_frame_ctx *ctx)
{
	int zero = 0;
	void *frame_ptr = ctx->frame_ptr;
	PidData *pidData = ctx->pidData;
	FrameData *frame = ctx->frame;
	int32_t *symbol_counter = ctx->symbol_counter;
	int cur_cpu = ctx->cur_cpu;
	Event *event = ctx->event;
	Symbol *sym = ctx->sym;

	if (frame_ptr && get_frame_data(frame_ptr, pidData, frame, sym)) {
		/* candidate id in case this symbol is new; ids are sharded by CPU */
		int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu;
		int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, sym);

		if (!symbol_id) {
			bpf_map_update_elem(&symbolmap, sym, &zero, 0);
			symbol_id = bpf_map_lookup_elem(&symbolmap, sym);
			if (!symbol_id) {
				ctx->done = true;
				return 1;
			}
		}
		if (*symbol_id == new_symbol_id)
			(*symbol_counter)++;

		/* keep the verifier convinced i is bounded before indexing stack[] */
		barrier_var(i);
		if (i >= STACK_MAX_LEN)
			return 1;

		event->stack[i] = *symbol_id;

		event->stack_len = i + 1;
		/* BUG FIX: advance the frame through ctx, not the local copy.
		 * The local frame_ptr is re-read from ctx->frame_ptr at the top
		 * of every invocation, so writing only the local would make the
		 * loop re-process the same innermost frame forever.
		 */
		ctx->frame_ptr = frame->f_back;
	}
	return 0;
}
#endif /* USE_BPF_LOOP */
|
|
|
|
|
2020-01-09 22:41:22 -08:00
|
|
|
/* Core handler: build one Event (comm, native stack ids, symbolized Python
 * stack) for the current task and emit it through perfmap. Returns 0 on all
 * paths. The linkage varies by build flavor to exercise different BPF
 * program shapes (global function, subprogram, or fully inlined).
 */
#ifdef GLOBAL_FUNC
__noinline
#elif defined(SUBPROGS)
static __noinline
#else
static __always_inline
#endif
int __on_event(struct bpf_raw_tracepoint_args *ctx)
{
	uint64_t pid_tgid = bpf_get_current_pid_tgid();
	pid_t pid = (pid_t)(pid_tgid >> 32);
	PidData* pidData = bpf_map_lookup_elem(&pidmap, &pid);
	if (!pidData)
		return 0;	/* this process is not being profiled */

	/* per-CPU-unsafe scratch Event slot; too big for the BPF stack */
	int zero = 0;
	Event* event = bpf_map_lookup_elem(&eventmap, &zero);
	if (!event)
		return 0;

	event->pid = pid;

	event->tid = (pid_t)pid_tgid;	/* low 32 bits = thread id */
	bpf_get_current_comm(&event->comm, sizeof(event->comm));

	/* native stacks are captured by id into stackmap */
	event->user_stack_id = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
	event->kernel_stack_id = bpf_get_stackid(ctx, &stackmap, 0);

	/* PyThreadState the interpreter currently considers active */
	void* thread_state_current = (void*)0;
	bpf_probe_read_user(&thread_state_current,
			    sizeof(thread_state_current),
			    (void*)(long)pidData->current_state_addr);

	/* NOTE(review): the task_struct pointer is used as the TLS base for
	 * get_thread_state() -- confirm this matches the harness's layout
	 * assumptions on the target arch.
	 */
	struct task_struct* task = (struct task_struct*)bpf_get_current_task();
	void* tls_base = (void*)task;

	void* thread_state = pidData->use_tls ? get_thread_state(tls_base, pidData)
		: thread_state_current;
	event->thread_current = thread_state == thread_state_current;

	if (pidData->use_tls) {
		uint64_t pthread_created;
		uint64_t pthread_self;
		/* 0x10: hard-coded offset of the pthread's own id in TLS;
		 * NOTE(review): glibc-specific, confirm per target libc. */
		bpf_probe_read_user(&pthread_self, sizeof(pthread_self),
				    tls_base + 0x10);

		bpf_probe_read_user(&pthread_created,
				    sizeof(pthread_created),
				    thread_state +
				    pidData->offsets.PyThreadState_thread);
		event->pthread_match = pthread_created == pthread_self;
	} else {
		event->pthread_match = 1;
	}

	if (event->pthread_match || !pidData->use_tls) {
		void* frame_ptr;
		FrameData frame;
		Symbol sym = {};
		int cur_cpu = bpf_get_smp_processor_id();

		/* innermost frame of the Python call stack */
		bpf_probe_read_user(&frame_ptr,
				    sizeof(frame_ptr),
				    thread_state +
				    pidData->offsets.PyThreadState_frame);

		/* the all-zero Symbol key doubles as the symbol-id counter;
		 * it must be pre-seeded or we give up here */
		int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym);
		if (symbol_counter == NULL)
			return 0;
#ifdef USE_BPF_LOOP
	/* deliberately shadows the raw-tracepoint ctx parameter in this scope */
	struct process_frame_ctx ctx = {
		.cur_cpu = cur_cpu,
		.symbol_counter = symbol_counter,
		.frame_ptr = frame_ptr,
		.frame = &frame,
		.pidData = pidData,
		.sym = &sym,
		.event = event,
	};

	bpf_loop(STACK_MAX_LEN, process_frame_callback, &ctx, 0);
	if (ctx.done)
		return 0;
#else
/* select the loop flavor/unroll strategy under test */
#if defined(USE_ITER)
/* no for loop, no unrolling */
#elif defined(NO_UNROLL)
	__pragma_loop_no_unroll
#elif defined(UNROLL_COUNT)
	__pragma_loop_unroll_count(UNROLL_COUNT)
#else
	__pragma_loop_unroll_full
#endif /* NO_UNROLL */
	/* Unwind python stack */
#ifdef USE_ITER
	int i;
	bpf_for(i, 0, STACK_MAX_LEN) {
#else /* !USE_ITER */
	for (int i = 0; i < STACK_MAX_LEN; ++i) {
#endif
		if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) {
			/* candidate id if the symbol is new; sharded by CPU */
			int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu;
			int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, &sym);
			if (!symbol_id) {
				bpf_map_update_elem(&symbolmap, &sym, &zero, 0);
				symbol_id = bpf_map_lookup_elem(&symbolmap, &sym);
				if (!symbol_id)
					return 0;
			}
			if (*symbol_id == new_symbol_id)
				(*symbol_counter)++;
			event->stack[i] = *symbol_id;
			event->stack_len = i + 1;
			frame_ptr = frame.f_back;	/* walk outward */
		}
	}
#endif /* USE_BPF_LOOP */
		/* NOTE(review): under USE_BPF_LOOP this reads the local
		 * frame_ptr, not ctx.frame_ptr, so stack_complete reflects only
		 * the initial frame there -- confirm intent. */
		event->stack_complete = frame_ptr == NULL;
	} else {
		event->stack_complete = 1;
	}

	Stats* stats = bpf_map_lookup_elem(&statsmap, &zero);
	if (stats)
		stats->success++;

	/* emit only the fields up to (not including) 'metadata' */
	event->has_meta = 0;
	bpf_perf_event_output(ctx, &perfmap, 0, event, offsetof(Event, metadata));
	return 0;
}
|
|
|
|
|
|
|
|
/* Raw tracepoint entry point. Calls the handler five times back to back --
 * presumably to multiply the program's instruction count for verifier
 * scalability testing (confirm with the test harness); do not collapse
 * into a loop.
 */
SEC("raw_tracepoint/kfree_skb")
int on_event(struct bpf_raw_tracepoint_args* ctx)
{
	int ret = 0;
	ret |= __on_event(ctx);
	ret |= __on_event(ctx);
	ret |= __on_event(ctx);
	ret |= __on_event(ctx);
	ret |= __on_event(ctx);
	return ret;
}
|
|
|
|
|
|
|
|
/* License declaration consumed by the kernel at program load time. */
char _license[] SEC("license") = "GPL";
|