Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Logic to prevent callbacks from acquiring new references for the program (i.e. leaving acquired references behind), and from releasing the caller's references (i.e. those acquired in parent frames), was introduced in commit 9d9d00ac29 ("bpf: Fix reference state management for synchronous callbacks"). This was necessary because, at the time, the verifier simulated each callback exactly once, even though at runtime it could execute N times (where N may be zero). A callback that left lingering resources or cleared caller resources could therefore do so repeatedly, operating on undefined state or leaking memory. With the fixes to callback verification in commit ab5cfac139 ("bpf: verify callbacks as if they are called unknown number of times"), all of this extra logic is no longer necessary, so drop it as part of this commit.

Cc: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20241109231430.2475236-3-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
48 lines
1.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
#include "bpf/libbpf.h"
#include <test_progs.h>
#include <network_helpers.h>

#include "cb_refs.skel.h"

static char log_buf[1024 * 1024];

struct {
	const char *prog_name;
	const char *err_msg;
} cb_refs_tests[] = {
	{ "underflow_prog", "must point to scalar, or struct with scalar" },
	{ "leak_prog", "Possibly NULL pointer passed to helper arg2" },
	{ "nested_cb", "Unreleased reference id=4 alloc_insn=2" }, /* alloc_insn=2{4,5} */
	{ "non_cb_transfer_ref", "Unreleased reference id=4 alloc_insn=1" }, /* alloc_insn=1{1,2} */
};

void test_cb_refs(void)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
						.kernel_log_size = sizeof(log_buf),
						.kernel_log_level = 1);
	struct bpf_program *prog;
	struct cb_refs *skel;
	int i;

	for (i = 0; i < ARRAY_SIZE(cb_refs_tests); i++) {
		LIBBPF_OPTS(bpf_test_run_opts, run_opts,
			.data_in = &pkt_v4,
			.data_size_in = sizeof(pkt_v4),
			.repeat = 1,
		);
		skel = cb_refs__open_opts(&opts);
		if (!ASSERT_OK_PTR(skel, "cb_refs__open_and_load"))
			return;
		prog = bpf_object__find_program_by_name(skel->obj, cb_refs_tests[i].prog_name);
		bpf_program__set_autoload(prog, true);
		if (!ASSERT_ERR(cb_refs__load(skel), "cb_refs__load"))
			bpf_prog_test_run_opts(bpf_program__fd(prog), &run_opts);
		if (!ASSERT_OK_PTR(strstr(log_buf, cb_refs_tests[i].err_msg), "expected error message")) {
			fprintf(stderr, "Expected: %s\n", cb_refs_tests[i].err_msg);
			fprintf(stderr, "Verifier: %s\n", log_buf);
		}
		cb_refs__destroy(skel);
	}
}
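The BPF-side programs named in cb_refs_tests[] (underflow_prog, leak_prog, nested_cb, non_cb_transfer_ref) live in progs/cb_refs.c and are not shown here. As a rough, hypothetical illustration of the class of bug the commit message describes (a callback that leaves an acquired reference behind), the sketch below is not the actual progs/cb_refs.c; the map, section, and program names are made up. A bpf_loop() callback reserves ringbuf memory and returns without submitting or discarding it; with callbacks verified as if called an unknown number of times, the verifier should reject the load with an "Unreleased reference" error on its own, without the callback-specific bookkeeping this commit removes.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only, not part of the selftest sources. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

static long leaky_cb(__u64 index, void *ctx)
{
	/* Acquire a reference (ringbuf reservation) and return without
	 * bpf_ringbuf_submit()/bpf_ringbuf_discard(). The verifier is
	 * expected to reject the program with an "Unreleased reference"
	 * error at load time.
	 */
	bpf_ringbuf_reserve(&ringbuf, 8, 0);
	return 0;
}

SEC("tc")
int leak_in_callback(struct __sk_buff *skb)
{
	bpf_loop(1, leaky_cb, NULL, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";

The harness above follows the matching pattern on the userspace side: it enables autoload for one program at a time, expects cb_refs__load() to fail, and greps the kernel verifier log for the error message listed in cb_refs_tests[].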