
This patch adds a test with a much simplified rbtree usage taken from the kernel sch_fq qdisc. It has a "struct node_data" which can be added to two different rbtrees, each ordered by a different key.

The test first populates both rbtrees. It then searches for a lookup_key in the "groot0" rbtree. Once the lookup_key is found, the node's refcount is taken and the node is removed from the other "groot1" rbtree. While searching for the lookup_key, the test also tries to remove all rbnodes on the path leading to the lookup_key.

The test_{root,left,right}_spinlock_true tests ensure that the return value of the bpf_rbtree functions is a non_own_ref node pointer. This is done by forcing a verifier error by calling the helper bpf_jiffies64() while holding the spinlock. The tests then check for the verifier message "call bpf_rbtree...R0=rcu_ptr_or_null_node...".

The test_{root,left,right}_spinlock_false tests ensure that these functions must be called with the spinlock held.

Suggested-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> # Check non_own_ref marking
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20250506015857.817950-6-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
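For context, the BPF-program side described in the commit message looks roughly like the sketch below. This is not the actual progs/rbtree_search.c: the program names, field names (other than node_data, groot0 and groot1), section/lock placement, and exact flow are assumptions patterned after the existing rbtree and refcounted-kptr selftests, and the search loop built on bpf_rbtree_root()/bpf_rbtree_left()/bpf_rbtree_right() is elided. It only illustrates (1) one refcounted node linked into two rbtrees ordered by different keys, and (2) a spinlock_true-style program that is expected to fail verification because bpf_jiffies64() is called while the spinlock is held.

/* Illustrative sketch only -- not the actual progs/rbtree_search.c. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#endif

/* One refcounted object that can sit in two rbtrees at the same time. */
struct node_data {
	struct bpf_refcount ref;
	struct bpf_rb_node r0;	/* linkage in groot0, ordered by key0 */
	struct bpf_rb_node r1;	/* linkage in groot1, ordered by key1 */
	int key0;
	int key1;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock0;
private(A) struct bpf_rb_root groot0 __contains(node_data, r0);
private(B) struct bpf_spin_lock glock1;
private(B) struct bpf_rb_root groot1 __contains(node_data, r1);

static bool less0(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	return container_of(a, struct node_data, r0)->key0 <
	       container_of(b, struct node_data, r0)->key0;
}

static bool less1(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	return container_of(a, struct node_data, r1)->key1 <
	       container_of(b, struct node_data, r1)->key1;
}

/* Link the same node into both rbtrees.  Each bpf_rbtree_add() consumes
 * one owning reference, so a second reference is acquired up front.
 */
SEC("tc")
long sketch_add_to_both_trees(void *ctx)
{
	struct node_data *n, *m;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key0 = 1;
	n->key1 = 100;

	m = bpf_refcount_acquire(n);
	if (!m) {
		bpf_obj_drop(n);
		return 2;
	}

	bpf_spin_lock(&glock0);
	bpf_rbtree_add(&groot0, &n->r0, less0);
	bpf_spin_unlock(&glock0);

	bpf_spin_lock(&glock1);
	bpf_rbtree_add(&groot1, &m->r1, less1);
	bpf_spin_unlock(&glock1);
	return 0;
}

/* spinlock_true-style check: the bpf_jiffies64() call below is made while
 * the spinlock is still held, so the verifier rejects the program, and the
 * log shows bpf_rbtree_root()'s return value as a non-owning
 * (rcu_ptr_or_null) node pointer.  bpf_rbtree_root() is assumed to be
 * declared in bpf_experimental.h by the bpf_rbtree_{root,left,right} series.
 */
SEC("tc")
__failure __msg("call bpf_rbtree")
long sketch_root_spinlock_true(void *ctx)
{
	struct bpf_rb_node *rb;

	bpf_spin_lock(&glock0);
	rb = bpf_rbtree_root(&groot0);
	if (!rb) {
		bpf_spin_unlock(&glock0);
		return 1;
	}
	bpf_jiffies64();	/* helper call with lock held: verifier error */
	bpf_spin_unlock(&glock0);
	return 0;
}

char _license[] SEC("license") = "GPL";

The full file below is the user-space prog_tests runner that loads the rbtree skeletons and drives the subtests.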
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <test_progs.h>
#include <network_helpers.h>

#include "rbtree.skel.h"
#include "rbtree_fail.skel.h"
#include "rbtree_btf_fail__wrong_node_type.skel.h"
#include "rbtree_btf_fail__add_wrong_type.skel.h"
#include "rbtree_search.skel.h"

static void test_rbtree_add_nodes(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes), &opts);
	ASSERT_OK(ret, "rbtree_add_nodes run");
	ASSERT_OK(opts.retval, "rbtree_add_nodes retval");
	ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes less_callback_ran");

	rbtree__destroy(skel);
}

static void test_rbtree_add_nodes_nested(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes_nested), &opts);
	ASSERT_OK(ret, "rbtree_add_nodes_nested run");
	ASSERT_OK(opts.retval, "rbtree_add_nodes_nested retval");
	ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes_nested less_callback_ran");

	rbtree__destroy(skel);
}

static void test_rbtree_add_and_remove(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove), &opts);
	ASSERT_OK(ret, "rbtree_add_and_remove");
	ASSERT_OK(opts.retval, "rbtree_add_and_remove retval");
	ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key");

	rbtree__destroy(skel);
}

static void test_rbtree_add_and_remove_array(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove_array), &opts);
	ASSERT_OK(ret, "rbtree_add_and_remove_array");
	ASSERT_OK(opts.retval, "rbtree_add_and_remove_array retval");

	rbtree__destroy(skel);
}

static void test_rbtree_first_and_remove(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_first_and_remove), &opts);
	ASSERT_OK(ret, "rbtree_first_and_remove");
	ASSERT_OK(opts.retval, "rbtree_first_and_remove retval");
	ASSERT_EQ(skel->data->first_data[0], 2, "rbtree_first_and_remove first rbtree_first()");
	ASSERT_EQ(skel->data->removed_key, 1, "rbtree_first_and_remove first removed key");
	ASSERT_EQ(skel->data->first_data[1], 4, "rbtree_first_and_remove second rbtree_first()");

	rbtree__destroy(skel);
}

static void test_rbtree_api_release_aliasing(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_api_release_aliasing), &opts);
	ASSERT_OK(ret, "rbtree_api_release_aliasing");
	ASSERT_OK(opts.retval, "rbtree_api_release_aliasing retval");
	ASSERT_EQ(skel->data->first_data[0], 42, "rbtree_api_release_aliasing first rbtree_remove()");
	ASSERT_EQ(skel->data->first_data[1], -1, "rbtree_api_release_aliasing second rbtree_remove()");

	rbtree__destroy(skel);
}

void test_rbtree_success(void)
{
	if (test__start_subtest("rbtree_add_nodes"))
		test_rbtree_add_nodes();
	if (test__start_subtest("rbtree_add_nodes_nested"))
		test_rbtree_add_nodes_nested();
	if (test__start_subtest("rbtree_add_and_remove"))
		test_rbtree_add_and_remove();
	if (test__start_subtest("rbtree_add_and_remove_array"))
		test_rbtree_add_and_remove_array();
	if (test__start_subtest("rbtree_first_and_remove"))
		test_rbtree_first_and_remove();
	if (test__start_subtest("rbtree_api_release_aliasing"))
		test_rbtree_api_release_aliasing();
}

#define BTF_FAIL_TEST(suffix)									\
void test_rbtree_btf_fail__##suffix(void)							\
{												\
	struct rbtree_btf_fail__##suffix *skel;							\
												\
	skel = rbtree_btf_fail__##suffix##__open_and_load();					\
	if (!ASSERT_ERR_PTR(skel,								\
			    "rbtree_btf_fail__" #suffix "__open_and_load unexpected success"))	\
		rbtree_btf_fail__##suffix##__destroy(skel);					\
}

#define RUN_BTF_FAIL_TEST(suffix)				\
	if (test__start_subtest("rbtree_btf_fail__" #suffix))	\
		test_rbtree_btf_fail__##suffix();

BTF_FAIL_TEST(wrong_node_type);
BTF_FAIL_TEST(add_wrong_type);

void test_rbtree_btf_fail(void)
{
	RUN_BTF_FAIL_TEST(wrong_node_type);
	RUN_BTF_FAIL_TEST(add_wrong_type);
}

void test_rbtree_fail(void)
{
	RUN_TESTS(rbtree_fail);
}

void test_rbtree_search(void)
{
	RUN_TESTS(rbtree_search);
}
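As a usage note (assuming the standard selftests layout), these entry points are run through the test_progs binary built in tools/testing/selftests/bpf, filtering by test name:

	./test_progs -t rbtree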