linux/tools/testing/selftests/bpf/progs/kfunc_call_test.c
Toke Høiland-Jørgensen d6212d82bf selftests/bpf: Consolidate kernel modules into common directory
The selftests build four kernel modules which use copy-pasted Makefile
targets. This is a bit messy, and doesn't scale so well when we add more
modules, so let's consolidate these rules into a single rule generated
for each module name, and move the module sources into a single
directory.

To avoid parallel builds of the different modules stepping on each
other's toes during the 'modpost' phase of the Kbuild 'make modules',
the module files should really be a grouped target. However, make only
added explicit support for grouped targets in version 4.3, which is
newer than the minimum version supported by the kernel. Fortunately,
make implicitly treats pattern rules with multiple targets as a grouped
target, so we can work around this by turning the rule into a pattern
rule. We do this by replacing '.ko' with '%ko' in the targets with
subst(), as illustrated in the sketch below.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Viktor Malik <vmalik@redhat.com>
Link: https://lore.kernel.org/bpf/20241204-bpf-selftests-mod-compile-v5-1-b96231134a49@redhat.com
2024-12-06 10:44:10 -08:00
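
To make the subst() trick concrete, here is a minimal sketch of such a
rule. The module names, paths, and variables are illustrative, not the
exact contents of the selftests Makefile:

KDIR ?= /lib/modules/$(shell uname -r)/build
MODULES := bpf_testmod.ko bpf_test_no_cfi.ko

# A plain 'a.ko b.ko: ...' rule would run the recipe once per target,
# letting parallel builds invoke 'modpost' concurrently. Rewriting the
# targets as 'a%ko b%ko' makes this a pattern rule, which make treats
# as a grouped target: one recipe invocation produces all the modules.
$(subst .ko,%ko,$(MODULES)): $(wildcard test_kmods/*.c)
	$(MAKE) -C $(KDIR) M=$(CURDIR)/test_kmods modules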

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_call_test4(struct __sk_buff *skb)
{
struct bpf_sock *sk = skb->sk;
long tmp;
if (!sk)
return -1;
sk = bpf_sk_fullsock(sk);
if (!sk)
return -1;
tmp = bpf_kfunc_call_test4(-3, -30, -200, -1000);
return (tmp >> 32) + tmp;
}
SEC("tc")
int kfunc_call_test2(struct __sk_buff *skb)
{
struct bpf_sock *sk = skb->sk;
if (!sk)
return -1;
sk = bpf_sk_fullsock(sk);
if (!sk)
return -1;
return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
}
SEC("tc")
int kfunc_call_test1(struct __sk_buff *skb)
{
struct bpf_sock *sk = skb->sk;
__u64 a = 1ULL << 32;
__u32 ret;
if (!sk)
return -1;
sk = bpf_sk_fullsock(sk);
if (!sk)
return -1;
a = bpf_kfunc_call_test1((struct sock *)sk, 1, a | 2, 3, a | 4);
ret = a >> 32; /* ret should be 2 */
ret += (__u32)a; /* ret should be 12 */
return ret;
}
SEC("tc")
int kfunc_call_test_ref_btf_id(struct __sk_buff *skb)
{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
if (pt->a != 42 || pt->b != 108)
ret = -1;
bpf_kfunc_call_test_release(pt);
}
return ret;
}
SEC("tc")
int kfunc_call_test_pass(struct __sk_buff *skb)
{
struct prog_test_pass1 p1 = {};
struct prog_test_pass2 p2 = {};
short a = 0;
__u64 b = 0;
long c = 0;
char d = 0;
int e = 0;
bpf_kfunc_call_test_pass_ctx(skb);
bpf_kfunc_call_test_pass1(&p1);
bpf_kfunc_call_test_pass2(&p2);
bpf_kfunc_call_test_mem_len_pass1(&a, sizeof(a));
bpf_kfunc_call_test_mem_len_pass1(&b, sizeof(b));
bpf_kfunc_call_test_mem_len_pass1(&c, sizeof(c));
bpf_kfunc_call_test_mem_len_pass1(&d, sizeof(d));
bpf_kfunc_call_test_mem_len_pass1(&e, sizeof(e));
bpf_kfunc_call_test_mem_len_fail2(&b, -1);
return 0;
}

struct syscall_test_args {
	__u8 data[16];
	size_t size;
};
SEC("syscall")
int kfunc_syscall_test(struct syscall_test_args *args)
{
const long size = args->size;
if (size > sizeof(args->data))
return -7; /* -E2BIG */
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
bpf_kfunc_call_test_mem_len_pass1(&args->data, size);
return 0;
}
SEC("syscall")
int kfunc_syscall_test_null(struct syscall_test_args *args)
{
/* Must be called with args as a NULL pointer
* we do not check for it to have the verifier consider that
* the pointer might not be null, and so we can load it.
*
* So the following can not be added:
*
* if (args)
* return -22;
*/
bpf_kfunc_call_test_mem_len_pass1(args, 0);
return 0;
}
SEC("tc")
int kfunc_call_test_get_mem(struct __sk_buff *skb)
{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
if (p) {
p[0] = 42;
ret = p[1]; /* 108 */
} else {
ret = -1;
}
if (ret >= 0) {
p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
if (p)
ret = p[0]; /* 42 */
else
ret = -1;
}
bpf_kfunc_call_test_release(pt);
}
return ret;
}
SEC("tc")
int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
{
u32 expected = 5, actual;
actual = bpf_kfunc_call_test_static_unused_arg(expected, 0xdeadbeef);
return actual != expected ? -1 : 0;
}

struct ctx_val {
	struct bpf_testmod_ctx __kptr *ctx;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct ctx_val);
} ctx_map SEC(".maps");
SEC("tc")
int kfunc_call_ctx(struct __sk_buff *skb)
{
struct bpf_testmod_ctx *ctx;
int err = 0;
ctx = bpf_testmod_ctx_create(&err);
if (!ctx && !err)
err = -1;
if (ctx) {
int key = 0;
struct ctx_val *ctx_val = bpf_map_lookup_elem(&ctx_map, &key);
/* Transfer ctx to map to be freed via implicit dtor call
* on cleanup.
*/
if (ctx_val)
ctx = bpf_kptr_xchg(&ctx_val->ctx, ctx);
if (ctx) {
bpf_testmod_ctx_release(ctx);
err = -1;
}
}
return err;
}
char _license[] SEC("license") = "GPL";
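
As a usage note, the sketch below shows how one of these programs can be
exercised from userspace via libbpf's test-run facility. It is a minimal
illustration under stated assumptions, not the actual harness in
prog_tests/kfunc_call.c; the object file name and its location are
assumed, and the bpf_testmod kernel module providing the kfuncs must be
loaded for the object to pass verification.

/* Minimal sketch: load kfunc_call_test.bpf.o (name assumed from the
 * selftests build) and run kfunc_call_test1 against a dummy packet.
 */
#include <stdio.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

int main(void)
{
	/* Dummy packet payload; BPF_PROG_TEST_RUN attaches a dummy
	 * socket to the constructed skb, so the skb->sk checks in the
	 * programs pass.
	 */
	char data[64] = {};
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = data,
		.data_size_in = sizeof(data),
		.repeat = 1,
	);
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;

	obj = bpf_object__open_file("kfunc_call_test.bpf.o", NULL);
	if (!obj)
		return 1;
	if (bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "kfunc_call_test1");
	if (!prog)
		return 1;

	err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
	printf("err=%d retval=%d (expected 12)\n", err, opts.retval);

	bpf_object__close(obj);
	return 0;
}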