// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
	defined(__TARGET_ARCH_loongarch)) && \
	__clang_major__ >= 18
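
/* Store 0x3fe on the stack, then read the lowest byte back with a
 * sign-extending s8 load: 0xfe sign-extends to -2, the expected return
 * value. On big endian the low byte sits at r10 - 1 rather than r10 - 8.
 */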
SEC("socket")
__description("LDSX, S8")
__success __success_unpriv __retval(-2)
__naked void ldsx_s8(void)
{
	asm volatile (
	"r1 = 0x3fe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s8 *)(r10 - 8);"
#else
	"r0 = *(s8 *)(r10 - 1);"
#endif
	"exit;"
	::: __clobber_all);
}
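
/* Same idea for s16: the low 16 bits of 0x3fffe are 0xfffe, which
 * sign-extend to -2.
 */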
SEC("socket")
__description("LDSX, S16")
__success __success_unpriv __retval(-2)
__naked void ldsx_s16(void)
{
	asm volatile (
	"r1 = 0x3fffe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s16 *)(r10 - 8);"
#else
	"r0 = *(s16 *)(r10 - 2);"
#endif
	"exit;"
	::: __clobber_all);
}
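
/* The s32 load of 0xfffffffe sign-extends to -2; the unsigned right
 * shift then leaves 0xffffffff in the lower 32 bits, so the 32-bit
 * return value checked by __retval() is -1.
 */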
SEC("socket")
__description("LDSX, S32")
__success __success_unpriv __retval(-1)
__naked void ldsx_s32(void)
{
	asm volatile (
	"r1 = 0xfffffffe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s32 *)(r10 - 8);"
#else
	"r0 = *(s32 *)(r10 - 4);"
#endif
	"r0 >>= 1;"
	"exit;"
	::: __clobber_all);
}
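
/* After the sign-extending s8 load the verifier is expected to track
 * r1 as [-128, 127] (checked against the verifier log via __msg() at
 * log level 2), so both range-check branches below are never taken and
 * r0 stays 1.
 */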
SEC("socket")
__description("LDSX, S8 range checking, privileged")
__log_level(2) __success __retval(1)
__msg("R1_w=scalar(smin=smin32=-128,smax=smax32=127)")
__naked void ldsx_s8_range_priv(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s8 *)(r10 - 8);"
#else
	"r1 = *(s8 *)(r10 - 1);"
#endif
	/* r1 with s8 range */
	"if r1 s> 0x7f goto l0_%=;"
	"if r1 s< -0x80 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("socket")
__description("LDSX, S16 range checking")
__success __success_unpriv __retval(1)
__naked void ldsx_s16_range(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s16 *)(r10 - 8);"
#else
	"r1 = *(s16 *)(r10 - 2);"
#endif
	/* r1 with s16 range */
	"if r1 s> 0x7fff goto l0_%=;"
	"if r1 s< -0x8000 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("socket")
__description("LDSX, S32 range checking")
__success __success_unpriv __retval(1)
__naked void ldsx_s32_range(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s32 *)(r10 - 8);"
#else
	"r1 = *(s32 *)(r10 - 4);"
#endif
	/* r1 with s32 range */
	"if r1 s> 0x7fffffff goto l0_%=;"
	"if r1 s< -0x80000000 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
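
/* Sign-extending loads are not allowed on narrow context fields such
 * as data, data_end and data_meta; the verifier must reject them with
 * "invalid bpf_context access", which the tests below expect for xdp,
 * tcx and flow_dissector programs.
 */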
SEC("xdp")
__description("LDSX, xdp s32 xdp_md->data")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_1(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[xdp_md_data]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data))
	: __clobber_all);
}

SEC("xdp")
__description("LDSX, xdp s32 xdp_md->data_end")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_2(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[xdp_md_data_end]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

SEC("xdp")
__description("LDSX, xdp s32 xdp_md->data_meta")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_3(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[xdp_md_data_meta]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

SEC("tcx/ingress")
__description("LDSX, tcx s32 __sk_buff->data")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_4(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}

SEC("tcx/ingress")
__description("LDSX, tcx s32 __sk_buff->data_end")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_5(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tcx/ingress")
__description("LDSX, tcx s32 __sk_buff->data_meta")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_6(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data_meta]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
	: __clobber_all);
}

SEC("flow_dissector")
__description("LDSX, flow_dissector s32 __sk_buff->data")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_7(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}

SEC("flow_dissector")
__description("LDSX, flow_dissector s32 __sk_buff->data_end")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_8(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

#else
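
/* Without cpu v4 support in the compiler or JIT, fall back to a
 * trivial dummy test so this file still builds and loads.
 */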
SEC("socket")
__description("cpuv4 is not supported by compiler or jit, use a dummy test")
__success
int dummy_test(void)
{
	return 0;
}

#endif

char _license[] SEC("license") = "GPL";