linux/tools/testing/selftests/bpf/progs/exceptions.c
Alexei Starovoitov a8b242d77b bpf: Introduce "volatile compare" macros
Compilers optimize conditional operators at will, but BPF programmers often
want to force the compiler to keep the same operator in the generated assembly
as it is written in C.
Introduce bpf_cmp_likely/unlikely(var1, conditional_op, var2) macros that can be used as:

-               if (seen >= 1000)
+               if (bpf_cmp_unlikely(seen, >=, 1000))

The macros take advantage of BPF assembly that is C like.
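
As a minimal sketch of that mechanism (not the actual bpf_experimental.h
implementation; it assumes the usual selftest headers for u64/bool/__always_inline,
and force_uge is a made-up name), a hand-written asm goto with the C-like syntax
pins down one specific unsigned compare:

  /* emit exactly one unsigned 'if rX >= rY goto'; the compiler cannot
   * rewrite it into a different conditional jump
   */
  static __always_inline bool force_uge(u64 lhs, u64 rhs)
  {
          asm goto("if %[l] >= %[r] goto %l[yes]"
                   :: [l] "r"(lhs), [r] "r"(rhs)
                   :: yes);
          return false;
  yes:
          return true;
  }

The bpf_cmp_likely/unlikely macros generalize this pattern to any comparison
operator and pick the signed or unsigned flavour automatically.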

The macros check the signedness of the variable 'seen' and emit either a
signed or an unsigned compare.

For example:
int a;
bpf_cmp_unlikely(a, >, 0) will be translated to 'if rX s> 0 goto' in BPF assembly.

unsigned int a;
bpf_cmp_unlikely(a, >, 0) will be translated to 'if rX > 0 goto' in BPF assembly.

C type conversions coupled with comparison operators are tricky.
  int i = -1;
  unsigned int j = 1;
  if (i < j) // this is false.

  long i = -1;
  unsigned int j = 1;
  if (i < j) // this is true.

Make sure the BPF program is compiled with -Wsign-compare; then the macros will
catch such mistakes.
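
For instance, a build with -Wsign-compare would flag the snippet below at
compile time (illustrative sketch; sign_mix_example is a made-up program name,
and bpf_experimental.h is assumed to be included for bpf_cmp_unlikely):

  SEC("tc")
  int sign_mix_example(struct __sk_buff *ctx)
  {
          int i = -1;
          unsigned int j = ctx->len;

          if (bpf_cmp_unlikely(i, <, j)) /* -Wsign-compare warns here */
                  return 1;
          return 0;
  }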

The macros check only the LHS (left hand side) to figure out the signedness of the compare.

'if 0 < rX goto' is not allowed in the assembly, so users have to put a
variable on the LHS anyway.

The patch updates a few tests to demonstrate the use of the macros.

The macros also allow the use of BPF_JSET in C code, since LLVM doesn't generate
it at present. For example:

if (i & j) compiles into r0 &= r1; if r0 == 0 goto

while

if (bpf_cmp_unlikely(i, &, j)) compiles into if r0 & r1 goto
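
As a hedged usage sketch (MY_FLAG and jset_example are illustrative names, and
bpf_experimental.h is assumed to be included), this allows testing a flag bit
with a single conditional jump and no separate AND:

  #define MY_FLAG 0x4 /* illustrative flag bit in skb->mark */

  SEC("tc")
  int jset_example(struct __sk_buff *ctx)
  {
          __u32 mark = ctx->mark;

          if (bpf_cmp_unlikely(mark, &, MY_FLAG))
                  return 1; /* one BPF_JSET-style conditional jump */
          return 0;
  }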

Note that the macros have to be careful with the RHS assembly constraint. With:
u64 __rhs = 1ull << 42;
asm goto("if r0 < %[rhs] goto +1" :: [rhs] "ri" (__rhs));
LLVM will silently truncate the 64-bit constant into an s32 imm.
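
A hedged sketch of working around that pitfall (above_bit42 is an illustrative
name, not part of the patch): keeping the RHS in a register via a plain "r"
constraint loads the full 64-bit value before the compare, so nothing is
truncated:

  static __always_inline bool above_bit42(u64 v)
  {
          u64 rhs = 1ull << 42;

          asm goto("if %[lhs] < %[rhs] goto %l[below]"
                   :: [lhs] "r"(v), [rhs] "r"(rhs) /* "r", not "ri" */
                   :: below);
          return true;
  below:
          return false;
  }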

Note that in [lhs] "r"((short)LHS) the type cast is a workaround for an LLVM issue:
when LHS is exactly 32-bit, LLVM emits a redundant <<=32, >>=32 pair to zero the
upper 32 bits. When LHS is a 64-, 16- or 8-bit variable there are no shifts, and
when LHS is 32-bit a (u64) cast doesn't help. Hence the (short) cast is used;
it does _not_ truncate the variable before it's assigned to a register.

Traditional likely()/unlikely() macros that use __builtin_expect(!!(x), 1 or 0)
have no effect on these macros, hence the macros implement the logic manually:
bpf_cmp_unlikely() preserves the compare operator as-is, while
bpf_cmp_likely() flips the compare.

Consider two cases:
A.
  for() {
    if (foo >= 10) {
      bar += foo;
    }
    other code;
  }

B.
  for() {
    if (foo >= 10)
       break;
    other code;
  }

It's OK to use either the bpf_cmp_likely or the bpf_cmp_unlikely macro in both
cases, but consider that 'break' is effectively 'goto out_of_the_loop'.
Hence it's better to use bpf_cmp_unlikely in case B, while 'bar += foo' is better
kept as the fallthrough (i.e. likely) code path in case A.

When it's written as:
A.
  for() {
    if (bpf_cmp_likely(foo, >=, 10)) {
      bar += foo;
    }
    other code;
  }

B.
  for() {
    if (bpf_cmp_unlikely(foo, >=, 10))
       break;
    other code;
  }

The assembly will look like:
A.
  for() {
    if r1 < 10 goto L1;
      bar += foo;
  L1:
    other code;
  }

B.
  for() {
    if r1 >= 10 goto L2;
    other code;
  }
  L2:

The choice of bpf_cmp_likely vs bpf_cmp_unlikely changes the basic block layout,
hence it will greatly influence the verification process. The number of processed
instructions will differ, since the verifier walks the fallthrough path first.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/bpf/20231226191148.48536-3-alexei.starovoitov@gmail.com

// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_endian.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

#ifndef ETH_P_IP
#define ETH_P_IP 0x0800
#endif

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 4);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

static __noinline int static_func(u64 i)
{
        bpf_throw(32);
        return i;
}

__noinline int global2static_simple(u64 i)
{
        static_func(i + 2);
        return i - 1;
}

__noinline int global2static(u64 i)
{
        if (i == ETH_P_IP)
                bpf_throw(16);
        return static_func(i);
}

static __noinline int static2global(u64 i)
{
        return global2static(i) + i;
}

SEC("tc")
int exception_throw_always_1(struct __sk_buff *ctx)
{
        bpf_throw(64);
        return 0;
}

/* In this case, the global func will never be seen executing after call to
 * static subprog, hence verifier will DCE the remaining instructions. Ensure we
 * are resilient to that.
 */
SEC("tc")
int exception_throw_always_2(struct __sk_buff *ctx)
{
        return global2static_simple(ctx->protocol);
}

SEC("tc")
int exception_throw_unwind_1(struct __sk_buff *ctx)
{
        return static2global(bpf_ntohs(ctx->protocol));
}

SEC("tc")
int exception_throw_unwind_2(struct __sk_buff *ctx)
{
        return static2global(bpf_ntohs(ctx->protocol) - 1);
}

SEC("tc")
int exception_throw_default(struct __sk_buff *ctx)
{
        bpf_throw(0);
        return 1;
}

SEC("tc")
int exception_throw_default_value(struct __sk_buff *ctx)
{
        bpf_throw(5);
        return 1;
}

SEC("tc")
int exception_tail_call_target(struct __sk_buff *ctx)
{
        bpf_throw(16);
        return 0;
}

static __noinline
int exception_tail_call_subprog(struct __sk_buff *ctx)
{
        volatile int ret = 10;
        bpf_tail_call_static(ctx, &jmp_table, 0);
        return ret;
}

SEC("tc")
int exception_tail_call(struct __sk_buff *ctx) {
        volatile int ret = 0;
        ret = exception_tail_call_subprog(ctx);
        return ret + 8;
}

__noinline int exception_ext_global(struct __sk_buff *ctx)
{
        volatile int ret = 0;
        return ret;
}

static __noinline int exception_ext_static(struct __sk_buff *ctx)
{
        return exception_ext_global(ctx);
}

SEC("tc")
int exception_ext(struct __sk_buff *ctx)
{
        return exception_ext_static(ctx);
}

__noinline int exception_cb_mod_global(u64 cookie)
{
        volatile int ret = 0;
        return ret;
}

/* Example of how the exception callback supplied during verification can still
 * introduce extensions by calling to dummy global functions, and alter runtime
 * behavior.
 *
 * Right now we don't allow freplace attachment to exception callback itself,
 * but if the need arises this restriction is technically feasible to relax in
 * the future.
 */
__noinline int exception_cb_mod(u64 cookie)
{
        return exception_cb_mod_global(cookie) + cookie + 10;
}

SEC("tc")
__exception_cb(exception_cb_mod)
int exception_ext_mod_cb_runtime(struct __sk_buff *ctx)
{
        bpf_throw(25);
        return 0;
}

__noinline static int subprog(struct __sk_buff *ctx)
{
        return bpf_ktime_get_ns();
}

__noinline static int throwing_subprog(struct __sk_buff *ctx)
{
        if (ctx->tstamp)
                bpf_throw(0);
        return bpf_ktime_get_ns();
}

__noinline int global_subprog(struct __sk_buff *ctx)
{
        return bpf_ktime_get_ns();
}

__noinline int throwing_global_subprog(struct __sk_buff *ctx)
{
        if (ctx->tstamp)
                bpf_throw(0);
        return bpf_ktime_get_ns();
}

SEC("tc")
int exception_throw_subprog(struct __sk_buff *ctx)
{
        switch (ctx->protocol) {
        case 1:
                return subprog(ctx);
        case 2:
                return global_subprog(ctx);
        case 3:
                return throwing_subprog(ctx);
        case 4:
                return throwing_global_subprog(ctx);
        default:
                break;
        }
        bpf_throw(1);
        return 0;
}

__noinline int assert_nz_gfunc(u64 c)
{
        volatile u64 cookie = c;
        bpf_assert(cookie != 0);
        return 0;
}

__noinline int assert_zero_gfunc(u64 c)
{
        volatile u64 cookie = c;
        bpf_assert(bpf_cmp_unlikely(cookie, ==, 0));
        return 0;
}

__noinline int assert_neg_gfunc(s64 c)
{
        volatile s64 cookie = c;
        bpf_assert(bpf_cmp_unlikely(cookie, <, 0));
        return 0;
}

__noinline int assert_pos_gfunc(s64 c)
{
        volatile s64 cookie = c;
        bpf_assert(bpf_cmp_unlikely(cookie, >, 0));
        return 0;
}

__noinline int assert_negeq_gfunc(s64 c)
{
        volatile s64 cookie = c;
        bpf_assert(bpf_cmp_unlikely(cookie, <=, -1));
        return 0;
}

__noinline int assert_poseq_gfunc(s64 c)
{
        volatile s64 cookie = c;
        bpf_assert(bpf_cmp_unlikely(cookie, >=, 1));
        return 0;
}

__noinline int assert_nz_gfunc_with(u64 c)
{
        volatile u64 cookie = c;
        bpf_assert_with(cookie != 0, cookie + 100);
        return 0;
}

__noinline int assert_zero_gfunc_with(u64 c)
{
        volatile u64 cookie = c;
        bpf_assert_with(bpf_cmp_unlikely(cookie, ==, 0), cookie + 100);
        return 0;
}

__noinline int assert_neg_gfunc_with(s64 c)
{
        volatile s64 cookie = c;
        bpf_assert_with(bpf_cmp_unlikely(cookie, <, 0), cookie + 100);
        return 0;
}

__noinline int assert_pos_gfunc_with(s64 c)
{
        volatile s64 cookie = c;
        bpf_assert_with(bpf_cmp_unlikely(cookie, >, 0), cookie + 100);
        return 0;
}

__noinline int assert_negeq_gfunc_with(s64 c)
{
        volatile s64 cookie = c;
        bpf_assert_with(bpf_cmp_unlikely(cookie, <=, -1), cookie + 100);
        return 0;
}

__noinline int assert_poseq_gfunc_with(s64 c)
{
        volatile s64 cookie = c;
        bpf_assert_with(bpf_cmp_unlikely(cookie, >=, 1), cookie + 100);
        return 0;
}

#define check_assert(name, cookie, tag) \
SEC("tc") \
int exception##tag##name(struct __sk_buff *ctx) \
{ \
        return name(cookie) + 1; \
}

check_assert(assert_nz_gfunc, 5, _);
check_assert(assert_zero_gfunc, 0, _);
check_assert(assert_neg_gfunc, -100, _);
check_assert(assert_pos_gfunc, 100, _);
check_assert(assert_negeq_gfunc, -1, _);
check_assert(assert_poseq_gfunc, 1, _);
check_assert(assert_nz_gfunc_with, 5, _);
check_assert(assert_zero_gfunc_with, 0, _);
check_assert(assert_neg_gfunc_with, -100, _);
check_assert(assert_pos_gfunc_with, 100, _);
check_assert(assert_negeq_gfunc_with, -1, _);
check_assert(assert_poseq_gfunc_with, 1, _);
check_assert(assert_nz_gfunc, 0, _bad_);
check_assert(assert_zero_gfunc, 5, _bad_);
check_assert(assert_neg_gfunc, 100, _bad_);
check_assert(assert_pos_gfunc, -100, _bad_);
check_assert(assert_negeq_gfunc, 1, _bad_);
check_assert(assert_poseq_gfunc, -1, _bad_);
check_assert(assert_nz_gfunc_with, 0, _bad_);
check_assert(assert_zero_gfunc_with, 5, _bad_);
check_assert(assert_neg_gfunc_with, 100, _bad_);
check_assert(assert_pos_gfunc_with, -100, _bad_);
check_assert(assert_negeq_gfunc_with, 1, _bad_);
check_assert(assert_poseq_gfunc_with, -1, _bad_);

SEC("tc")
int exception_assert_range(struct __sk_buff *ctx)
{
        u64 time = bpf_ktime_get_ns();
        bpf_assert_range(time, 0, ~0ULL);
        return 1;
}

SEC("tc")
int exception_assert_range_with(struct __sk_buff *ctx)
{
        u64 time = bpf_ktime_get_ns();
        bpf_assert_range_with(time, 0, ~0ULL, 10);
        return 1;
}

SEC("tc")
int exception_bad_assert_range(struct __sk_buff *ctx)
{
        u64 time = bpf_ktime_get_ns();
        bpf_assert_range(time, -100, 100);
        return 1;
}

SEC("tc")
int exception_bad_assert_range_with(struct __sk_buff *ctx)
{
        u64 time = bpf_ktime_get_ns();
        bpf_assert_range_with(time, -1000, 1000, 10);
        return 1;
}

char _license[] SEC("license") = "GPL";