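/* The next two cases cover calls to an invalid kfunc: a reachable call is
 * rejected ("invalid kernel function call not eliminated in verifier pass"),
 * while an unreachable one is accepted because the dead call is eliminated.
 */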
{
	"calls: invalid kfunc call not eliminated",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = REJECT,
	.errstr = "invalid kernel function call not eliminated in verifier pass",
},
{
	"calls: invalid kfunc call unreachable",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
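/* The following five cases pass a stack pointer (fp-8) as arg#0 and check
 * that kfunc argument matching rejects unsuitable prototypes: a struct with
 * non-scalar members, nesting depth greater than 4, a flexible array member,
 * a ctx argument that is not given a PTR_TO_CTX register, and a void *
 * argument without an accompanying memory size argument.
 */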
{
	"calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_fail1", 2 },
	},
},
{
	"calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_fail2", 2 },
	},
},
{
	"calls: invalid kfunc call: ptr_to_mem to struct with FAM",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_fail3", 2 },
	},
},
{
	"calls: invalid kfunc call: reg->type != PTR_TO_CTX",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 expected pointer to ctx, but got fp",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_pass_ctx", 2 },
	},
},
{
	"calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "arg#0 pointer type UNKNOWN must point to scalar",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_mem_len_fail1", 2 },
	},
},
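/* The result of bpf_kfunc_call_test_acquire() is passed straight to
 * bpf_kfunc_call_test_release() without a NULL check; the verifier is
 * expected to reject it with "Possibly NULL pointer passed to trusted arg0".
 */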
{
	"calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "Possibly NULL pointer passed to trusted arg0",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_test_release", 5 },
	},
},
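/* Release kfunc argument rules: the pointer must be passed with a zero
 * offset, and a pointer whose type only matches the struct's first member
 * must not be accepted in place of the type the release kfunc expects.
 */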
{
	"calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "R1 must have zero offset when passed to release func",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_memb_release", 8 },
	},
},
{
	"calls: invalid kfunc call: don't match first member type when passed to release kfunc",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_memb_acquire", 1 },
		{ "bpf_kfunc_call_memb1_release", 5 },
	},
},
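/* PTR_TO_BTF_ID arguments may not be passed to a kfunc with a negative or
 * a variable offset; both cases below expect the corresponding verifier
 * error ("ptr R1 off=-4 disallowed" and "variable ptr_ access ... disallowed").
 */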
{
	"calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_test_offset", 9 },
		{ "bpf_kfunc_call_test_release", 12 },
	},
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "ptr R1 off=-4 disallowed",
},
{
	"calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_test_release", 9 },
		{ "bpf_kfunc_call_test_release", 13 },
		{ "bpf_kfunc_call_test_release", 17 },
	},
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
},
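/* A kfunc argument declared as referenced needs a refcounted PTR_TO_BTF_ID:
 * loading a pointer out of the acquired object and passing that on is
 * rejected, while passing the acquired object itself (and finally releasing
 * it) is accepted.
 */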
{
	"calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_test_ref", 8 },
		{ "bpf_kfunc_call_test_ref", 10 },
	},
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "R1 must be",
},
{
	"calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_kfunc_btf_id = {
		{ "bpf_kfunc_call_test_acquire", 3 },
		{ "bpf_kfunc_call_test_ref", 8 },
		{ "bpf_kfunc_call_test_release", 10 },
	},
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
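/* An freplace (BPF_PROG_TYPE_EXT) program that calls a kfunc must be loaded
 * with an (attach_prog_fd, btf_id) pair; otherwise loading fails with
 * "Tracing programs must provide btf_id".
 */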
{
	"calls: invalid kfunc call: must provide (attach_prog_fd, btf_id) pair when freplace",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_EXT,
	.result = REJECT,
	.errstr = "Tracing programs must provide btf_id",
	.fixup_kfunc_btf_id = {
		{ "bpf_dynptr_from_skb", 0 },
	},
},
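/* The remaining cases exercise BPF-to-BPF (pseudo) calls rather than kfuncs:
 * basic subprogram calls, privilege checks, return-value and argument
 * passing, and the encoding of the call instruction itself.
 */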
{
	"calls: basic sanity",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: not on unprivileged",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
|
2019-01-25 15:24:44 -08:00
|
|
|
.result_unpriv = REJECT,
|
|
|
|
.result = ACCEPT,
|
|
|
|
.retval = 1,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: div by 0 in subprog",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, data_end)),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
|
|
|
|
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
|
|
|
|
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV32_IMM(BPF_REG_2, 0),
|
|
|
|
BPF_MOV32_IMM(BPF_REG_3, 1),
|
|
|
|
BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, data)),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.result = ACCEPT,
|
|
|
|
.retval = 1,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: multiple ret types in subprog 1",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, data_end)),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
|
|
|
|
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
|
|
|
|
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, data)),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
|
|
|
|
BPF_MOV32_IMM(BPF_REG_0, 42),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.result = REJECT,
|
2022-03-01 14:27:45 -08:00
|
|
|
.errstr = "R0 invalid mem access 'scalar'",
|
2019-01-25 15:24:44 -08:00
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: multiple ret types in subprog 2",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, data_end)),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
|
|
|
|
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
|
|
|
|
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, data)),
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
|
|
|
|
offsetof(struct __sk_buff, data)),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.fixup_map_hash_8b = { 16 },
|
|
|
|
.result = REJECT,
|
.errstr = "R0 min value is outside of the allowed memory range",
|
2019-01-25 15:24:44 -08:00
|
|
|
},
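/* Malformed call instructions and bad call targets: overlapping
 * caller/callee, jumps out of range, reserved src_reg/off fields, and a
 * self-call that exceeds the allowed call stack depth.
 */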
{
	"calls: overlapping caller/callee",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "last insn is not an exit or jmp",
	.result = REJECT,
},
{
	"calls: wrong recursive calls",
	.insns = {
	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"calls: wrong src reg",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "BPF_CALL uses reserved fields",
	.result = REJECT,
},
{
	"calls: wrong off value",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "BPF_CALL uses reserved fields",
	.result = REJECT,
},
{
	"calls: jump back loop",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "the call stack of 9 frames is too deep",
|
2019-01-25 15:24:44 -08:00
|
|
|
.result = REJECT,
|
|
|
|
},
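/* "conditional call" series: calls guarded by a branch on skb->mark, with
 * targets that either land on valid subprograms (ACCEPT) or jump out of
 * range / form an infinite loop (REJECT).
 */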
{
	"calls: conditional call",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"calls: conditional call 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: conditional call 3",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "back-edge from insn",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = 1,
},
{
	"calls: conditional call 4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -5),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: conditional call 5",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	"calls: conditional call 6",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "infinite loop detected",
	.result = REJECT,
},
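/* Caller/callee register semantics: r0 must be initialized by the callee
 * before it returns, arguments are passed to the callee in r1-r5, and
 * reading an argument register the caller never set is rejected as !read_ok.
 */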
{
	"calls: using r0 returned by callee",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.result = ACCEPT,
},
{
	"calls: using uninit r0 from callee",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	"calls: callee is using r1",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	.result = ACCEPT,
	.retval = TEST_DATA_LEN,
},
{
	"calls: callee using args1",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
{
	"calls: callee using wrong args2",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.errstr = "R2 !read_ok",
	.result = REJECT,
},
{
	"calls: callee using two args",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
		    offsetof(struct __sk_buff, len)),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
		    offsetof(struct __sk_buff, len)),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
},
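/* When a callee invokes a helper that adjusts the packet (here
 * bpf_xdp_adjust_head()), packet pointers in all caller frames must be
 * invalidated, so the later read through r6 is rejected.
 */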
{
	"calls: callee changing pkt pointers",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* clear_all_pkt_pointers() has to walk all frames
	 * to make sure that pkt pointers in the caller
	 * are cleared when callee is calling a helper that
	 * adjusts packet size
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R6 invalid mem access 'scalar'",
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
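/* The map lookup result is NULL-checked inside the callee and only
 * dereferenced in the caller when the callee reported it as non-NULL;
 * the program is accepted (retval 0).
 */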
{
	"calls: ptr null check in subprog",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
bpf: Support bpf program calling kernel function
This patch adds support to BPF verifier to allow bpf program calling
kernel function directly.
The use case included in this set is to allow bpf-tcp-cc to directly
call some tcp-cc helper functions (e.g. "tcp_cong_avoid_ai()"). Those
functions have already been used by some kernel tcp-cc implementations.
This set will also allow the bpf-tcp-cc program to directly call the
kernel tcp-cc implementation, For example, a bpf_dctcp may only want to
implement its own dctcp_cwnd_event() and reuse other dctcp_*() directly
from the kernel tcp_dctcp.c instead of reimplementing (or
copy-and-pasting) them.
The tcp-cc kernel functions mentioned above will be white listed
for the struct_ops bpf-tcp-cc programs to use in a later patch.
The white listed functions are not bounded to a fixed ABI contract.
Those functions have already been used by the existing kernel tcp-cc.
If any of them has changed, both in-tree and out-of-tree kernel tcp-cc
implementations have to be changed. The same goes for the struct_ops
bpf-tcp-cc programs which have to be adjusted accordingly.
This patch is to make the required changes in the bpf verifier.
First change is in btf.c, it adds a case in "btf_check_func_arg_match()".
When the passed in "btf->kernel_btf == true", it means matching the
verifier regs' states with a kernel function. This will handle the
PTR_TO_BTF_ID reg. It also maps PTR_TO_SOCK_COMMON, PTR_TO_SOCKET,
and PTR_TO_TCP_SOCK to its kernel's btf_id.
In the later libbpf patch, the insn calling a kernel function will
look like:
insn->code == (BPF_JMP | BPF_CALL)
insn->src_reg == BPF_PSEUDO_KFUNC_CALL /* <- new in this patch */
insn->imm == func_btf_id /* btf_id of the running kernel */
[ For future support of calling functions in kernel modules, an array
of module btf_fds can be passed at load time and insn->off
can be used to index into this array. ]
At the early stage of verifier, the verifier will collect all kernel
function calls into "struct bpf_kfunc_desc". Those
descriptors are stored in "prog->aux->kfunc_tab" and will
be available to the JIT. Since this "add" operation is similar
to the current "add_subprog()" and looking for the same insn->code,
they are done together in the new "add_subprog_and_kfunc()".
In the "do_check()" stage, the new "check_kfunc_call()" is added
to verify the kernel function call instruction:
1. Ensure the kernel function can be used by a particular BPF_PROG_TYPE.
A new bpf_verifier_ops "check_kfunc_call" is added to do that.
The bpf-tcp-cc struct_ops program will implement this function in
a later patch.
2. Call "btf_check_kfunc_args_match()" to ensure the regs can be
used as the args of a kernel function.
3. Mark the regs' type, subreg_def, and zext_dst.
At the later do_misc_fixups() stage, the new fixup_kfunc_call()
will replace the insn->imm with the function address (relative
to __bpf_call_base). If needed, the jit can find the btf_func_model
by calling the new bpf_jit_find_kfunc_model(prog, insn).
With the imm set to the function address, "bpftool prog dump xlated"
will be able to display the kernel function calls the same way as
it displays other bpf helper calls.
A gpl_compatible program is required to call a kernel function.
This feature currently requires JIT.
The verifier selftests are adjusted because of the changes in
the verbose log in add_subprog_and_kfunc().
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210325015142.1544736-1-kafai@fb.com
2021-03-24 18:51:42 -07:00
|
|
|
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
|
2019-04-24 21:51:26 +02:00
|
|
|
.fixup_map_hash_48b = { 3 },
|
|
|
|
.result_unpriv = REJECT,
|
|
|
|
.result = ACCEPT,
|
|
|
|
.retval = 0,
|
|
|
|
},
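The commit message above spells out how a kernel-function call instruction is encoded. As a quick reference, here is a sketch of that encoding written as a struct literal; it assumes the uapi struct bpf_insn from <linux/bpf.h>, and the tests themselves emit the equivalent through the BPF_RAW_INSN() macro, with imm filled in with a real BTF id at load time.
	struct bpf_insn kfunc_call = {
		.code    = BPF_JMP | BPF_CALL,
		.src_reg = BPF_PSEUDO_KFUNC_CALL,
		.dst_reg = 0,
		.off     = 0,	/* reserved; later used to index module BTF fds */
		.imm     = 0,	/* the called kernel function's BTF id */
	};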
|
2019-01-25 15:24:44 -08:00
|
|
|
{
|
|
|
|
"calls: two calls with args",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, len)),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.result = ACCEPT,
|
|
|
|
.retval = TEST_DATA_LEN + TEST_DATA_LEN,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: calls with stack arith",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.result = ACCEPT,
|
|
|
|
.retval = 42,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: calls with misaligned stack access",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
|
|
|
|
.errstr = "misaligned stack access",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: calls control flow, jump test",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 43),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, -3),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.result = ACCEPT,
|
|
|
|
.retval = 43,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: calls control flow, jump test 2",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 43),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.errstr = "jump out of range from insn 1 to 4",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls with bad jump",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, len)),
|
|
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
|
|
|
.errstr = "jump out of range from insn 11 to 9",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: recursive call. test1",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
bpf: fix control-flow graph checking in privileged mode
When BPF program is verified in privileged mode, BPF verifier allows
bounded loops. This means that from CFG point of view there are
definitely some back-edges. The original commit adjusted the check_cfg() logic
to not detect back-edges in the control flow graph if they result
from conditional jumps, with the idea that the subsequent full BPF
verification process will determine whether such loops are bounded or
not, and either accept or reject the BPF program. At least that's my
reading of the intent.
Unfortunately, the implementation of this idea doesn't work correctly in
all possible situations. A conditional jump might not result in an immediate
back-edge, but just a few unconditional instructions later we can arrive
at a back-edge. In such situations check_cfg() would reject the BPF program
even in privileged mode, despite it possibly being a bounded loop. The next patch
adds one simple program demonstrating such a scenario.
To keep things simple, instead of trying to detect back edges in
privileged mode, just assume every back edge is valid and let subsequent
BPF verification prove or reject bounded loops.
Note a few test changes. For unknown reasons, we have a few tests that
are specified to detect a back-edge in privileged mode, but looking at
their code it seems like the right outcome is passing check_cfg() and
letting subsequent verification make a decision about bounded or
unbounded looping.
Bounded recursion case is also interesting. The example should pass, as
recursion is limited to just a few levels and so we never reach maximum
number of nested frames and never exhaust maximum stack depth. But the
way that max stack depth logic works today it falsely detects this as
exceeding max nested frame count. This patch series doesn't attempt to
fix this orthogonal problem, so we just adjust expected verifier failure.
Suggested-by: Alexei Starovoitov <ast@kernel.org>
Fixes: 2589726d12a1 ("bpf: introduce bounded loops")
Reported-by: Hao Sun <sunhao.th@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231110061412.2995786-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2023-11-09 22:14:10 -08:00
|
|
|
.errstr = "the call stack of 9 frames is too deep",
|
2019-01-25 15:24:44 -08:00
|
|
|
.result = REJECT,
|
|
|
|
},
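To make the scenario in the commit message above concrete, here is a minimal sketch (my own, not one of the tests in this file) of a bounded loop whose back-edge is an unconditional jump reached shortly after the conditional exit, i.e. the shape the old check_cfg() logic would flag even in privileged mode:
	BPF_MOV64_IMM(BPF_REG_0, 10),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),	/* insn 1: loop body */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),	/* insn 2: conditional exit to insn 5 */
	BPF_MOV64_IMM(BPF_REG_1, 0),		/* insn 3: unrelated work */
	BPF_JMP_IMM(BPF_JA, 0, 0, -4),		/* insn 4: back-edge to insn 1 */
	BPF_EXIT_INSN(),			/* insn 5 */
The loop runs exactly ten times, so the full verification pass can prove it bounded once check_cfg() stops rejecting the back-edge up front.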
|
|
|
|
{
|
|
|
|
"calls: recursive call. test2",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
2023-11-09 22:14:10 -08:00
|
|
|
.errstr = "the call stack of 9 frames is too deep",
|
2019-01-25 15:24:44 -08:00
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: unreachable code",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
|
|
|
.errstr = "unreachable insn 6",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: invalid call",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
|
|
|
.errstr = "invalid destination",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: invalid call 2",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
|
|
|
.errstr = "invalid destination",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: jumping across function bodies. test1",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
|
|
|
.errstr = "jump out of range",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: jumping across function bodies. test2",
|
|
|
|
.insns = {
|
|
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
|
|
|
.errstr = "jump out of range",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: call without exit",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
|
|
|
.errstr = "not an exit",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: call into middle of ld_imm64",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_LD_IMM64(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
|
|
|
.errstr = "last insn",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: call into middle of other call",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
|
|
|
.errstr = "last insn",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
2020-09-17 19:09:18 -07:00
|
|
|
"calls: subprog call with ld_abs in main prog",
|
2019-01-25 15:24:44 -08:00
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_LD_ABS(BPF_B, 0),
|
|
|
|
BPF_LD_ABS(BPF_H, 0),
|
|
|
|
BPF_LD_ABS(BPF_W, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
|
2020-09-17 19:09:18 -07:00
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
2019-01-25 15:24:44 -08:00
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
|
|
|
|
BPF_LD_ABS(BPF_B, 0),
|
|
|
|
BPF_LD_ABS(BPF_H, 0),
|
|
|
|
BPF_LD_ABS(BPF_W, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_2, 1),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_3, 2),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
2020-09-17 19:09:18 -07:00
|
|
|
.result = ACCEPT,
|
2019-01-25 15:24:44 -08:00
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls with bad fallthrough",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
|
|
|
|
BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, len)),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
|
|
|
|
.errstr = "not an exit",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls with stack read",
|
|
|
|
.insns = {
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.result = ACCEPT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls with stack write",
|
|
|
|
.insns = {
|
|
|
|
/* main prog */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
|
|
|
|
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
|
|
|
|
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
|
|
|
|
/* write into stack frame of main prog */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 2 */
|
|
|
|
/* read from stack frame of main prog */
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.result = ACCEPT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: stack overflow using two frames (pre-call access)",
|
|
|
|
.insns = {
|
|
|
|
/* prog 1 */
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* prog 2 */
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.errstr = "combined stack size",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: stack overflow using two frames (post-call access)",
|
|
|
|
.insns = {
|
|
|
|
/* prog 1 */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* prog 2 */
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.errstr = "combined stack size",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: stack depth check using three frames. test1",
|
|
|
|
.insns = {
|
|
|
|
/* main */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* A */
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* B */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
/* stack_main=32, stack_A=256, stack_B=64
|
|
|
|
* and max(main+A, main+A+B) < 512
|
|
|
|
*/
|
|
|
|
.result = ACCEPT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: stack depth check using three frames. test2",
|
|
|
|
.insns = {
|
|
|
|
/* main */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* A */
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* B */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
/* stack_main=32, stack_A=64, stack_B=256
|
|
|
|
* and max(main+A, main+A+B) < 512
|
|
|
|
*/
|
|
|
|
.result = ACCEPT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: stack depth check using three frames. test3",
|
|
|
|
.insns = {
|
|
|
|
/* main */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
|
|
|
|
BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* A */
|
|
|
|
BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, -3),
|
|
|
|
/* B */
|
|
|
|
BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
/* stack_main=64, stack_A=224, stack_B=256
|
|
|
|
* and max(main+A, main+A+B) > 512
|
|
|
|
*/
|
|
|
|
.errstr = "combined stack",
|
|
|
|
.result = REJECT,
|
|
|
|
},
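Spelling out the comment above: the deepest call chain is main -> B -> A, which needs 64 + 256 + 224 = 544 bytes, while main -> A alone needs only 64 + 224 = 288; since 544 exceeds the 512-byte combined stack limit, the program is rejected.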
|
|
|
|
{
|
|
|
|
"calls: stack depth check using three frames. test4",
|
|
|
|
/* void main(void) {
|
|
|
|
* func1(0);
|
|
|
|
* func1(1);
|
|
|
|
* func2(1);
|
|
|
|
* }
|
|
|
|
* void func1(int alloc_or_recurse) {
|
|
|
|
* if (alloc_or_recurse) {
|
|
|
|
* frame_pointer[-300] = 1;
|
|
|
|
* } else {
|
|
|
|
* func2(alloc_or_recurse);
|
|
|
|
* }
|
|
|
|
* }
|
|
|
|
* void func2(int alloc_or_recurse) {
|
|
|
|
* if (alloc_or_recurse) {
|
|
|
|
* frame_pointer[-300] = 1;
|
|
|
|
* }
|
|
|
|
* }
|
|
|
|
*/
|
|
|
|
.insns = {
|
|
|
|
/* main */
|
|
|
|
BPF_MOV64_IMM(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
|
|
|
|
BPF_MOV64_IMM(BPF_REG_1, 1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
|
|
|
|
BPF_MOV64_IMM(BPF_REG_1, 1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* A */
|
|
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* B */
|
|
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
|
|
|
|
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.result = REJECT,
|
|
|
|
.errstr = "combined stack",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: stack depth check using three frames. test5",
|
|
|
|
.insns = {
|
|
|
|
/* main */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* A */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* B */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* C */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* D */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* E */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* F */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* G */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* H */
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.errstr = "call stack",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
2019-03-20 13:58:50 +01:00
|
|
|
{
|
|
|
|
"calls: stack depth check in dead code",
|
|
|
|
.insns = {
|
|
|
|
/* main */
|
|
|
|
BPF_MOV64_IMM(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* A */
|
|
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* B */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* C */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* D */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* E */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* F */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* G */
|
|
|
|
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
/* H */
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.errstr = "call stack",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
2019-01-25 15:24:44 -08:00
|
|
|
{
|
|
|
|
"calls: spill into caller stack frame",
|
|
|
|
.insns = {
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.errstr = "cannot spill",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: write into caller stack frame",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.result = ACCEPT,
|
|
|
|
.retval = 42,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: write into callee stack frame",
|
|
|
|
.insns = {
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.errstr = "cannot return stack pointer",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls with stack write and void return",
|
|
|
|
.insns = {
|
|
|
|
/* main prog */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 2 */
|
|
|
|
/* write into stack frame of main prog */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
|
|
|
|
BPF_EXIT_INSN(), /* void return */
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.result = ACCEPT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: ambiguous return value",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
2020-05-13 16:03:55 -07:00
|
|
|
.errstr_unpriv = "allowed for",
|
2019-01-25 15:24:44 -08:00
|
|
|
.result_unpriv = REJECT,
|
|
|
|
.errstr = "R0 !read_ok",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls that return map_value",
|
|
|
|
.insns = {
|
|
|
|
/* main prog */
|
|
|
|
/* pass fp-16, fp-8 into a function */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
|
|
|
|
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
|
|
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
/* fetch second map_value_ptr from the stack */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
|
|
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
/* call 3rd function twice */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
|
|
|
|
/* first time with fp-8 */
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
|
|
|
|
/* second time with fp-16 */
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 2 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
/* lookup from map */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
/* write map_value_ptr into stack frame of main prog */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(), /* return 0 */
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.fixup_map_hash_8b = { 23 },
|
|
|
|
.result = ACCEPT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls that return map_value with bool condition",
|
|
|
|
.insns = {
|
|
|
|
/* main prog */
|
|
|
|
/* pass fp-16, fp-8 into a function */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
/* call 3rd function twice */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
|
|
|
|
/* first time with fp-8 */
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
|
|
|
|
/* second time with fp-16 */
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
|
|
|
|
/* fetch second map_value_ptr from the stack */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 2 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
/* lookup from map */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(), /* return 0 */
|
|
|
|
/* write map_value_ptr into stack frame of main prog */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
|
|
|
BPF_EXIT_INSN(), /* return 1 */
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.fixup_map_hash_8b = { 23 },
|
|
|
|
.result = ACCEPT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls that return map_value with incorrect bool check",
|
|
|
|
.insns = {
|
|
|
|
/* main prog */
|
|
|
|
/* pass fp-16, fp-8 into a function */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
/* call 3rd function twice */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
|
|
|
|
/* first time with fp-8 */
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
|
|
|
|
/* second time with fp-16 */
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
/* fetch second map_value_ptr from the stack */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 2 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
/* lookup from map */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(), /* return 0 */
|
|
|
|
/* write map_value_ptr into stack frame of main prog */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
|
|
|
BPF_EXIT_INSN(), /* return 1 */
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_XDP,
|
|
|
|
.fixup_map_hash_8b = { 23 },
|
|
|
|
.result = REJECT,
|
bpf: Fix accesses to uninit stack slots
Privileged programs are supposed to be able to read uninitialized stack
memory (ever since 6715df8d5) but, before this patch, these accesses
were permitted inconsistently. In particular, accesses were permitted
above state->allocated_stack, but not below it. In other words, if the
stack was already "large enough", the access was permitted, but
otherwise the access was rejected instead of being allowed to "grow the
stack". This undesired rejection was happening in two places:
- in check_stack_slot_within_bounds()
- in check_stack_range_initialized()
This patch arranges for these accesses to be permitted. A bunch of tests
that were relying on the old rejection had to change; all of them were
changed to also run unprivileged, in which case the old behavior
persists. One test couldn't be updated - global_func16 - because it
can't run unprivileged for other reasons.
This patch also fixes the tracking of the stack size for variable-offset
reads. This second fix is bundled in the same commit as the first one
because they're inter-related. Before this patch, writes to the stack
using registers containing a variable offset (as opposed to registers
with fixed, known values) were not properly contributing to the
function's needed stack size. As a result, it was possible for a program
to verify, but then to attempt to read out-of-bounds data at runtime
because a too small stack had been allocated for it.
Each function tracks the size of the stack it needs in
bpf_subprog_info.stack_depth, which is maintained by
update_stack_depth(). For regular memory accesses, check_mem_access()
was calling update_state_depth() but it was passing in only the fixed
part of the offset register, ignoring the variable offset. This was
incorrect; the minimum possible value of that register should be used
instead.
This tracking is now fixed by centralizing the tracking of stack size in
grow_stack_state(), and by lifting the calls to grow_stack_state() to
check_stack_access_within_bounds() as suggested by Andrii. The code is
now simpler and more convincingly tracks the correct maximum stack size.
check_stack_range_initialized() can now rely on enough stack having been
allocated for the access; this helps with the fix for the first issue.
A few tests were changed to also check the stack depth computation. The
one that fails without this patch is verifier_var_off:stack_write_priv_vs_unpriv.
Fixes: 01f810ace9ed3 ("bpf: Allow variable-offset stack access")
Reported-by: Hao Sun <sunhao.th@gmail.com>
Signed-off-by: Andrei Matei <andreimatei1@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20231208032519.260451-3-andreimatei1@gmail.com
Closes: https://lore.kernel.org/bpf/CABWLsev9g8UP_c3a=1qbuZUi20tGoUXoU07FPf-5FLvhOKOY+Q@mail.gmail.com/
2023-12-07 22:25:18 -05:00
|
|
|
.errstr = "R0 invalid mem access 'scalar'",
|
|
|
|
.result_unpriv = REJECT,
|
|
|
|
.errstr_unpriv = "invalid read from stack R7 off=-16 size=8",
|
2019-01-25 15:24:44 -08:00
|
|
|
},
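To illustrate the second fix described in the commit message above, here is a minimal sketch (my own construction, not one of the tests in this file) of a variable-offset stack write; the verifier has to size the stack for the smallest offset the write can reach (fp-16 here), not just for the fixed part of the pointer:
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),		/* unknown scalar, either 0 or 8 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_0),	/* r2 = fp-16 or fp-8 */
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),		/* may touch fp-16, so 16 bytes of stack are needed */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),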
|
|
|
|
{
|
|
|
|
"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
|
|
|
|
.insns = {
|
|
|
|
/* main prog */
|
|
|
|
/* pass fp-16, fp-8 into a function */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
|
|
|
|
/* 1st lookup from map */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_8, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
|
|
|
/* write map_value_ptr into stack frame of main prog at fp-8 */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_8, 1),
|
|
|
|
|
|
|
|
/* 2nd lookup from map */
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
|
|
|
|
BPF_FUNC_map_lookup_elem),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_9, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
|
|
|
/* write map_value_ptr into stack frame of main prog at fp-16 */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_9, 1),
|
|
|
|
|
|
|
|
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
|
|
|
|
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 2 */
|
|
|
|
/* if arg2 == 1 do *arg1 = 0 */
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
|
|
|
|
/* if arg4 == 1 do *arg3 = 0 */
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.fixup_map_hash_8b = { 12, 22 },
|
|
|
|
.result = REJECT,
|
|
|
|
.errstr = "invalid access to map value, value_size=8 off=2 size=8",
|
|
|
|
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
|
|
|
|
.insns = {
|
|
|
|
/* main prog */
|
|
|
|
/* pass fp-16, fp-8 into a function */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
|
|
|
|
/* 1st lookup from map */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_8, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
|
|
|
/* write map_value_ptr into stack frame of main prog at fp-8 */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_8, 1),
|
|
|
|
|
|
|
|
/* 2nd lookup from map */
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
|
|
|
|
BPF_FUNC_map_lookup_elem),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_9, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
|
|
|
/* write map_value_ptr into stack frame of main prog at fp-16 */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_9, 1),
|
|
|
|
|
|
|
|
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
|
|
|
|
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 2 */
|
|
|
|
/* if arg2 == 1 do *arg1 = 0 */
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
|
|
|
|
/* if arg4 == 1 do *arg3 = 0 */
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.fixup_map_hash_8b = { 12, 22 },
|
|
|
|
.result = ACCEPT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
|
|
|
|
.insns = {
|
|
|
|
/* main prog */
|
|
|
|
/* pass fp-16, fp-8 into a function */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
|
|
|
|
/* 1st lookup from map */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_8, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
|
|
|
/* write map_value_ptr into stack frame of main prog at fp-8 */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_8, 1),
|
|
|
|
|
|
|
|
/* 2nd lookup from map */
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_9, 0), // 26
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
|
|
|
/* write map_value_ptr into stack frame of main prog at fp-16 */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_9, 1),
|
|
|
|
|
|
|
|
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
|
|
|
|
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, -30),
|
|
|
|
|
|
|
|
/* subprog 2 */
|
|
|
|
/* if arg2 == 1 do *arg1 = 0 */
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
|
|
|
|
/* if arg4 == 1 do *arg3 = 0 */
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, -8),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.fixup_map_hash_8b = { 12, 22 },
|
|
|
|
.result = REJECT,
|
|
|
|
.errstr = "invalid access to map value, value_size=8 off=2 size=8",
|
|
|
|
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls that receive map_value_ptr_or_null via arg. test1",
|
|
|
|
.insns = {
|
|
|
|
/* main prog */
|
|
|
|
/* pass fp-16, fp-8 into a function */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
|
|
|
|
/* 1st lookup from map */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_8, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_8, 1),
|
|
|
|
|
|
|
|
/* 2nd lookup from map */
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_9, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_9, 1),
|
|
|
|
|
|
|
|
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
|
|
|
|
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 2 */
|
|
|
|
/* if arg2 == 1 do *arg1 = 0 */
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
|
|
|
|
/* if arg4 == 1 do *arg3 = 0 */
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.fixup_map_hash_8b = { 12, 22 },
|
|
|
|
.result = ACCEPT,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: two calls that receive map_value_ptr_or_null via arg. test2",
|
|
|
|
.insns = {
|
|
|
|
/* main prog */
|
|
|
|
/* pass fp-16, fp-8 into a function */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
|
|
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
|
|
|
|
/* 1st lookup from map */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_8, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_8, 1),
|
|
|
|
|
|
|
|
/* 2nd lookup from map */
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
|
|
|
|
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
|
|
/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_9, 0),
|
|
|
|
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
|
|
|
|
BPF_MOV64_IMM(BPF_REG_9, 1),
|
|
|
|
|
|
|
|
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
|
|
|
|
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
|
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
|
|
|
|
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
|
|
|
|
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 2 */
|
|
|
|
/* if arg2 == 1 do *arg1 = 0 */
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
|
|
|
|
/* if arg4 == 0 do *arg3 = 0 */
|
|
|
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
|
|
|
|
/* fetch map_value_ptr from the stack of this function */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
|
|
|
|
/* write into map value */
|
|
|
|
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.fixup_map_hash_8b = { 12, 22 },
|
|
|
|
.result = REJECT,
|
2022-03-01 14:27:45 -08:00
|
|
|
.errstr = "R0 invalid mem access 'scalar'",
|
2019-01-25 15:24:44 -08:00
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: pkt_ptr spill into caller stack",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, data)),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, data_end)),
|
|
|
|
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
|
|
|
|
/* spill unchecked pkt_ptr into stack of caller */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
|
|
|
|
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
|
|
|
|
/* now the pkt range is verified, read pkt_ptr from stack */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
|
|
|
|
/* write 4 bytes into packet */
|
|
|
|
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.result = ACCEPT,
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.retval = POINTER_VALUE,
|
|
|
|
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"calls: pkt_ptr spill into caller stack 2",
|
|
|
|
.insns = {
|
|
|
|
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
|
|
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
|
|
|
|
/* Marking is still kept, but not in all cases safe. */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
|
|
|
|
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
|
|
|
|
/* subprog 1 */
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, data)),
|
|
|
|
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
|
|
|
|
offsetof(struct __sk_buff, data_end)),
|
|
|
|
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
|
|
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
|
|
|
|
/* spill unchecked pkt_ptr into stack of caller */
|
|
|
|
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
|
|
|
|
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
|
|
|
|
/* now the pkt range is verified, read pkt_ptr from stack */
|
|
|
|
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
|
|
|
|
/* write 4 bytes into packet */
|
|
|
|
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
|
|
|
|
BPF_EXIT_INSN(),
|
|
|
|
},
|
|
|
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
|
|
|
.errstr = "invalid access to packet",
|
|
|
|
.result = REJECT,
|
|
|
|
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
|
|
|
|
},
|
|
|
|
{
"calls: pkt_ptr spill into caller stack 3",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* Marking is still kept and safe here. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),

/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* now the pkt range is verified, read pkt_ptr from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
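/* Same shape as the previous test, but the callee never reloads the spilled
 * pointer itself; the test checks that the range marking on the caller's
 * stack slot is propagated so the caller's guarded access is still accepted.
 */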
{
"calls: pkt_ptr spill into caller stack 4",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* Check marking propagated. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),

/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
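/* The caller below seeds fp-8 with the ctx pointer (r1) before the call, so
 * after the call the slot holds either the ctx pointer or a packet pointer
 * depending on the callee's branch, and the caller's single load would have
 * to be used with two different pointer types, which triggers the
 * "same insn cannot be used with different pointers" rejection.
 */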
{
"calls: pkt_ptr spill into caller stack 5",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),

/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "same insn cannot be used with different",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
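/* This test and the next seed the caller's fp-8 slot with a value that is not
 * a valid packet pointer (skb->data_end here, the scalar 0 in the next test).
 * On the path where the callee skips the spill, the caller's reload yields
 * that value and the dereference is rejected ("R4 invalid mem access").
 */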
{
"calls: pkt_ptr spill into caller stack 6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),

/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R4 invalid mem access",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 7",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),

/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R4 invalid mem access",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
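/* In the two tests below the caller performs its own bounds check and exits
 * early when the packet is too short. Test 8 is accepted because the callee
 * overwrites the caller's slot only with a checked packet pointer; test 9 is
 * rejected because the callee spills before its bounds check, so one path
 * leaves an unchecked packet pointer in the slot for the caller to
 * dereference.
 */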
{
"calls: pkt_ptr spill into caller stack 8",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),

/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 9",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),

/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid access to packet",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
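/* The caller below zero-initializes fp-8 and passes its address to the
 * callee, which either leaves it untouched (ctx == NULL) or overwrites it
 * with the result of map_lookup_elem(). The caller may therefore treat the
 * reloaded value as map_value_or_null and must NULL-check it before the
 * store.
 */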
{
"calls: caller stack init to zero or map_value_or_null",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
/* fetch map_value_or_null or const_zero from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* store into map_value */
BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),

/* subprog 1 */
/* if (ctx == 0) return; */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
/* else bpf_map_lookup() and *(fp - 8) = r0 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 13 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"calls: stack init to zero and pruning",
.insns = {
/* first make allocated_stack 16 byte */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
/* now fork the execution such that the false branch
 * of JGT insn will be verified second and it skips zero
 * init of fp-8 stack slot. If stack liveness marking
 * is missing live_read marks from call map_lookup
 * processing then pruning will incorrectly assume
 * that fp-8 stack slot was unused in the fall-through
 * branch and will accept the program incorrectly
 */
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 7 },
.errstr_unpriv = "invalid read from stack R2 off -8+0 size 8",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"calls: ctx read at start of subprog",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
|
2019-01-25 15:24:44 -08:00
|
|
|
.result_unpriv = REJECT,
|
|
|
|
.result = ACCEPT,
|
|
|
|
},
|
|
|
|
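/* The two "cross frame pruning" tests below make r8 (and r9 in the second
 * test) mirror the outcome of bpf_get_prandom_u32() and hide a load through a
 * register clobbered by the call (r1 in the first test, r2 in the second)
 * behind an "if (r8 == 1)" check after the call returns. State pruning across
 * the call frame must keep enough liveness information to still reach and
 * reject that access ("!read_ok").
 */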
{
"calls: cross frame pruning",
.insns = {
/* r8 = !!random();
 * call pruner()
 * if (r8)
 *     do something bad;
 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_8, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
|
2019-01-25 15:24:44 -08:00
|
|
|
.errstr = "!read_ok",
|
|
|
|
.result = REJECT,
|
|
|
|
},
|
2019-03-21 14:34:36 -07:00
|
|
|
{
"calls: cross frame pruning - liveness propagation",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_8, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_9, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.errstr = "!read_ok",
.result = REJECT,
},
/* Make sure that verifier.c:states_equal() considers IDs from all
 * frames when building 'idmap' for check_ids().
 */
{
"calls: check_ids() across call boundary",
.insns = {
/* Function main() */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* fp[-24] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -24),
/* fp[-32] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -32),
/* call foo(&fp[-24], &fp[-32]) ; both arguments have IDs in the current
 *                              ; stack frame
 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -24),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
BPF_CALL_REL(2),
/* exit 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* Function foo()
 *
 * r9 = &frame[0].fp[-24] ; save arguments in the callee saved registers,
 * r8 = &frame[0].fp[-32] ; arguments are pointers to pointers to map value
 */
BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_2),
/* r7 = ktime_get_ns() */
BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
/* r6 = ktime_get_ns() */
BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
/* if r6 > r7 goto +1 ; no new information about the state is derived from
 *                    ; this check, thus produced verifier states differ
 *                    ; only in 'insn_idx'
 * r9 = r8
 */
BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
/* r9 = *r9 ; verifier gets to this point via two paths:
 *          ; (I) one including r9 = r8, verified first;
 *          ; (II) one excluding r9 = r8, verified next.
 *          ; After load of *r9 to r9 the frame[0].fp[-24].id == r9.id.
 *          ; Suppose that checkpoint is created here via path (I).
 *          ; When verifying via (II) the r9.id must be compared against
 *          ; frame[0].fp[-24].id, otherwise (I) and (II) would be
 *          ; incorrectly deemed equivalent.
 * if r9 == 0 goto <exit>
 */
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1),
/* r8 = *r8 ; read map value via r8, this is not safe
 * r0 = *r8 ; because r8 might not be equal to r9.
 */
BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_8, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0),
/* exit 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.flags = BPF_F_TEST_STATE_FREQ,
.fixup_map_hash_8b = { 3, 9 },
.result = REJECT,
.errstr = "R8 invalid mem access 'map_value_or_null'",
.result_unpriv = REJECT,
.errstr_unpriv = "",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
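/* bpf_ringbuf_reserve() returns a referenced object, and such a reference may
 * be passed to a helper through at most one argument. The test below hands
 * the same reserved record to bpf_tcp_raw_gen_syncookie_ipv4() in two pointer
 * arguments, which the verifier rejects with "more than one arg with
 * ref_obj_id".
 */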
{
"calls: several args with ref_obj_id",
.insns = {
/* Reserve at least sizeof(struct iphdr) bytes in the ring buffer.
 * With a smaller size, the verifier would reject the call to
 * bpf_tcp_raw_gen_syncookie_ipv4 before we can reach the
 * ref_obj_id error.
 */
BPF_MOV64_IMM(BPF_REG_2, 20),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
/* if r0 == 0 goto <exit> */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tcp_raw_gen_syncookie_ipv4),
BPF_EXIT_INSN(),
},
.fixup_map_ringbuf = { 2 },
.result = REJECT,
.errstr = "more than one arg with ref_obj_id",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},