linux/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c


{
"atomic compare-and-exchange smoketest - 64bit",
.insns = {
/* val = 3; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
/* old = atomic_cmpxchg(&val, 2, 4); */
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* if (old != 3) exit(2); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
/* if (val != 3) exit(3); */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* old = atomic_cmpxchg(&val, 3, 4); */
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* if (old != 3) exit(4); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV64_IMM(BPF_REG_0, 4),
BPF_EXIT_INSN(),
/* if (val != 4) exit(5); */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
BPF_MOV64_IMM(BPF_REG_0, 5),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"atomic compare-and-exchange smoketest - 32bit",
.insns = {
/* val = 3; */
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 3),
/* old = atomic_cmpxchg(&val, 2, 4); */
BPF_MOV32_IMM(BPF_REG_1, 4),
BPF_MOV32_IMM(BPF_REG_0, 2),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
/* if (old != 3) exit(2); */
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV32_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
/* if (val != 3) exit(3); */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV32_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* old = atomic_cmpxchg(&val, 3, 4); */
BPF_MOV32_IMM(BPF_REG_1, 4),
BPF_MOV32_IMM(BPF_REG_0, 3),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
/* if (old != 3) exit(4); */
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV32_IMM(BPF_REG_0, 4),
BPF_EXIT_INSN(),
/* if (val != 4) exit(5); */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
BPF_MOV32_IMM(BPF_REG_0, 5),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"Can't use cmpxchg on uninit src reg",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "!read_ok",
},
/*
 * BPF CMPXCHG semantics differ from x86 (and s390): BPF always loads the
 * value from memory into r0, while x86 only does so when r0 and the value
 * in memory differ. For a 32-bit (BPF_W) cmpxchg that load zero-extends
 * r0, so the verifier explicitly zero-extends r0 after every 32-bit
 * cmpxchg by patching in a BPF_ZEXT_REG instruction; archs that don't
 * need the manual zero-extension can use insn_is_zext() to skip the
 * unnecessary mov.
 */
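/*
 * As a rough sketch (not verbatim verifier output), on a JIT that needs
 * explicit zero extension the verifier rewrites
 *
 *	BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8)
 *
 * into
 *
 *	BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8)
 *	BPF_ZEXT_REG(BPF_REG_0)
 *
 * which is the behaviour the "BPF_W cmpxchg should zero top 32 bits"
 * test below relies on.
 */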
{
"BPF_W cmpxchg should zero top 32 bits",
.insns = {
/* r0 = U64_MAX; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
/* u64 val = r0; */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* r0 = (u32)atomic_cmpxchg((u32 *)&val, r0, 1); */
BPF_MOV32_IMM(BPF_REG_1, 1),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* r1 = 0x00000000FFFFFFFFull; */
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
/* if (r0 != r1) exit(1); */
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 2),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"Dest pointer in r0 - fail",
.insns = {
/* val = 0; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* r0 = &val */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, 1); */
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* if (r0 != 0) exit(1); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr into mem",
},
{
"Dest pointer in r0 - succeed",
.insns = {
/* r0 = &val */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* r0 = atomic_cmpxchg(&val, r0, 0); */
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* r1 = *r0 */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
/*
 * For unprivileged, this "Dest pointer in r0 - succeed" case is rejected:
 * the dst memory location (r10 - 8) holds the spilled pointer r10, so the
 * atomic cmpxchg could let the program leak it, either via the value
 * returned in r0 or by reading it back from the stack afterwards.
 */
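/*
 * Allowing this for unprivileged would be of little use anyway: a variant
 * that tries to store a new pointer through the cmpxchg, e.g.
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_10, -8)
 *
 * is already rejected for unprivileged with "R10 leaks addr into mem".
 */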
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr into mem",
},
{
"Dest pointer in r0 - succeed, check 2",
.insns = {
/* r0 = &val */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* r5 = &val */
BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, r5); */
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
/* r1 = *r0 */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr into mem",
},
{
"Dest pointer in r0 - succeed, check 3",
.insns = {
/* r0 = &val */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* r5 = &val */
BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, r5); */
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid size of register fill",
.errstr_unpriv = "R0 leaks addr into mem",
},
{
"Dest pointer in r0 - succeed, check 4",
.insns = {
/* r0 = &val */
BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
/* r5 = &val */
BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, r5); */
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
/* r1 = *r10 */
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R10 partial copy of pointer",
},
{
"Dest pointer in r0 - succeed, check 5",
.insns = {
/* r0 = &val */
BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
/* r5 = &val */
BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, r5); */
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
/* r1 = *r0 */
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 invalid mem access",
.errstr_unpriv = "R10 partial copy of pointer",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},