Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
bpf: Maintain FIFO property for rqspinlock unlock
Since out-of-order unlocks are unsupported for rqspinlock, and irqsave
variants enforce strict FIFO ordering anyway, make the same change for
normal non-irqsave variants, such that FIFO ordering is enforced. Two
new verifier state fields (active_lock_id, active_lock_ptr) are used to
denote the top of the stack, and prev_id and prev_ptr are ascertained
whenever popping the topmost entry through an unlock. Take special care
to make these fields part of the state comparison in refsafe.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-25-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 0de2046137
commit ea21771c07

2 changed files with 31 additions and 5 deletions
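For context, a minimal BPF-C sketch of the ordering rule this commit makes the verifier enforce for the non-irqsave resilient lock kfuncs: with two rqspinlocks held, the most recently acquired one must be released first, otherwise the program is rejected with "bpf_res_spin_unlock cannot be out of order". The map layout, section name, and kfunc declarations below are illustrative assumptions, not part of this commit.

	// SPDX-License-Identifier: GPL-2.0
	/* Illustrative sketch only; map/kfunc declarations are assumptions. */
	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>

	extern int bpf_res_spin_lock(struct bpf_res_spin_lock *lock) __ksym;
	extern void bpf_res_spin_unlock(struct bpf_res_spin_lock *lock) __ksym;

	/* Two map values, each embedding a resilient queued spinlock. */
	struct elem {
		struct bpf_res_spin_lock lock;
		int data;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 2);
		__type(key, int);
		__type(value, struct elem);
	} locks SEC(".maps");

	SEC("tc")
	int fifo_unlock_example(struct __sk_buff *ctx)
	{
		int k0 = 0, k1 = 1;
		struct elem *a = bpf_map_lookup_elem(&locks, &k0);
		struct elem *b = bpf_map_lookup_elem(&locks, &k1);

		if (!a || !b)
			return 0;
		if (bpf_res_spin_lock(&a->lock))
			return 0;
		if (bpf_res_spin_lock(&b->lock)) {
			bpf_res_spin_unlock(&a->lock);
			return 0;
		}
		/*
		 * b->lock is now the top of the verifier's lock stack
		 * (active_lock_id/active_lock_ptr), so it must be released
		 * first. Swapping the two unlocks below is exactly the
		 * out-of-order pattern the new check rejects.
		 */
		bpf_res_spin_unlock(&b->lock);
		bpf_res_spin_unlock(&a->lock);
		return 0;
	}

	char _license[] SEC("license") = "GPL";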
include/linux/bpf_verifier.h

@@ -268,6 +268,7 @@ struct bpf_reference_state {
 		REF_TYPE_LOCK		= (1 << 3),
 		REF_TYPE_RES_LOCK	= (1 << 4),
 		REF_TYPE_RES_LOCK_IRQ	= (1 << 5),
+		REF_TYPE_LOCK_MASK	= REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
 	} type;
 	/* Track each reference created with a unique id, even if the same
 	 * instruction creates the reference multiple times (eg, via CALL).
@@ -434,6 +435,8 @@ struct bpf_verifier_state {
 	u32 active_locks;
 	u32 active_preempt_locks;
 	u32 active_irq_id;
+	u32 active_lock_id;
+	void *active_lock_ptr;
 	bool active_rcu_lock;
 
 	bool speculative;
kernel/bpf/verifier.c

@@ -1428,6 +1428,8 @@ static int copy_reference_state(struct bpf_verifier_state *dst, const struct bpf
 	dst->active_preempt_locks = src->active_preempt_locks;
 	dst->active_rcu_lock = src->active_rcu_lock;
 	dst->active_irq_id = src->active_irq_id;
+	dst->active_lock_id = src->active_lock_id;
+	dst->active_lock_ptr = src->active_lock_ptr;
 	return 0;
 }
 
@@ -1527,6 +1529,8 @@ static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum r
 	s->ptr = ptr;
 
 	state->active_locks++;
+	state->active_lock_id = id;
+	state->active_lock_ptr = ptr;
 	return 0;
 }
 
@@ -1577,16 +1581,24 @@ static bool find_reference_state(struct bpf_verifier_state *state, int ptr_id)
 
 static int release_lock_state(struct bpf_verifier_state *state, int type, int id, void *ptr)
 {
+	void *prev_ptr = NULL;
+	u32 prev_id = 0;
 	int i;
 
 	for (i = 0; i < state->acquired_refs; i++) {
-		if (state->refs[i].type != type)
-			continue;
-		if (state->refs[i].id == id && state->refs[i].ptr == ptr) {
+		if (state->refs[i].type == type && state->refs[i].id == id &&
+		    state->refs[i].ptr == ptr) {
 			release_reference_state(state, i);
 			state->active_locks--;
+			/* Reassign active lock (id, ptr). */
+			state->active_lock_id = prev_id;
+			state->active_lock_ptr = prev_ptr;
 			return 0;
 		}
+		if (state->refs[i].type & REF_TYPE_LOCK_MASK) {
+			prev_id = state->refs[i].id;
+			prev_ptr = state->refs[i].ptr;
+		}
 	}
 	return -EINVAL;
 }
@@ -8342,6 +8354,14 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, int flags)
 			type = REF_TYPE_RES_LOCK;
 		else
 			type = REF_TYPE_LOCK;
+		if (!find_lock_state(cur, type, reg->id, ptr)) {
+			verbose(env, "%s_unlock of different lock\n", lock_str);
+			return -EINVAL;
+		}
+		if (reg->id != cur->active_lock_id || ptr != cur->active_lock_ptr) {
+			verbose(env, "%s_unlock cannot be out of order\n", lock_str);
+			return -EINVAL;
+		}
 		if (release_lock_state(cur, type, reg->id, ptr)) {
 			verbose(env, "%s_unlock of different lock\n", lock_str);
 			return -EINVAL;
@@ -12534,8 +12554,7 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
 
 	if (!env->cur_state->active_locks)
 		return -EINVAL;
-	s = find_lock_state(env->cur_state, REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
-			    id, ptr);
+	s = find_lock_state(env->cur_state, REF_TYPE_LOCK_MASK, id, ptr);
 	if (!s) {
 		verbose(env, "held lock and object are not in the same allocation\n");
 		return -EINVAL;
@@ -18591,6 +18610,10 @@ static bool refsafe(struct bpf_verifier_state *old, struct bpf_verifier_state *c
 	if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap))
 		return false;
 
+	if (!check_ids(old->active_lock_id, cur->active_lock_id, idmap) ||
+	    old->active_lock_ptr != cur->active_lock_ptr)
+		return false;
+
 	for (i = 0; i < old->acquired_refs; i++) {
 		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
 		    old->refs[i].type != cur->refs[i].type)
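To make the bookkeeping in release_lock_state() easier to follow, here is a small standalone C model (plain userspace code, not verifier code; the names, fixed-size table, and held flag are illustrative) of how the (active_lock_id, active_lock_ptr) pair tracks the most recently acquired held lock, and how releasing that top entry reassigns the pair to the previous held lock by rescanning the reference table.

	/* Standalone model of the verifier's top-of-stack lock tracking. */
	#include <stdio.h>

	struct ref { unsigned int id; void *ptr; int held; };

	static struct ref refs[8];
	static int nr_refs;
	static unsigned int active_id;
	static void *active_ptr;

	static void acquire(unsigned int id, void *ptr)
	{
		refs[nr_refs++] = (struct ref){ .id = id, .ptr = ptr, .held = 1 };
		active_id = id;		/* new top of the lock "stack" */
		active_ptr = ptr;
	}

	static int release(unsigned int id, void *ptr)
	{
		unsigned int prev_id = 0;
		void *prev_ptr = NULL;

		/* Mirrors the "unlock cannot be out of order" check. */
		if (id != active_id || ptr != active_ptr)
			return -1;

		for (int i = 0; i < nr_refs; i++) {
			if (refs[i].held && refs[i].id == id && refs[i].ptr == ptr) {
				refs[i].held = 0;
				/* Fall back to the previous still-held lock. */
				active_id = prev_id;
				active_ptr = prev_ptr;
				return 0;
			}
			if (refs[i].held) {
				prev_id = refs[i].id;
				prev_ptr = refs[i].ptr;
			}
		}
		return -1;
	}

	int main(void)
	{
		int a, b;

		acquire(1, &a);
		acquire(2, &b);
		printf("unlock a first: %d\n", release(1, &a));	/* -1: out of order */
		printf("unlock b first: %d\n", release(2, &b));	/* 0 */
		printf("then unlock a:  %d\n", release(1, &a));	/* 0 */
		return 0;
	}

Compiled and run, the model rejects releasing lock "a" while "b" is still held, then accepts the in-order releases, which is the behavior the verifier now enforces at load time.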