mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00
bpf: replace bpf_timer_set_callback with a generic helper
In the same way we have a generic __bpf_async_init(), we also need to share code between timer and workqueue for the set_callback call. We just add an unused flags parameter, as it will be used for workqueues. Signed-off-by: Benjamin Tissoires <bentiss@kernel.org> Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-3-6c986a5a741f@kernel.org Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
56b4a177ae
commit
073f11b026
1 changed file with 18 additions and 11 deletions
|
@@ -1262,22 +1262,23 @@ static const struct bpf_func_proto bpf_timer_init_proto = {
|
||||||
.arg3_type = ARG_ANYTHING,
|
.arg3_type = ARG_ANYTHING,
|
||||||
};
|
};
|
||||||
|
|
||||||
BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
|
static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
|
||||||
struct bpf_prog_aux *, aux)
|
struct bpf_prog_aux *aux, unsigned int flags,
|
||||||
|
enum bpf_async_type type)
|
||||||
{
|
{
|
||||||
struct bpf_prog *prev, *prog = aux->prog;
|
struct bpf_prog *prev, *prog = aux->prog;
|
||||||
struct bpf_hrtimer *t;
|
struct bpf_async_cb *cb;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
if (in_nmi())
|
if (in_nmi())
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
__bpf_spin_lock_irqsave(&timer->lock);
|
__bpf_spin_lock_irqsave(&async->lock);
|
||||||
t = timer->timer;
|
cb = async->cb;
|
||||||
if (!t) {
|
if (!cb) {
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
if (!atomic64_read(&t->cb.map->usercnt)) {
|
if (!atomic64_read(&cb->map->usercnt)) {
|
||||||
/* maps with timers must be either held by user space
|
/* maps with timers must be either held by user space
|
||||||
* or pinned in bpffs. Otherwise timer might still be
|
* or pinned in bpffs. Otherwise timer might still be
|
||||||
* running even when bpf prog is detached and user space
|
* running even when bpf prog is detached and user space
|
||||||
|
@@ -1286,7 +1287,7 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callb
|
||||||
ret = -EPERM;
|
ret = -EPERM;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
prev = t->cb.prog;
|
prev = cb->prog;
|
||||||
if (prev != prog) {
|
if (prev != prog) {
|
||||||
/* Bump prog refcnt once. Every bpf_timer_set_callback()
|
/* Bump prog refcnt once. Every bpf_timer_set_callback()
|
||||||
* can pick different callback_fn-s within the same prog.
|
* can pick different callback_fn-s within the same prog.
|
||||||
|
@@ -1299,14 +1300,20 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callb
|
||||||
if (prev)
|
if (prev)
|
||||||
/* Drop prev prog refcnt when swapping with new prog */
|
/* Drop prev prog refcnt when swapping with new prog */
|
||||||
bpf_prog_put(prev);
|
bpf_prog_put(prev);
|
||||||
t->cb.prog = prog;
|
cb->prog = prog;
|
||||||
}
|
}
|
||||||
rcu_assign_pointer(t->cb.callback_fn, callback_fn);
|
rcu_assign_pointer(cb->callback_fn, callback_fn);
|
||||||
out:
|
out:
|
||||||
__bpf_spin_unlock_irqrestore(&timer->lock);
|
__bpf_spin_unlock_irqrestore(&async->lock);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* BPF helper: attach @callback_fn to the timer embedded in @timer.
 * Thin wrapper that delegates to the shared timer/workqueue helper,
 * passing flags = 0 (unused for timers) and the timer async type.
 */
BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
}
|
||||||
|
|
||||||
static const struct bpf_func_proto bpf_timer_set_callback_proto = {
|
static const struct bpf_func_proto bpf_timer_set_callback_proto = {
|
||||||
.func = bpf_timer_set_callback,
|
.func = bpf_timer_set_callback,
|
||||||
.gpl_only = true,
|
.gpl_only = true,
|
||||||
|
|
Loading…
Add table
Reference in a new issue