Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
perf/core: Simplify the perf_event_alloc() error path
The error cleanup sequence in perf_event_alloc() is a subset of the
existing _free_event() function (it must of course be). Split this out
into __free_event() and simplify the error path.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135517.967889521@infradead.org
This commit is contained in:
parent 061c991697
commit c70ca29803

2 changed files with 79 additions and 77 deletions
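Before the diff, a minimal userspace sketch of the idea (all names are invented for illustration; this is not kernel code): each completed construction step sets a flag in an attach_state-style bitmask, so the constructor needs only a single err: label that funnels into one teardown routine, which consults the flags to undo exactly the steps that completed. This replaces the old ladder of err_callchain_buffer/err_addr_filters/err_per_task/err_pmu/err_ns labels.

#include <stdio.h>
#include <stdlib.h>

#define ATTACH_BUFFER    0x0001         /* step 1 completed */
#define ATTACH_EXCLUSIVE 0x0002         /* step 2 completed */

struct object {
        unsigned int attach_state;
        char *buffer;
};

static int exclusive_slot;              /* stand-in for pmu->exclusive_cnt */

/* vs object_alloc() error: undoes only the steps whose flag is set */
static void __free_object(struct object *obj)
{
        if (obj->attach_state & ATTACH_EXCLUSIVE)
                exclusive_slot--;
        if (obj->attach_state & ATTACH_BUFFER)
                free(obj->buffer);
        free(obj);
}

static struct object *object_alloc(int fail_step)
{
        struct object *obj = calloc(1, sizeof(*obj));

        if (!obj)
                return NULL;

        /* step 1: allocate the buffer, then record that it is held */
        obj->buffer = malloc(64);
        if (!obj->buffer)
                goto err;
        obj->attach_state |= ATTACH_BUFFER;

        if (fail_step == 1)             /* simulated failure after step 1 */
                goto err;

        /* step 2: take the exclusive slot, then record it */
        exclusive_slot++;
        obj->attach_state |= ATTACH_EXCLUSIVE;

        if (fail_step == 2)             /* simulated failure after step 2 */
                goto err;

        return obj;
err:
        __free_object(obj);             /* one label; attach_state says what to undo */
        return NULL;
}

int main(void)
{
        struct object *obj = object_alloc(0);   /* full construction */
        if (obj)
                __free_object(obj);

        object_alloc(1);                        /* fails mid-way */
        printf("exclusive_slot = %d\n", exclusive_slot);
        return 0;
}

Here object_alloc(1) fails between the two steps: only ATTACH_BUFFER is set, so __free_object() frees the buffer but does not touch the exclusive slot, leaving the counter balanced, which is the same property perf_event_alloc() now gets from __free_event().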
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -673,13 +673,15 @@ struct swevent_hlist {
         struct rcu_head rcu_head;
 };
 
-#define PERF_ATTACH_CONTEXT     0x01
-#define PERF_ATTACH_GROUP       0x02
-#define PERF_ATTACH_TASK        0x04
-#define PERF_ATTACH_TASK_DATA   0x08
-#define PERF_ATTACH_ITRACE      0x10
-#define PERF_ATTACH_SCHED_CB    0x20
-#define PERF_ATTACH_CHILD       0x40
+#define PERF_ATTACH_CONTEXT     0x0001
+#define PERF_ATTACH_GROUP       0x0002
+#define PERF_ATTACH_TASK        0x0004
+#define PERF_ATTACH_TASK_DATA   0x0008
+#define PERF_ATTACH_ITRACE      0x0010
+#define PERF_ATTACH_SCHED_CB    0x0020
+#define PERF_ATTACH_CHILD       0x0040
+#define PERF_ATTACH_EXCLUSIVE   0x0080
+#define PERF_ATTACH_CALLCHAIN   0x0100
 
 struct bpf_prog;
 struct perf_cgroup;
diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5289,6 +5289,8 @@ static int exclusive_event_init(struct perf_event *event)
                         return -EBUSY;
         }
 
+        event->attach_state |= PERF_ATTACH_EXCLUSIVE;
+
         return 0;
 }
 
@@ -5296,14 +5298,13 @@ static void exclusive_event_destroy(struct perf_event *event)
 {
         struct pmu *pmu = event->pmu;
 
-        if (!is_exclusive_pmu(pmu))
-                return;
-
         /* see comment in exclusive_event_init() */
         if (event->attach_state & PERF_ATTACH_TASK)
                 atomic_dec(&pmu->exclusive_cnt);
         else
                 atomic_inc(&pmu->exclusive_cnt);
+
+        event->attach_state &= ~PERF_ATTACH_EXCLUSIVE;
 }
 
 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
@@ -5362,6 +5363,54 @@ static void perf_pending_task_sync(struct perf_event *event)
         rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
 }
 
+/* vs perf_event_alloc() error */
+static void __free_event(struct perf_event *event)
+{
+        if (event->attach_state & PERF_ATTACH_CALLCHAIN)
+                put_callchain_buffers();
+
+        kfree(event->addr_filter_ranges);
+
+        if (event->attach_state & PERF_ATTACH_EXCLUSIVE)
+                exclusive_event_destroy(event);
+
+        if (is_cgroup_event(event))
+                perf_detach_cgroup(event);
+
+        if (event->destroy)
+                event->destroy(event);
+
+        /*
+         * Must be after ->destroy(), due to uprobe_perf_close() using
+         * hw.target.
+         */
+        if (event->hw.target)
+                put_task_struct(event->hw.target);
+
+        if (event->pmu_ctx) {
+                /*
+                 * put_pmu_ctx() needs an event->ctx reference, because of
+                 * epc->ctx.
+                 */
+                WARN_ON_ONCE(!event->ctx);
+                WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx);
+                put_pmu_ctx(event->pmu_ctx);
+        }
+
+        /*
+         * perf_event_free_task() relies on put_ctx() being 'last', in
+         * particular all task references must be cleaned up.
+         */
+        if (event->ctx)
+                put_ctx(event->ctx);
+
+        if (event->pmu)
+                module_put(event->pmu->module);
+
+        call_rcu(&event->rcu_head, free_event_rcu);
+}
+
+/* vs perf_event_alloc() success */
 static void _free_event(struct perf_event *event)
 {
         irq_work_sync(&event->pending_irq);
@@ -5384,42 +5433,10 @@ static void _free_event(struct perf_event *event)
                 mutex_unlock(&event->mmap_mutex);
         }
 
-        if (is_cgroup_event(event))
-                perf_detach_cgroup(event);
-
-        if (!event->parent) {
-                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-                        put_callchain_buffers();
-        }
-
         perf_event_free_bpf_prog(event);
         perf_addr_filters_splice(event, NULL);
-        kfree(event->addr_filter_ranges);
 
-        if (event->destroy)
-                event->destroy(event);
-
-        /*
-         * Must be after ->destroy(), due to uprobe_perf_close() using
-         * hw.target.
-         */
-        if (event->hw.target)
-                put_task_struct(event->hw.target);
-
-        if (event->pmu_ctx)
-                put_pmu_ctx(event->pmu_ctx);
-
-        /*
-         * perf_event_free_task() relies on put_ctx() being 'last', in particular
-         * all task references must be cleaned up.
-         */
-        if (event->ctx)
-                put_ctx(event->ctx);
-
-        exclusive_event_destroy(event);
-        module_put(event->pmu->module);
-
-        call_rcu(&event->rcu_head, free_event_rcu);
+        __free_event(event);
 }
 
 /*
@@ -12093,8 +12110,10 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
                         event->destroy(event);
         }
 
-        if (ret)
+        if (ret) {
+                event->pmu = NULL;
                 module_put(pmu->module);
+        }
 
         return ret;
 }
@@ -12422,7 +12441,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
          * See perf_output_read().
          */
         if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
-                goto err_ns;
+                goto err;
 
         if (!has_branch_stack(event))
                 event->attr.branch_sample_type = 0;
@@ -12430,7 +12449,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
         pmu = perf_init_event(event);
         if (IS_ERR(pmu)) {
                 err = PTR_ERR(pmu);
-                goto err_ns;
+                goto err;
         }
 
         /*
@@ -12440,25 +12459,25 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
          */
         if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
                 err = -EINVAL;
-                goto err_pmu;
+                goto err;
         }
 
         if (event->attr.aux_output &&
             (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
              event->attr.aux_pause || event->attr.aux_resume)) {
                 err = -EOPNOTSUPP;
-                goto err_pmu;
+                goto err;
         }
 
         if (event->attr.aux_pause && event->attr.aux_resume) {
                 err = -EINVAL;
-                goto err_pmu;
+                goto err;
         }
 
         if (event->attr.aux_start_paused) {
                 if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) {
                         err = -EOPNOTSUPP;
-                        goto err_pmu;
+                        goto err;
                 }
                 event->hw.aux_paused = 1;
         }
@@ -12466,12 +12485,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
         if (cgroup_fd != -1) {
                 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
                 if (err)
-                        goto err_pmu;
+                        goto err;
         }
 
         err = exclusive_event_init(event);
         if (err)
-                goto err_pmu;
+                goto err;
 
         if (has_addr_filter(event)) {
                 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
@@ -12479,7 +12498,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                                                     GFP_KERNEL);
                 if (!event->addr_filter_ranges) {
                         err = -ENOMEM;
-                        goto err_per_task;
+                        goto err;
                 }
 
                 /*
@@ -12504,41 +12523,22 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
                         err = get_callchain_buffers(attr->sample_max_stack);
                         if (err)
-                                goto err_addr_filters;
+                                goto err;
+                        event->attach_state |= PERF_ATTACH_CALLCHAIN;
                 }
         }
 
         err = security_perf_event_alloc(event);
         if (err)
-                goto err_callchain_buffer;
+                goto err;
 
         /* symmetric to unaccount_event() in _free_event() */
         account_event(event);
 
         return event;
 
-err_callchain_buffer:
-        if (!event->parent) {
-                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-                        put_callchain_buffers();
-        }
-err_addr_filters:
-        kfree(event->addr_filter_ranges);
-
-err_per_task:
-        exclusive_event_destroy(event);
-
-err_pmu:
-        if (is_cgroup_event(event))
-                perf_detach_cgroup(event);
-        if (event->destroy)
-                event->destroy(event);
-        module_put(pmu->module);
-err_ns:
-        if (event->hw.target)
-                put_task_struct(event->hw.target);
-        call_rcu(&event->rcu_head, free_event_rcu);
-
+err:
+        __free_event(event);
         return ERR_PTR(err);
 }
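One subtlety in the perf_try_init_event() hunk above: since __free_event() now calls module_put() whenever event->pmu is set, an init failure that has already dropped the module reference itself must also clear event->pmu, or the error path would drop the reference twice. A small sketch of that invariant (invented names, not the kernel API):

#include <assert.h>
#include <stddef.h>

struct module { int refcount; };
struct pmu { struct module module; };
struct event { struct pmu *pmu; };

static void module_put(struct module *m)
{
        m->refcount--;
}

/* sketch of perf_try_init_event(): on failure, drop the reference and the pointer together */
static int try_init_event(struct event *event, struct pmu *pmu, int ret)
{
        pmu->module.refcount++;                 /* stand-in for try_module_get() */
        event->pmu = pmu;

        if (ret) {
                event->pmu = NULL;              /* the new hunk: forget the failed pmu... */
                module_put(&pmu->module);
        }
        return ret;
}

/* sketch of the shared teardown in __free_event() */
static void free_event(struct event *event)
{
        if (event->pmu)                         /* ...so this cannot put the module twice */
                module_put(&event->pmu->module);
}

int main(void)
{
        struct pmu pmu = { { 0 } };
        struct event event = { NULL };

        (void)try_init_event(&event, &pmu, -1); /* simulated init failure */
        free_event(&event);                     /* no double module_put() */
        assert(pmu.module.refcount == 0);       /* reference count balanced */
        return 0;
}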