// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include "util/evsel.h"
#include "util/env.h"

perf evlist: Keep topdown counters in weak group
On Intel Icelake, topdown events must always be grouped with a slots
event as leader. When a metric is parsed, a weak group is formed and
retried if perf_event_open fails. The retried events aren't grouped,
breaking the slots leader requirement. This change modifies the weak
group "reset" behavior so that topdown events aren't broken from the
group for the retry.
Before:
$ perf stat -e '{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,baclears.any,ARITH.DIVIDER_ACTIVE}:W' -a sleep 1
Performance counter stats for 'system wide':
47,867,188,483 slots (92.27%)
<not supported> topdown-bad-spec
<not supported> topdown-be-bound
<not supported> topdown-fe-bound
<not supported> topdown-retiring
2,173,346,937 branch-instructions (92.27%)
10,540,253 branch-misses # 0.48% of all branches (92.29%)
96,291,140 bus-cycles (92.29%)
6,214,202 cache-misses # 20.120 % of all cache refs (92.29%)
30,886,082 cache-references (76.91%)
11,773,726,641 cpu-cycles (84.62%)
11,807,585,307 instructions # 1.00 insn per cycle (92.31%)
0 mem-loads (92.32%)
2,212,928,573 mem-stores (84.69%)
10,024,403,118 ref-cycles (92.35%)
16,232,978 baclears.any (92.35%)
23,832,633 ARITH.DIVIDER_ACTIVE (84.59%)
0.981070734 seconds time elapsed
After:
$ perf stat -e '{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,baclears.any,ARITH.DIVIDER_ACTIVE}:W' -a sleep 1
Performance counter stats for 'system wide':
31040189283 slots (92.27%)
8997514811 topdown-bad-spec # 28.2% bad speculation (92.27%)
10997536028 topdown-be-bound # 34.5% backend bound (92.27%)
4778060526 topdown-fe-bound # 15.0% frontend bound (92.27%)
7086628768 topdown-retiring # 22.2% retiring (92.27%)
1417611942 branch-instructions (92.26%)
5285529 branch-misses # 0.37% of all branches (92.28%)
62922469 bus-cycles (92.29%)
1440708 cache-misses # 8.292 % of all cache refs (92.30%)
17374098 cache-references (76.94%)
8040889520 cpu-cycles (84.63%)
7709992319 instructions # 0.96 insn per cycle (92.32%)
0 mem-loads (92.32%)
1515669558 mem-stores (84.68%)
6542411177 ref-cycles (92.35%)
4154149 baclears.any (92.35%)
20556152 ARITH.DIVIDER_ACTIVE (84.59%)
1.010799593 seconds time elapsed
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Florian Fischer <florian.fischer@muhq.space>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.garry@huawei.com>
Cc: Kim Phillips <kim.phillips@amd.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Shunsuke Nakamura <nakamura.shun@fujitsu.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20220517052724.283874-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
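
To make the weak-group "reset" idea above concrete, here is a minimal, self-contained sketch (illustrative only, not the actual util/evlist.c code; the demo_* names and the must_be_in_group flag are simplified assumptions): when a failed weak group is retried, each member normally becomes its own leader, except members that have to stay with the slots leader.

/* Illustrative sketch only; simplified stand-ins for perf's evsel/evlist. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_evsel {
	const char *name;
	struct demo_evsel *leader;	/* group leader; points to self when ungrouped */
	bool must_be_in_group;		/* e.g. slots and topdown-* events */
};

/* Break up a failed weak group, keeping members that must stay grouped. */
static void demo_reset_weak_group(struct demo_evsel *evsel, size_t nr,
				  struct demo_evsel *leader)
{
	for (size_t i = 0; i < nr; i++) {
		if (evsel[i].leader != leader || evsel[i].must_be_in_group)
			continue;
		evsel[i].leader = &evsel[i];	/* retried as an ungrouped event */
	}
}

int main(void)
{
	struct demo_evsel ev[] = {
		{ "slots",            NULL, true  },
		{ "topdown-retiring", NULL, true  },
		{ "cache-misses",     NULL, false },
	};

	for (size_t i = 0; i < 3; i++)
		ev[i].leader = &ev[0];

	demo_reset_weak_group(ev, 3, &ev[0]);

	for (size_t i = 0; i < 3; i++)
		printf("%-18s leader=%s\n", ev[i].name, ev[i].leader->name);
	return 0;
}
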
#include "util/pmu.h"
#include "util/pmus.h"
#include "linux/string.h"
#include "topdown.h"

perf record: Support sample-read topdown metric group for hybrid platforms
With the hardware TopDown metrics feature, the sample-read feature should
be supported for a TopDown group, e.g., sampling a non-topdown event and
reading a TopDown metric group. But the current perf record code errors
out.
For a TopDown metric group, the slots event must be the leader of the
group, but the leader slots event doesn't support sampling. To support
sample-read of a TopDown metric group, use the 2nd event of the group as
the "leader" for the purposes of sampling.
Only platforms with the TopDown metrics feature support sample-read of a
topdown group. Commit acb65150a47c ("perf record: Support sample-read
topdown metric group") added arch_topdown_sample_read() to indicate
whether a TopDown group supports sample-read, but it only works on
non-hybrid systems; this patch extends the support to hybrid platforms.
Before:
# ./perf record -e "{cpu_core/slots/,cpu_core/cycles/,cpu_core/topdown-retiring/}:S" -a sleep 1
Error:
The sys_perf_event_open() syscall returned with 22 (Invalid argument) for event (cpu_core/topdown-retiring/).
/bin/dmesg | grep -i perf may provide additional information.
After:
# ./perf record -e "{cpu_core/slots/,cpu_core/cycles/,cpu_core/topdown-retiring/}:S" -a sleep 1
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.238 MB perf.data (369 samples) ]
Fixes: acb65150a47c2bae ("perf record: Support sample-read topdown metric group")
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Zhengjun Xing <zhengjun.xing@linux.intel.com>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220602153603.1884710-1-zhengjun.xing@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
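
A rough sketch of the leader-selection rule described above (illustrative only, not the actual perf record code; the demo_* names are invented for the example): when the group leader is the non-sampling slots event, the second member of the group carries the samples.

/* Illustrative sketch only: pick the member that carries samples for a
 * TopDown group whose leader is the non-sampling slots event. */
#include <stdio.h>
#include <string.h>

static const char *demo_sampling_leader(const char *group[], int nr)
{
	if (nr >= 2 && !strcmp(group[0], "slots"))
		return group[1];	/* 2nd event stands in as sampling "leader" */
	return group[0];
}

int main(void)
{
	const char *group[] = { "slots", "cycles", "topdown-retiring" };

	printf("sampling leader: %s\n", demo_sampling_leader(group, 3));
	return 0;
}
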
#include "evsel.h"
#include "util/debug.h"
#include "env.h"

#define IBS_FETCH_L3MISSONLY (1ULL << 59)
#define IBS_OP_L3MISSONLY (1ULL << 16)

void arch_evsel__set_sample_weight(struct evsel *evsel)
{
	evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
}

perf evsel: Fixes topdown events in a weak group for the hybrid platform
The patch ("perf evlist: Keep topdown counters in weak group") fixes the
perf metrics topdown event issue when the topdown events are in a weak
group on a non-hybrid platform. However, it doesn't work for the hybrid
platform.
$ ./perf stat -e '{cpu_core/slots/,cpu_core/topdown-bad-spec/,
cpu_core/topdown-be-bound/,cpu_core/topdown-fe-bound/,
cpu_core/topdown-retiring/,cpu_core/branch-instructions/,
cpu_core/branch-misses/,cpu_core/bus-cycles/,cpu_core/cache-misses/,
cpu_core/cache-references/,cpu_core/cpu-cycles/,cpu_core/instructions/,
cpu_core/mem-loads/,cpu_core/mem-stores/,cpu_core/ref-cycles/,
cpu_core/cache-misses/,cpu_core/cache-references/}:W' -a sleep 1
Performance counter stats for 'system wide':
751,765,068 cpu_core/slots/ (84.07%)
<not supported> cpu_core/topdown-bad-spec/
<not supported> cpu_core/topdown-be-bound/
<not supported> cpu_core/topdown-fe-bound/
<not supported> cpu_core/topdown-retiring/
12,398,197 cpu_core/branch-instructions/ (84.07%)
1,054,218 cpu_core/branch-misses/ (84.24%)
539,764,637 cpu_core/bus-cycles/ (84.64%)
14,683 cpu_core/cache-misses/ (84.87%)
7,277,809 cpu_core/cache-references/ (77.30%)
222,299,439 cpu_core/cpu-cycles/ (77.28%)
63,661,714 cpu_core/instructions/ (84.85%)
0 cpu_core/mem-loads/ (77.29%)
12,271,725 cpu_core/mem-stores/ (77.30%)
542,241,102 cpu_core/ref-cycles/ (84.85%)
8,854 cpu_core/cache-misses/ (76.71%)
7,179,013 cpu_core/cache-references/ (76.31%)
1.003245250 seconds time elapsed
A hybrid platform has different PMU names for the core PMUs, while
current perf hard codes the PMU name "cpu".
The evsel->pmu_name can be used instead of the hard-coded "cpu" to fix
the issue.
For a hybrid platform, the pmu_name must be non-NULL, because there are
at least two core PMUs and the PMU has to be specified.
For a non-hybrid platform, the pmu_name may be NULL, because there is
only one core PMU, "cpu". For a NULL pmu_name, we can safely assume that
it is the "cpu" PMU.
In case other PMUs also define the "slots" event, check the PMU type as
well.
With the patch,
$ perf stat -e '{cpu_core/slots/,cpu_core/topdown-bad-spec/,
cpu_core/topdown-be-bound/,cpu_core/topdown-fe-bound/,
cpu_core/topdown-retiring/,cpu_core/branch-instructions/,
cpu_core/branch-misses/,cpu_core/bus-cycles/,cpu_core/cache-misses/,
cpu_core/cache-references/,cpu_core/cpu-cycles/,cpu_core/instructions/,
cpu_core/mem-loads/,cpu_core/mem-stores/,cpu_core/ref-cycles/,
cpu_core/cache-misses/,cpu_core/cache-references/}:W' -a sleep 1
Performance counter stats for 'system wide':
766,620,266 cpu_core/slots/ (84.06%)
73,172,129 cpu_core/topdown-bad-spec/ # 9.5% bad speculation (84.06%)
193,443,341 cpu_core/topdown-be-bound/ # 25.0% backend bound (84.06%)
403,940,929 cpu_core/topdown-fe-bound/ # 52.3% frontend bound (84.06%)
102,070,237 cpu_core/topdown-retiring/ # 13.2% retiring (84.06%)
12,364,429 cpu_core/branch-instructions/ (84.03%)
1,080,124 cpu_core/branch-misses/ (84.24%)
564,120,383 cpu_core/bus-cycles/ (84.65%)
36,979 cpu_core/cache-misses/ (84.86%)
7,298,094 cpu_core/cache-references/ (77.30%)
227,174,372 cpu_core/cpu-cycles/ (77.31%)
63,886,523 cpu_core/instructions/ (84.87%)
0 cpu_core/mem-loads/ (77.31%)
12,208,782 cpu_core/mem-stores/ (77.31%)
566,409,738 cpu_core/ref-cycles/ (84.87%)
23,118 cpu_core/cache-misses/ (76.71%)
7,212,602 cpu_core/cache-references/ (76.29%)
1.003228667 seconds time elapsed
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20220518143900.1493980-2-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
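
The PMU-name rule above can be summarised in a small sketch (illustrative only; the demo_* helper is invented, not the code added by this patch): a NULL pmu_name is treated as the single "cpu" core PMU, otherwise the evsel's own PMU name is used.

/* Illustrative sketch only: resolve the core PMU name an evsel belongs to. */
#include <stdio.h>

static const char *demo_core_pmu_name(const char *pmu_name)
{
	/* Non-hybrid: a NULL pmu_name can only mean the single "cpu" PMU. */
	return pmu_name ? pmu_name : "cpu";
}

int main(void)
{
	printf("%s\n", demo_core_pmu_name(NULL));	/* cpu */
	printf("%s\n", demo_core_pmu_name("cpu_core"));	/* hybrid P-core PMU */
	return 0;
}
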
/* Check whether the evsel's PMU supports the perf metrics */
bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
	struct perf_pmu *pmu;
	u32 type = evsel->core.attr.type;

	/*
	 * The PERF_TYPE_RAW type is the core PMU type, e.g., "cpu" PMU
	 * on a non-hybrid machine, "cpu_core" PMU on a hybrid machine.
	 * The slots event is only available for the core PMU, which
	 * supports the perf metrics feature.
	 * Checking both the PERF_TYPE_RAW type and the slots event
	 * should be good enough to detect the perf metrics feature.
	 */
again:
	switch (type) {
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		type = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
		if (type)
			goto again;
		break;
	case PERF_TYPE_RAW:
		break;
	default:
		return false;
	}

	pmu = evsel->pmu;
	if (pmu && perf_pmu__is_fake(pmu))
		pmu = NULL;

	if (!pmu) {
		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			if (pmu->type == PERF_TYPE_RAW)
				break;
		}
	}
	return pmu && perf_pmu__have_event(pmu, "slots");
}
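
The switch at the top of evsel__sys_has_perf_metrics() relies on the extended hardware event encoding, where the upper bits of attr.config carry the PMU type on hybrid systems. Below is a minimal sketch of that encoding (assuming a type shift of 32 and a 32-bit event mask, as in the perf_event uAPI; the demo_* names and the example PMU type value are made up).

/* Illustrative sketch only: pack/unpack a hybrid hardware event config. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PMU_TYPE_SHIFT	32
#define DEMO_HW_EVENT_MASK	0xffffffffULL

int main(void)
{
	uint64_t pmu_type = 8;	/* e.g. the dynamic type of a "cpu_core" PMU */
	uint64_t event = 0;	/* PERF_COUNT_HW_CPU_CYCLES */
	uint64_t config = (pmu_type << DEMO_PMU_TYPE_SHIFT) | event;

	printf("pmu type = %" PRIu64 ", hw event = %" PRIu64 "\n",
	       config >> DEMO_PMU_TYPE_SHIFT,
	       config & DEMO_HW_EVENT_MASK);
	return 0;
}
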
bool arch_evsel__must_be_in_group(const struct evsel *evsel)
{
	if (!evsel__sys_has_perf_metrics(evsel) || !evsel->name ||
	    strcasestr(evsel->name, "uops_retired.slots"))
		return false;

	return arch_is_topdown_metrics(evsel) || arch_is_topdown_slots(evsel);
}

int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
	u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
	const char *event_name;

	if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
		event_name = evsel__hw_names[event];
	else
		event_name = "unknown-hardware";

	/* The PMU type is not required for the non-hybrid platform. */
	if (!pmu)
		return scnprintf(bf, size, "%s", event_name);

	return scnprintf(bf, size, "%s/%s/",
			 evsel->pmu ? evsel->pmu->name : "cpu",
			 event_name);
}

static void ibs_l3miss_warn(void)
{
	pr_warning(
"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
}

void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
{
	struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
	static int warned_once;

	if (warned_once || !x86__is_amd_cpu())
		return;

	evsel_pmu = evsel__find_pmu(evsel);
	if (!evsel_pmu)
		return;

	ibs_fetch_pmu = perf_pmus__find("ibs_fetch");
	ibs_op_pmu = perf_pmus__find("ibs_op");

	if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
		if (attr->config & IBS_FETCH_L3MISSONLY) {
			ibs_l3miss_warn();
			warned_once = 1;
		}
	} else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
		if (attr->config & IBS_OP_L3MISSONLY) {
			ibs_l3miss_warn();
			warned_once = 1;
		}
	}
}

int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
{
	if (!x86__is_amd_cpu())
		return 0;

	if (!evsel->core.attr.precise_ip &&
	    !(evsel->pmu && !strncmp(evsel->pmu->name, "ibs", 3)))
		return 0;

	/* More verbose IBS errors. */
	if (evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||
	    evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||
	    evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest) {
		return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
				 "again without the privilege modifiers (like 'k') at the end.");
	}

	return 0;
}