#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <math.h>
#include <perf/cpumap.h>
#include "color.h"
#include "counts.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "stat.h"
#include "top.h"
#include "thread_map.h"
#include "cpumap.h"
#include "string2.h"
#include <linux/ctype.h>
#include "cgroup.h"
#include <api/fs/fs.h>
#include "util.h"
#include "iostat.h"
#include "pmu.h"
#include "pmus.h"
#include "tool_pmu.h"

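/* Printed in place of a counter value when the event could not be counted. */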
#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED "<not counted>"

#define MGROUP_LEN 50
#define METRIC_LEN 38
#define EVNAME_LEN 32
#define COUNTS_LEN 18
#define INTERVAL_LEN 16
#define CGROUP_LEN 16
#define COMM_LEN 16
#define PID_LEN 7
#define CPUS_LEN 4

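/* Width of the aggregation id column for each aggregation mode (standard output). */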
static int aggr_header_lens[] = {
	[AGGR_CORE] = 18,
	[AGGR_CACHE] = 22,
	[AGGR_CLUSTER] = 20,
	[AGGR_DIE] = 12,
	[AGGR_SOCKET] = 6,
	[AGGR_NODE] = 6,
	[AGGR_NONE] = 6,
	[AGGR_THREAD] = 16,
	[AGGR_GLOBAL] = 0,
};

static const char *aggr_header_csv[] = {
	[AGGR_CORE] = "core,ctrs,",
	[AGGR_CACHE] = "cache,ctrs,",
	[AGGR_CLUSTER] = "cluster,ctrs,",
	[AGGR_DIE] = "die,ctrs,",
	[AGGR_SOCKET] = "socket,ctrs,",
	[AGGR_NONE] = "cpu,",
	[AGGR_THREAD] = "comm-pid,",
	[AGGR_NODE] = "node,",
	[AGGR_GLOBAL] = ""
};

static const char *aggr_header_std[] = {
	[AGGR_CORE] = "core",
	[AGGR_CACHE] = "cache",
	[AGGR_CLUSTER] = "cluster",
	[AGGR_DIE] = "die",
	[AGGR_SOCKET] = "socket",
	[AGGR_NONE] = "cpu",
	[AGGR_THREAD] = "comm-pid",
	[AGGR_NODE] = "node",
	[AGGR_GLOBAL] = ""
};

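/*
 * Color used when printing a metric whose threshold classifies it as bad
 * (red) through good (green); no color when the classification is unknown.
 */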
const char *metric_threshold_classify__color(enum metric_threshold_classify thresh)
{
	const char * const colors[] = {
		"",			/* unknown */
		PERF_COLOR_RED,		/* bad */
		PERF_COLOR_MAGENTA,	/* nearly bad */
		PERF_COLOR_YELLOW,	/* less good */
		PERF_COLOR_GREEN,	/* good */
	};
	static_assert(ARRAY_SIZE(colors) - 1 == METRIC_THRESHOLD_GOOD, "missing enum value");
	return colors[thresh];
}

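/* String form of the classification, used for the "metric-threshold" JSON field. */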
static const char *metric_threshold_classify__str(enum metric_threshold_classify thresh)
{
	const char * const strs[] = {
		"unknown",
		"bad",
		"nearly bad",
		"less good",
		"good",
	};
	static_assert(ARRAY_SIZE(strs) - 1 == METRIC_THRESHOLD_GOOD, "missing enum value");
	return strs[thresh];
}

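/*
 * The print_running_*() helpers report how much of the enabled time the
 * event was actually running ('run' out of 'ena'), one helper per output
 * format.
 */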
static void print_running_std(struct perf_stat_config *config, u64 run, u64 ena)
{
	if (run != ena)
		fprintf(config->output, " (%.2f%%)", 100.0 * run / ena);
}

static void print_running_csv(struct perf_stat_config *config, u64 run, u64 ena)
{
	double enabled_percent = 100;

	if (run != ena)
		enabled_percent = 100 * run / ena;
	fprintf(config->output, "%s%" PRIu64 "%s%.2f",
		config->csv_sep, run, config->csv_sep, enabled_percent);
}

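/*
 * Formatting state passed between the print helpers; the field comments
 * note which entries carry state across fields and which just describe
 * the line currently being printed.
 */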
struct outstate {
	/* Std mode: insert a newline before the next metric */
	bool newline;
	/* JSON mode: track need for comma for a previous field or not */
	bool first;
	/* Num CSV separators remaining to pad out when not all fields are printed */
	int csv_col_pad;

	/*
	 * The following don't track state across fields, but are here as a shortcut to
	 * pass data to the print functions. The alternative would be to update the
	 * function signatures of the entire print stack to pass them through.
	 */
	/* Place to output to */
	FILE * const fh;
	/* Lines are timestamped in --interval-print mode */
	char timestamp[64];
	/* Num items aggregated in current line. See struct perf_stat_aggr.nr */
	int aggr_nr;
	/* Core/socket/die etc ID for the current line */
	struct aggr_cpu_id id;
	/* Event for current line */
	struct evsel *evsel;
	/* Cgroup for current line */
	struct cgroup *cgrp;
};

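/*
 * JSON lines are built field by field: json_sep() returns "" the first time
 * it is called after os->first is set and ", " afterwards, so successive
 * json_out() calls emit a comma separated list of "key" : value fields
 * (see e.g. print_running_json() below).
 */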
static const char *json_sep(struct outstate *os)
{
	const char *sep = os->first ? "" : ", ";

	os->first = false;
	return sep;
}

#define json_out(os, format, ...) fprintf((os)->fh, "%s" format, json_sep(os), ##__VA_ARGS__)

static void print_running_json(struct outstate *os, u64 run, u64 ena)
{
	double enabled_percent = 100;

	if (run != ena)
		enabled_percent = 100 * run / ena;
	json_out(os, "\"event-runtime\" : %" PRIu64 ", \"pcnt-running\" : %.2f",
		 run, enabled_percent);
}

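/*
 * CSV and JSON emit the running/enabled statistics before the metric
 * columns, while standard output appends them after, hence the
 * before_metric flag from the caller.
 */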
static void print_running(struct perf_stat_config *config, struct outstate *os,
			  u64 run, u64 ena, bool before_metric)
{
	if (config->json_output) {
		if (before_metric)
			print_running_json(os, run, ena);
	} else if (config->csv_output) {
		if (before_metric)
			print_running_csv(config, run, ena);
	} else {
		if (!before_metric)
			print_running_std(config, run, ena);
	}
}

static void print_noise_pct_std(struct perf_stat_config *config,
				double pct)
{
	if (pct)
		fprintf(config->output, " ( +-%6.2f%% )", pct);
}

static void print_noise_pct_csv(struct perf_stat_config *config,
				double pct)
{
	fprintf(config->output, "%s%.2f%%", config->csv_sep, pct);
}

static void print_noise_pct_json(struct outstate *os,
				 double pct)
{
	json_out(os, "\"variance\" : %.2f", pct);
}

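/* Print the relative standard deviation across runs, per output format. */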
static void print_noise_pct(struct perf_stat_config *config, struct outstate *os,
			    double total, double avg, bool before_metric)
{
	double pct = rel_stddev_stats(total, avg);

	if (config->json_output) {
		if (before_metric)
			print_noise_pct_json(os, pct);
	} else if (config->csv_output) {
		if (before_metric)
			print_noise_pct_csv(config, pct);
	} else {
		if (!before_metric)
			print_noise_pct_std(config, pct);
	}
}

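/* Run-to-run noise is only meaningful when the workload is repeated (-r). */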
static void print_noise(struct perf_stat_config *config, struct outstate *os,
			struct evsel *evsel, double avg, bool before_metric)
{
	struct perf_stat_evsel *ps;

	if (config->run_count == 1)
		return;

	ps = evsel->stats;
	print_noise_pct(config, os, stddev_stats(&ps->res_stats), avg, before_metric);
}

static void print_cgroup_std(struct perf_stat_config *config, const char *cgrp_name)
{
	fprintf(config->output, " %-*s", CGROUP_LEN, cgrp_name);
}

static void print_cgroup_csv(struct perf_stat_config *config, const char *cgrp_name)
{
	fprintf(config->output, "%s%s", config->csv_sep, cgrp_name);
}

static void print_cgroup_json(struct outstate *os, const char *cgrp_name)
{
	json_out(os, "\"cgroup\" : \"%s\"", cgrp_name);
}

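/*
 * Only emit the cgroup column when cgroup monitoring is in use; events
 * without a cgroup still get an empty name so that columns keep lining up.
 */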
static void print_cgroup(struct perf_stat_config *config, struct outstate *os,
			 struct cgroup *cgrp)
{
	if (nr_cgroups || config->cgroup_list) {
		const char *cgrp_name = cgrp ? cgrp->name : "";

		if (config->json_output)
			print_cgroup_json(os, cgrp_name);
		else if (config->csv_output)
			print_cgroup_csv(config, cgrp_name);
		else
			print_cgroup_std(config, cgrp_name);
	}
}

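/*
 * Standard output: print the aggregation id ("S0-D0-C1", "CPU5",
 * "comm-pid", ...) and, for the topology based modes, the number of
 * aggregated items (the "ctrs" column).
 */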
static void print_aggr_id_std(struct perf_stat_config *config,
			      struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
{
	FILE *output = config->output;
	int idx = config->aggr_mode;
	char buf[128];

	switch (config->aggr_mode) {
	case AGGR_CORE:
		snprintf(buf, sizeof(buf), "S%d-D%d-C%d", id.socket, id.die, id.core);
		break;
	case AGGR_CACHE:
		snprintf(buf, sizeof(buf), "S%d-D%d-L%d-ID%d",
			 id.socket, id.die, id.cache_lvl, id.cache);
		break;
	case AGGR_CLUSTER:
		snprintf(buf, sizeof(buf), "S%d-D%d-CLS%d", id.socket, id.die, id.cluster);
		break;
	case AGGR_DIE:
		snprintf(buf, sizeof(buf), "S%d-D%d", id.socket, id.die);
		break;
	case AGGR_SOCKET:
		snprintf(buf, sizeof(buf), "S%d", id.socket);
		break;
	case AGGR_NODE:
		snprintf(buf, sizeof(buf), "N%d", id.node);
		break;
	case AGGR_NONE:
		if (evsel->percore && !config->percore_show_thread) {
			snprintf(buf, sizeof(buf), "S%d-D%d-C%d ",
				 id.socket, id.die, id.core);
			fprintf(output, "%-*s ",
				aggr_header_lens[AGGR_CORE], buf);
		} else if (id.cpu.cpu > -1) {
			fprintf(output, "CPU%-*d ",
				aggr_header_lens[AGGR_NONE] - 3, id.cpu.cpu);
		}
		return;
	case AGGR_THREAD:
		fprintf(output, "%*s-%-*d ",
			COMM_LEN, perf_thread_map__comm(evsel->core.threads, id.thread_idx),
			PID_LEN, perf_thread_map__pid(evsel->core.threads, id.thread_idx));
		return;
	case AGGR_GLOBAL:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return;
	}

	fprintf(output, "%-*s %*d ", aggr_header_lens[idx], buf, /*strlen("ctrs")*/ 4, aggr_nr);
}

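/* CSV variant of the aggregation id column, using csv_sep instead of fixed widths. */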
static void print_aggr_id_csv(struct perf_stat_config *config,
|
2023-02-19 01:28:45 -08:00
|
|
|
struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
|
2022-11-14 15:02:13 -08:00
|
|
|
{
|
|
|
|
FILE *output = config->output;
|
|
|
|
const char *sep = config->csv_sep;
|
	switch (config->aggr_mode) {
	case AGGR_CORE:
		fprintf(output, "S%d-D%d-C%d%s%d%s",
			id.socket, id.die, id.core, sep, aggr_nr, sep);
		break;
	case AGGR_CACHE:
		fprintf(config->output, "S%d-D%d-L%d-ID%d%s%d%s",
			id.socket, id.die, id.cache_lvl, id.cache, sep, aggr_nr, sep);
		break;
perf stat: Support per-cluster aggregation
Some platforms have 'cluster' topology and CPUs in the cluster will
share resources like L3 Cache Tag (for HiSilicon Kunpeng SoC) or L2
cache (for Intel Jacobsville). Currently parsing and building cluster
topology have been supported since [1].
perf stat has already supported aggregation for other topologies like
die or socket, etc. It'll be useful to aggregate per-cluster to find
problems like L3T bandwidth contention.
This patch adds support for a "--per-cluster" option for per-cluster
aggregation. It also updates the docs and the related test. The output
will look like:
[root@localhost tmp]# perf stat -a -e LLC-load --per-cluster -- sleep 5
Performance counter stats for 'system wide':
S56-D0-CLS158 4 1,321,521,570 LLC-load
S56-D0-CLS594 4 794,211,453 LLC-load
S56-D0-CLS1030 4 41,623 LLC-load
S56-D0-CLS1466 4 41,646 LLC-load
S56-D0-CLS1902 4 16,863 LLC-load
S56-D0-CLS2338 4 15,721 LLC-load
S56-D0-CLS2774 4 22,671 LLC-load
[...]
On a legacy system without clusters, or without cluster support, the
output will look like:
[root@localhost perf]# perf stat -a -e cycles --per-cluster -- sleep 1
Performance counter stats for 'system wide':
S56-D0-CLS0 64 18,011,485 cycles
S7182-D0-CLS0 64 16,548,835 cycles
Note that this patch doesn't mix the cluster information in the outputs
of --per-core to avoid breaking any tools/scripts using it.
Note that perf recently gained "--per-cache" aggregation, but that is not
the same as per-cluster aggregation, even though cluster CPUs may share
some cache resources. For example, on my machine all clusters within a
die share the same L3 cache:
$ cat /sys/devices/system/cpu/cpu0/cache/index3/shared_cpu_list
0-31
$ cat /sys/devices/system/cpu/cpu0/topology/cluster_cpus_list
0-3
[1] commit c5e22feffdd7 ("topology: Represent clusters of CPUs within a die")
Tested-by: Jie Zhan <zhanjie9@hisilicon.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Cc: james.clark@arm.com
Cc: 21cnbao@gmail.com
Cc: prime.zeng@hisilicon.com
Cc: Jonathan.Cameron@huawei.com
Cc: fanghao11@huawei.com
Cc: linuxarm@huawei.com
Cc: tim.c.chen@intel.com
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240208024026.2691-1-yangyicong@huawei.com
	case AGGR_CLUSTER:
		fprintf(config->output, "S%d-D%d-CLS%d%s%d%s",
			id.socket, id.die, id.cluster, sep, aggr_nr, sep);
		break;
	case AGGR_DIE:
		fprintf(output, "S%d-D%d%s%d%s",
			id.socket, id.die, sep, aggr_nr, sep);
		break;
	case AGGR_SOCKET:
		fprintf(output, "S%d%s%d%s",
			id.socket, sep, aggr_nr, sep);
		break;
perf stat: Add --per-node aggregation support
Add a new --per-node option to aggregate counts per NUMA node for
system-wide measurements.
You can specify --per-node in live mode:
# perf stat -a -I 1000 -e cycles --per-node
# time node cpus counts unit events
1.000542550 N0 20 6,202,097 cycles
1.000542550 N1 20 639,559 cycles
2.002040063 N0 20 7,412,495 cycles
2.002040063 N1 20 2,185,577 cycles
3.003451699 N0 20 6,508,917 cycles
3.003451699 N1 20 765,607 cycles
...
Or in the record/report stat session:
# perf stat record -a -I 1000 -e cycles
# time counts unit events
1.000536937 10,008,468 cycles
2.002090152 9,578,539 cycles
3.003625233 7,647,869 cycles
4.005135036 7,032,086 cycles
^C 4.340902364 3,923,893 cycles
# perf stat report --per-node
# time node cpus counts unit events
1.000536937 N0 20 9,355,086 cycles
1.000536937 N1 20 653,382 cycles
2.002090152 N0 20 7,712,838 cycles
2.002090152 N1 20 1,865,701 cycles
3.003625233 N0 20 6,604,441 cycles
3.003625233 N1 20 1,043,428 cycles
4.005135036 N0 20 6,350,522 cycles
4.005135036 N1 20 681,564 cycles
4.340902364 N0 20 3,403,188 cycles
4.340902364 N1 20 520,705 cycles
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Joe Mario <jmario@redhat.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20190904073415.723-4-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
	case AGGR_NODE:
		fprintf(output, "N%d%s%d%s",
			id.node, sep, aggr_nr, sep);
		break;
	case AGGR_NONE:
		if (evsel->percore && !config->percore_show_thread) {
			fprintf(output, "S%d-D%d-C%d%s",
				id.socket, id.die, id.core, sep);
		} else if (id.cpu.cpu > -1) {
			fprintf(output, "CPU%d%s",
				id.cpu.cpu, sep);
perf stat: Support 'percore' event qualifier
With this patch, we can use the 'percore' event qualifier in perf-stat.
root@skl:/tmp# perf stat -e cpu/event=0,umask=0x3,percore=1/,cpu/event=0,umask=0x3/ -a -A -I1000
1.000773050 S0-C0 98,352,832 cpu/event=0,umask=0x3,percore=1/ (50.01%)
1.000773050 S0-C1 103,763,057 cpu/event=0,umask=0x3,percore=1/ (50.02%)
1.000773050 S0-C2 196,776,995 cpu/event=0,umask=0x3,percore=1/ (50.02%)
1.000773050 S0-C3 176,493,779 cpu/event=0,umask=0x3,percore=1/ (50.02%)
1.000773050 CPU0 47,699,641 cpu/event=0,umask=0x3/ (50.02%)
1.000773050 CPU1 49,052,451 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU2 102,771,422 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU3 100,784,662 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU4 43,171,342 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU5 54,152,158 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU6 93,618,410 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU7 74,477,589 cpu/event=0,umask=0x3/ (49.99%)
In this example, we count the event 'ref-cycles' per-core and per-CPU in
one perf stat command-line. From the output, we can see:
S0-C0 = CPU0 + CPU4
S0-C1 = CPU1 + CPU5
S0-C2 = CPU2 + CPU6
S0-C3 = CPU3 + CPU7
So the result is expected (tiny difference is ignored).
Note that the 'percore' event qualifier needs to be used together with the '-A' option.
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Tested-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jin Yao <yao.jin@intel.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1555077590-27664-4-git-send-email-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
		}
		break;
	case AGGR_THREAD:
		fprintf(output, "%s-%d%s",
			perf_thread_map__comm(evsel->core.threads, id.thread_idx),
			perf_thread_map__pid(evsel->core.threads, id.thread_idx),
			sep);
		break;
	case AGGR_GLOBAL:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		break;
	}
}
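For reference, the CSV aggregation-id prefix built by print_aggr_id_csv() is what each value row starts with; with the default ',' separator a --per-core run would begin its rows roughly like this (illustrative values, not captured output):

S0-D0-C0,4,1.20,msec,task-clock:u,...
S0-D0-C1,4,1.18,msec,task-clock:u,...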
static void print_aggr_id_json(struct perf_stat_config *config, struct outstate *os,
			       struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
{
	switch (config->aggr_mode) {
	case AGGR_CORE:
		json_out(os, "\"core\" : \"S%d-D%d-C%d\", \"counters\" : %d",
			 id.socket, id.die, id.core, aggr_nr);
		break;
	case AGGR_CACHE:
		json_out(os, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"counters\" : %d",
			 id.socket, id.die, id.cache_lvl, id.cache, aggr_nr);
		break;
	case AGGR_CLUSTER:
		json_out(os, "\"cluster\" : \"S%d-D%d-CLS%d\", \"counters\" : %d",
			 id.socket, id.die, id.cluster, aggr_nr);
		break;
	case AGGR_DIE:
		json_out(os, "\"die\" : \"S%d-D%d\", \"counters\" : %d",
			 id.socket, id.die, aggr_nr);
		break;
	case AGGR_SOCKET:
		json_out(os, "\"socket\" : \"S%d\", \"counters\" : %d",
			 id.socket, aggr_nr);
		break;
	case AGGR_NODE:
		json_out(os, "\"node\" : \"N%d\", \"counters\" : %d",
			 id.node, aggr_nr);
		break;
	case AGGR_NONE:
		if (evsel->percore && !config->percore_show_thread) {
			json_out(os, "\"core\" : \"S%d-D%d-C%d\"",
				 id.socket, id.die, id.core);
		} else if (id.cpu.cpu > -1) {
			json_out(os, "\"cpu\" : \"%d\"",
				 id.cpu.cpu);
		}
		break;
	case AGGR_THREAD:
		json_out(os, "\"thread\" : \"%s-%d\"",
			 perf_thread_map__comm(evsel->core.threads, id.thread_idx),
			 perf_thread_map__pid(evsel->core.threads, id.thread_idx));
		break;
	case AGGR_GLOBAL:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		break;
	}
}
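The JSON variant above prepends the same identification as key/value pairs at the start of each record; in --per-core mode a record would begin roughly as follows, with the remaining fields matching the examples shown in the commit messages (illustrative, not captured output):

{"core" : "S0-D0-C0", "counters" : 4, "counter-value" : "1.204272", "unit" : "msec", "event" : "task-clock", ...}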
static void aggr_printout(struct perf_stat_config *config, struct outstate *os,
			  struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
{
	if (config->json_output)
		print_aggr_id_json(config, os, evsel, id, aggr_nr);
	else if (config->csv_output)
		print_aggr_id_csv(config, evsel, id, aggr_nr);
	else
		print_aggr_id_std(config, evsel, id, aggr_nr);
}

static void new_line_std(struct perf_stat_config *config __maybe_unused,
			 void *ctx)
{
	struct outstate *os = ctx;

	os->newline = true;
}
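/*
 * Note: new_line_std() above only records that a line break is pending.
 * print_metric_std() below checks os->newline and calls do_new_line_std(),
 * which re-emits the timestamp (in interval mode) and the aggregation id
 * via __new_line_std_csv() so that continuation lines stay aligned.
 */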
perf stat: New metricgroup output for the default mode
In the default mode, the current output of the metricgroup includes both
events and metrics, which is not necessary and just makes the output
hard to read. Since different ARCHs (even different generations of the
same ARCH) may use different events, the output also varies across
platforms.
For a metricgroup, only outputting the value of each metric is good
enough.
Add a new field default_metricgroup in evsel to indicate an event of the
default metricgroup. For those events, printout() should print the
metricgroup name rather than each event.
Add perf_stat__skip_metric_event() to skip the evsel in the Default
metricgroup, if it's not running or not the metric event.
Add print_metricgroup_header_t to pass the functions that print the
display name of each metricgroup in the Default metricgroup. All three
output methods are supported.
Factor out perf_stat__print_shadow_stats_metricgroup() to print out each
metrics.
On SPR:
Before:
./perf_old stat sleep 1
Performance counter stats for 'sleep 1':
0.54 msec task-clock:u # 0.001 CPUs utilized
0 context-switches:u # 0.000 /sec
0 cpu-migrations:u # 0.000 /sec
68 page-faults:u # 125.445 K/sec
540,970 cycles:u # 0.998 GHz
556,325 instructions:u # 1.03 insn per cycle
123,602 branches:u # 228.018 M/sec
6,889 branch-misses:u # 5.57% of all branches
3,245,820 TOPDOWN.SLOTS:u # 18.4 % tma_backend_bound
# 17.2 % tma_retiring
# 23.1 % tma_bad_speculation
# 41.4 % tma_frontend_bound
564,859 topdown-retiring:u
1,370,999 topdown-fe-bound:u
603,271 topdown-be-bound:u
744,874 topdown-bad-spec:u
12,661 INT_MISC.UOP_DROPPING:u # 23.357 M/sec
1.001798215 seconds time elapsed
0.000193000 seconds user
0.001700000 seconds sys
After:
$ ./perf stat sleep 1
Performance counter stats for 'sleep 1':
0.51 msec task-clock:u # 0.001 CPUs utilized
0 context-switches:u # 0.000 /sec
0 cpu-migrations:u # 0.000 /sec
68 page-faults:u # 132.683 K/sec
545,228 cycles:u # 1.064 GHz
555,509 instructions:u # 1.02 insn per cycle
123,574 branches:u # 241.120 M/sec
6,957 branch-misses:u # 5.63% of all branches
TopdownL1 # 17.5 % tma_backend_bound
# 22.6 % tma_bad_speculation
# 42.7 % tma_frontend_bound
# 17.1 % tma_retiring
TopdownL2 # 21.8 % tma_branch_mispredicts
# 11.5 % tma_core_bound
# 13.4 % tma_fetch_bandwidth
# 29.3 % tma_fetch_latency
# 2.7 % tma_heavy_operations
# 14.5 % tma_light_operations
# 0.8 % tma_machine_clears
# 6.1 % tma_memory_bound
1.001712086 seconds time elapsed
0.000151000 seconds user
0.001618000 seconds sys
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ahmad Yasin <ahmad.yasin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: https://lore.kernel.org/r/20230616031420.3751973-3-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
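As a rough illustration of the callback type described above (a hedged sketch; the authoritative typedef lives in the perf stat headers), the per-output-mode metricgroup header printers share a signature along these lines:

/* Sketch only: see util/stat.h for the real definition. */
typedef void (*print_metricgroup_header_t)(struct perf_stat_config *config,
					   void *ctx,
					   const char *metricgroup_name);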
static inline void __new_line_std_csv(struct perf_stat_config *config,
				      struct outstate *os)
{
	fputc('\n', os->fh);
	if (config->interval)
		fputs(os->timestamp, os->fh);
	aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
}

static inline void __new_line_std(struct outstate *os)
{
	fprintf(os->fh, " ");
}

static void do_new_line_std(struct perf_stat_config *config,
			    struct outstate *os)
{
	__new_line_std_csv(config, os);
	if (config->aggr_mode == AGGR_NONE)
		fprintf(os->fh, " ");
	__new_line_std(os);
}

static void print_metric_std(struct perf_stat_config *config,
			     void *ctx, enum metric_threshold_classify thresh,
			     const char *fmt, const char *unit, double val)
{
	struct outstate *os = ctx;
	FILE *out = os->fh;
	int n;
	bool newline = os->newline;
	const char *color = metric_threshold_classify__color(thresh);

	os->newline = false;

	if (unit == NULL || fmt == NULL) {
		fprintf(out, "%-*s", METRIC_LEN, "");
		return;
	}

	if (newline)
		do_new_line_std(config, os);

	n = fprintf(out, " # ");
	if (color)
		n += color_fprintf(out, color, fmt, val);
	else
		n += fprintf(out, fmt, val);
	fprintf(out, " %-*s", METRIC_LEN - n - 1, unit);
}

static void new_line_csv(struct perf_stat_config *config, void *ctx)
{
	struct outstate *os = ctx;
	int i;

	__new_line_std_csv(config, os);
	for (i = 0; i < os->csv_col_pad; i++)
		fputs(config->csv_sep, os->fh);
}

static void print_metric_csv(struct perf_stat_config *config __maybe_unused,
			     void *ctx,
			     enum metric_threshold_classify thresh __maybe_unused,
			     const char *fmt, const char *unit, double val)
{
	struct outstate *os = ctx;
	FILE *out = os->fh;
	char buf[64], *vals, *ends;

	if (unit == NULL || fmt == NULL) {
		fprintf(out, "%s%s", config->csv_sep, config->csv_sep);
		return;
	}
	snprintf(buf, sizeof(buf), fmt, val);
	ends = vals = skip_spaces(buf);
	while (isdigit(*ends) || *ends == '.')
		ends++;
	*ends = 0;
	fprintf(out, "%s%s%s%s", config->csv_sep, vals, config->csv_sep, skip_spaces(unit));
}
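To make the trimming in print_metric_csv() concrete: if a caller passed, say, fmt "%8.3f", val 122.447136 and unit "/sec" with a ',' separator (hypothetical arguments for illustration), snprintf() produces " 122.447", skip_spaces() drops the leading blank, the digit/'.' scan terminates the string right after "122.447", and the final fprintf() appends ",122.447,/sec" to the row.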
static void print_metric_json(struct perf_stat_config *config __maybe_unused,
			      void *ctx,
perf stat: Add metric-threshold to json output
When the threshold isn't unknown, add a value to the JSON like:
"metric-threshold" : "good"
A more complete example:
```
$ perf stat -a -j -I 1000
{"interval" : 1.001089747, "counter-value" : "16045.281449", "unit" : "msec", "event" : "cpu-clock", "event-runtime" : 16045355135, "pcnt-running" : 100.00, "metric-value" : "16.045281", "metric-unit" : "CPUs utilized"}
{"interval" : 1.001089747, "counter-value" : "10003.000000", "unit" : "", "event" : "context-switches", "event-runtime" : 16045314844, "pcnt-running" : 100.00, "metric-value" : "623.423156", "metric-unit" : "/sec"}
{"interval" : 1.001089747, "counter-value" : "328.000000", "unit" : "", "event" : "cpu-migrations", "event-runtime" : 16045321403, "pcnt-running" : 100.00, "metric-value" : "20.442147", "metric-unit" : "/sec"}
{"interval" : 1.001089747, "counter-value" : "20114.000000", "unit" : "", "event" : "page-faults", "event-runtime" : 16045355927, "pcnt-running" : 100.00, "metric-value" : "1.253577", "metric-unit" : "K/sec"}
{"interval" : 1.001089747, "counter-value" : "4066679471.000000", "unit" : "", "event" : "instructions", "event-runtime" : 16045369123, "pcnt-running" : 100.00, "metric-value" : "1.628330", "metric-unit" : "insn per cycle"}
{"interval" : 1.001089747, "counter-value" : "2497454658.000000", "unit" : "", "event" : "cycles", "event-runtime" : 16045374810, "pcnt-running" : 100.00, "metric-value" : "0.155650", "metric-unit" : "GHz"}
{"interval" : 1.001089747, "counter-value" : "914974294.000000", "unit" : "", "event" : "branches", "event-runtime" : 16045379877, "pcnt-running" : 100.00, "metric-value" : "57.024509", "metric-unit" : "M/sec"}
{"interval" : 1.001089747, "counter-value" : "9237201.000000", "unit" : "", "event" : "branch-misses", "event-runtime" : 16045375017, "pcnt-running" : 100.00, "metric-value" : "1.009559", "metric-unit" : "of all branches", "metric-threshold" : "good"}
{"interval" : 1.001089747, "event-runtime" : 16045397172, "pcnt-running" : 100.00, "metricgroup" : "TopdownL1"}
{"interval" : 1.001089747, "metric-value" : "22.036686", "metric-unit" : "% tma_backend_bound", "metric-threshold" : "bad"}
{"interval" : 1.001089747, "metric-value" : "7.610161", "metric-unit" : "% tma_bad_speculation", "metric-threshold" : "good"}
{"interval" : 1.001089747, "metric-value" : "36.729687", "metric-unit" : "% tma_frontend_bound", "metric-threshold" : "bad"}
{"interval" : 1.001089747, "metric-value" : "33.623465", "metric-unit" : "% tma_retiring"}
...
```
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Yicong Yang <yangyicong@hisilicon.com>
Cc: Weilin Wang <weilin.wang@intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: James Clark <james.clark@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Leo Yan <leo.yan@linux.dev>
Cc: Sumanth Korikkar <sumanthk@linux.ibm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20241017175356.783793-7-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2024-10-17 10:53:55 -07:00
|
|
|
enum metric_threshold_classify thresh,
|
2022-08-05 13:01:04 -07:00
|
|
|
const char *fmt __maybe_unused,
|
|
|
|
const char *unit, double val)
|
|
|
|
{
|
|
|
|
struct outstate *os = ctx;
|
|
|
|
FILE *out = os->fh;
|
|
|
|
|
2024-10-17 10:53:55 -07:00
|
|
|
if (unit) {
|
2024-11-12 16:00:41 +00:00
|
|
|
json_out(os, "\"metric-value\" : \"%f\", \"metric-unit\" : \"%s\"", val, unit);
|
2024-10-17 10:53:55 -07:00
|
|
|
if (thresh != METRIC_THRESHOLD_UNKNOWN) {
|
2024-11-12 16:00:41 +00:00
|
|
|
json_out(os, "\"metric-threshold\" : \"%s\"",
|
2024-10-17 10:53:55 -07:00
|
|
|
metric_threshold_classify__str(thresh));
|
|
|
|
}
|
|
|
|
}
|
2022-08-05 13:01:04 -07:00
|
|
|
if (!config->metric_only)
|
|
|
|
fprintf(out, "}");
|
|
|
|
}
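Since print_metric_json() emits one self-contained JSON object per line, a consumer of `perf stat -j` does not need a streaming JSON parser. Below is a minimal, hypothetical sketch (plain libc, not part of perf) that pulls the "metric-value", "metric-unit" and optional "metric-threshold" fields out of each line, assuming the quoted field form shown in the metric-threshold example above; json_line_get() is an illustrative helper, not a perf API.
```
#include <stdio.h>
#include <string.h>

/* Illustrative helper (not a perf API): copy the quoted value that follows
 * `"key" : "` on one line of `perf stat -j` output into buf.
 * Returns 1 on success, 0 if the key is absent or the value is too long. */
static int json_line_get(const char *line, const char *key, char *buf, size_t len)
{
	char pat[64];
	const char *p, *end;

	snprintf(pat, sizeof(pat), "\"%s\" : \"", key);
	p = strstr(line, pat);
	if (!p)
		return 0;
	p += strlen(pat);
	end = strchr(p, '"');
	if (!end || (size_t)(end - p) >= len)
		return 0;
	memcpy(buf, p, end - p);
	buf[end - p] = '\0';
	return 1;
}

int main(void)
{
	char line[4096], val[64], unit[128], thresh[32];

	while (fgets(line, sizeof(line), stdin)) {
		if (!json_line_get(line, "metric-value", val, sizeof(val)))
			continue;
		if (!json_line_get(line, "metric-unit", unit, sizeof(unit)))
			unit[0] = '\0';
		/* "metric-threshold" only appears when the classification is known. */
		if (json_line_get(line, "metric-threshold", thresh, sizeof(thresh)))
			printf("%s %s [%s]\n", val, unit, thresh);
		else
			printf("%s %s\n", val, unit);
	}
	return 0;
}
```
perf stat writes its counts to stderr by default, so a pipeline such as `perf stat -j -a -I 1000 2>&1 | ./metric-filter` (or `perf stat -j -o file ...`) would feed a consumer like this.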
|
|
|
|
|
|
|
|
static void new_line_json(struct perf_stat_config *config, void *ctx)
|
|
|
|
{
|
|
|
|
struct outstate *os = ctx;
|
|
|
|
|
2022-12-02 11:04:47 -08:00
|
|
|
fputs("\n{", os->fh);
|
2024-11-12 16:00:41 +00:00
|
|
|
os->first = true;
|
2024-11-12 16:00:44 +00:00
|
|
|
if (config->interval)
|
|
|
|
json_out(os, "%s", os->timestamp);
|
2024-11-12 16:00:41 +00:00
|
|
|
|
|
|
|
aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
|
2022-08-05 13:01:04 -07:00
|
|
|
}
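new_line_json() opens the next object and resets os->first. The json_out() helper used throughout these printers is defined earlier in this file; the stand-alone sketch below only illustrates the assumed pattern behind that flag (emit ", " before every field except the first of the current object) and is not the actual implementation.
```
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for perf's struct outstate: just the two fields this sketch needs. */
struct outstate_sketch {
	FILE *fh;	/* output stream */
	bool first;	/* true until the first field of the object is printed */
};

/* Assumed behaviour of a json_out()-style helper: separate fields with
 * ", " so that each emitted line is a valid JSON object. */
static void json_out_sketch(struct outstate_sketch *os, const char *fmt, ...)
{
	va_list ap;

	fputs(os->first ? "" : ", ", os->fh);
	os->first = false;

	va_start(ap, fmt);
	vfprintf(os->fh, fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct outstate_sketch os = { .fh = stdout, .first = true };

	fputs("{", os.fh);
	json_out_sketch(&os, "\"event\" : \"%s\"", "cycles");
	json_out_sketch(&os, "\"metric-value\" : \"%f\"", 0.79);
	fputs("}\n", os.fh);
	return 0;
}
```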
|
|
|
|
|
perf stat: New metricgroup output for the default mode
In the default mode, the current metricgroup output includes both events
and metrics, which is unnecessary and just makes the output hard to read.
Since different ARCHs (even different generations of the same ARCH) may
use different events, the output also varies across platforms.
For a metricgroup, only outputting the value of each metric is good
enough.
Add a new field default_metricgroup in evsel to indicate an event of the
default metricgroup. For those events, printout() should print the
metricgroup name rather than each event.
Add perf_stat__skip_metric_event() to skip an evsel in the Default
metricgroup if it is not running or is not the metric event.
Add print_metricgroup_header_t to pass the function that prints the
display name of each metricgroup in the Default metricgroup. All three
output methods are supported.
Factor out perf_stat__print_shadow_stats_metricgroup() to print out each
metric.
On SPR:
Before:
./perf_old stat sleep 1
Performance counter stats for 'sleep 1':
0.54 msec task-clock:u # 0.001 CPUs utilized
0 context-switches:u # 0.000 /sec
0 cpu-migrations:u # 0.000 /sec
68 page-faults:u # 125.445 K/sec
540,970 cycles:u # 0.998 GHz
556,325 instructions:u # 1.03 insn per cycle
123,602 branches:u # 228.018 M/sec
6,889 branch-misses:u # 5.57% of all branches
3,245,820 TOPDOWN.SLOTS:u # 18.4 % tma_backend_bound
# 17.2 % tma_retiring
# 23.1 % tma_bad_speculation
# 41.4 % tma_frontend_bound
564,859 topdown-retiring:u
1,370,999 topdown-fe-bound:u
603,271 topdown-be-bound:u
744,874 topdown-bad-spec:u
12,661 INT_MISC.UOP_DROPPING:u # 23.357 M/sec
1.001798215 seconds time elapsed
0.000193000 seconds user
0.001700000 seconds sys
After:
$ ./perf stat sleep 1
Performance counter stats for 'sleep 1':
0.51 msec task-clock:u # 0.001 CPUs utilized
0 context-switches:u # 0.000 /sec
0 cpu-migrations:u # 0.000 /sec
68 page-faults:u # 132.683 K/sec
545,228 cycles:u # 1.064 GHz
555,509 instructions:u # 1.02 insn per cycle
123,574 branches:u # 241.120 M/sec
6,957 branch-misses:u # 5.63% of all branches
TopdownL1 # 17.5 % tma_backend_bound
# 22.6 % tma_bad_speculation
# 42.7 % tma_frontend_bound
# 17.1 % tma_retiring
TopdownL2 # 21.8 % tma_branch_mispredicts
# 11.5 % tma_core_bound
# 13.4 % tma_fetch_bandwidth
# 29.3 % tma_fetch_latency
# 2.7 % tma_heavy_operations
# 14.5 % tma_light_operations
# 0.8 % tma_machine_clears
# 6.1 % tma_memory_bound
1.001712086 seconds time elapsed
0.000151000 seconds user
0.001618000 seconds sys
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ahmad Yasin <ahmad.yasin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: https://lore.kernel.org/r/20230616031420.3751973-3-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2023-06-15 20:14:17 -07:00
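The three print_metricgroup_header_*() implementations that follow (JSON, CSV and std) all take the same arguments, which is what the print_metricgroup_header_t callback type mentioned above describes. Inferred from those signatures, the typedef would look roughly like the sketch below; the authoritative declaration lives in perf's stat headers, not here.
```
/* Sketch inferred from the three printer definitions below; the real
 * typedef is declared elsewhere in perf (util/stat.h or similar). */
typedef void (*print_metricgroup_header_t)(struct perf_stat_config *config,
					   void *ctx,
					   const char *metricgroup_name);
```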
|
|
|
static void print_metricgroup_header_json(struct perf_stat_config *config,
|
|
|
|
void *ctx,
|
|
|
|
const char *metricgroup_name)
|
|
|
|
{
|
|
|
|
if (!metricgroup_name)
|
|
|
|
return;
|
|
|
|
|
2024-11-12 16:00:41 +00:00
|
|
|
json_out((struct outstate *) ctx, "\"metricgroup\" : \"%s\"}", metricgroup_name);
|
2023-06-15 20:14:17 -07:00
|
|
|
new_line_json(config, ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void print_metricgroup_header_csv(struct perf_stat_config *config,
|
|
|
|
void *ctx,
|
|
|
|
const char *metricgroup_name)
|
|
|
|
{
|
|
|
|
struct outstate *os = ctx;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!metricgroup_name) {
|
|
|
|
/* Leave space for running and enabling */
|
2024-11-12 16:00:45 +00:00
|
|
|
for (i = 0; i < os->csv_col_pad - 2; i++)
|
2023-06-15 20:14:17 -07:00
|
|
|
fputs(config->csv_sep, os->fh);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2024-11-12 16:00:45 +00:00
|
|
|
for (i = 0; i < os->csv_col_pad; i++)
|
2023-06-15 20:14:17 -07:00
|
|
|
fputs(config->csv_sep, os->fh);
|
|
|
|
fprintf(config->output, "%s", metricgroup_name);
|
|
|
|
new_line_csv(config, ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void print_metricgroup_header_std(struct perf_stat_config *config,
|
|
|
|
void *ctx,
|
|
|
|
const char *metricgroup_name)
|
|
|
|
{
|
|
|
|
struct outstate *os = ctx;
|
|
|
|
int n;
|
|
|
|
|
|
|
|
if (!metricgroup_name) {
|
|
|
|
__new_line_std(os);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
n = fprintf(config->output, " %*s", EVNAME_LEN, metricgroup_name);
|
|
|
|
|
|
|
|
fprintf(config->output, "%*s", MGROUP_LEN - n - 1, "");
|
|
|
|
}
|
|
|
|
|
2018-08-30 08:32:52 +02:00
|
|
|
/* Filter out some columns that don't work well in metrics only mode */
|
|
|
|
|
|
|
|
static bool valid_only_metric(const char *unit)
|
|
|
|
{
|
|
|
|
if (!unit)
|
|
|
|
return false;
|
|
|
|
if (strstr(unit, "/sec") ||
|
|
|
|
strstr(unit, "CPUs utilized"))
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-07-21 13:23:51 +02:00
|
|
|
static const char *fixunit(char *buf, struct evsel *evsel,
|
2018-08-30 08:32:52 +02:00
|
|
|
const char *unit)
|
|
|
|
{
|
|
|
|
if (!strncmp(unit, "of all", 6)) {
|
2020-04-29 16:07:09 -03:00
|
|
|
snprintf(buf, 1024, "%s %s", evsel__name(evsel),
|
2018-08-30 08:32:52 +02:00
|
|
|
unit);
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
return unit;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void print_metric_only(struct perf_stat_config *config,
|
2024-10-17 10:53:54 -07:00
|
|
|
void *ctx, enum metric_threshold_classify thresh,
|
|
|
|
const char *fmt, const char *unit, double val)
|
2018-08-30 08:32:52 +02:00
|
|
|
{
|
|
|
|
struct outstate *os = ctx;
|
|
|
|
FILE *out = os->fh;
|
|
|
|
char buf[1024], str[1024];
|
|
|
|
unsigned mlen = config->metric_only_len;
|
2024-10-17 10:53:54 -07:00
|
|
|
const char *color = metric_threshold_classify__color(thresh);
|
2018-08-30 08:32:52 +02:00
|
|
|
|
|
|
|
if (!valid_only_metric(unit))
|
|
|
|
return;
|
|
|
|
unit = fixunit(buf, os->evsel, unit);
|
|
|
|
if (mlen < strlen(unit))
|
|
|
|
mlen = strlen(unit) + 1;
|
|
|
|
|
|
|
|
if (color)
|
|
|
|
mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
|
|
|
|
|
2024-02-09 12:49:46 -08:00
|
|
|
color_snprintf(str, sizeof(str), color ?: "", fmt ?: "", val);
|
2018-08-30 08:32:52 +02:00
|
|
|
fprintf(out, "%*s ", mlen, str);
|
2022-11-23 10:02:06 -08:00
|
|
|
os->first = false;
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void print_metric_only_csv(struct perf_stat_config *config __maybe_unused,
|
2024-10-17 10:53:54 -07:00
|
|
|
void *ctx,
|
|
|
|
enum metric_threshold_classify thresh __maybe_unused,
|
2018-08-30 08:32:52 +02:00
|
|
|
const char *fmt,
|
|
|
|
const char *unit, double val)
|
|
|
|
{
|
|
|
|
struct outstate *os = ctx;
|
|
|
|
FILE *out = os->fh;
|
|
|
|
char buf[64], *vals, *ends;
|
|
|
|
char tbuf[1024];
|
|
|
|
|
|
|
|
if (!valid_only_metric(unit))
|
|
|
|
return;
|
|
|
|
unit = fixunit(tbuf, os->evsel, unit);
|
2023-08-04 10:09:08 +08:00
|
|
|
snprintf(buf, sizeof(buf), fmt ?: "", val);
|
2019-06-26 11:42:03 -03:00
|
|
|
ends = vals = skip_spaces(buf);
|
2018-08-30 08:32:52 +02:00
|
|
|
while (isdigit(*ends) || *ends == '.')
|
|
|
|
ends++;
|
|
|
|
*ends = 0;
|
|
|
|
fprintf(out, "%s%s", vals, config->csv_sep);
|
2022-11-23 10:02:06 -08:00
|
|
|
os->first = false;
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
|
2022-08-05 13:01:04 -07:00
|
|
|
static void print_metric_only_json(struct perf_stat_config *config __maybe_unused,
|
2024-10-17 10:53:54 -07:00
|
|
|
void *ctx,
|
|
|
|
enum metric_threshold_classify thresh __maybe_unused,
|
2022-08-05 13:01:04 -07:00
|
|
|
const char *fmt,
|
|
|
|
const char *unit, double val)
|
|
|
|
{
|
|
|
|
struct outstate *os = ctx;
|
2024-10-17 10:53:52 -07:00
|
|
|
char buf[64], *ends;
|
2022-08-05 13:01:04 -07:00
|
|
|
char tbuf[1024];
|
2024-10-17 10:53:52 -07:00
|
|
|
const char *vals;
|
2022-08-05 13:01:04 -07:00
|
|
|
|
|
|
|
if (!valid_only_metric(unit))
|
|
|
|
return;
|
|
|
|
unit = fixunit(tbuf, os->evsel, unit);
|
2024-10-17 10:53:52 -07:00
|
|
|
if (!unit[0])
|
|
|
|
return;
|
2023-08-04 10:09:08 +08:00
|
|
|
snprintf(buf, sizeof(buf), fmt ?: "", val);
|
2024-10-17 10:53:52 -07:00
|
|
|
vals = ends = skip_spaces(buf);
|
2022-08-05 13:01:04 -07:00
|
|
|
while (isdigit(*ends) || *ends == '.')
|
|
|
|
ends++;
|
|
|
|
*ends = 0;
|
2024-10-17 10:53:52 -07:00
|
|
|
if (!vals[0])
|
|
|
|
vals = "none";
|
2024-11-12 16:00:41 +00:00
|
|
|
json_out(os, "\"%s\" : \"%s\"", unit, vals);
|
2022-08-05 13:01:04 -07:00
|
|
|
}
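The digit/'.' scan a few lines above keeps only the leading numeric part of the value formatted into buf before emitting it as a string. A standalone sketch of that trimming step (hypothetical helper, assuming the same skip-spaces-then-scan behaviour):

#include <ctype.h>
#include <stdio.h>

/* Hypothetical helper mirroring the scan above:
 * "  12.34 GHz" -> "12.34"; empty or non-numeric input -> "none". */
static const char *leading_number(char *s)
{
	char *start, *end;

	while (*s == ' ')			/* like skip_spaces() */
		s++;
	start = end = s;
	while (isdigit((unsigned char)*end) || *end == '.')
		end++;
	*end = 0;				/* truncate after the number */
	return start[0] ? start : "none";
}

int main(void)
{
	char val[] = "  0.790933 GHz";

	printf("%s\n", leading_number(val));	/* prints "0.790933" */
	return 0;
}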
|
|
|
|
|
2018-08-30 08:32:52 +02:00
|
|
|
static void print_metric_header(struct perf_stat_config *config,
|
2024-10-17 10:53:54 -07:00
|
|
|
void *ctx,
|
|
|
|
enum metric_threshold_classify thresh __maybe_unused,
|
2018-08-30 08:32:52 +02:00
|
|
|
const char *fmt __maybe_unused,
|
|
|
|
const char *unit, double val __maybe_unused)
|
|
|
|
{
|
|
|
|
struct outstate *os = ctx;
|
|
|
|
char tbuf[1024];
|
|
|
|
|
2021-04-19 12:41:44 +03:00
|
|
|
/* In case of iostat, print metric header for first root port only */
|
|
|
|
if (config->iostat_run &&
|
|
|
|
os->evsel->priv != os->evsel->evlist->selected->priv)
|
|
|
|
return;
|
|
|
|
|
2022-11-14 15:02:26 -08:00
|
|
|
if (os->evsel->cgrp != os->cgrp)
|
|
|
|
return;
|
|
|
|
|
2022-11-07 13:33:10 -08:00
|
|
|
if (!valid_only_metric(unit))
|
2018-08-30 08:32:52 +02:00
|
|
|
return;
|
|
|
|
unit = fixunit(tbuf, os->evsel, unit);
|
2022-08-05 13:01:04 -07:00
|
|
|
|
|
|
|
if (config->json_output)
|
2022-11-23 10:02:06 -08:00
|
|
|
return;
|
2022-08-05 13:01:04 -07:00
|
|
|
else if (config->csv_output)
|
2018-08-30 08:32:52 +02:00
|
|
|
fprintf(os->fh, "%s%s", unit, config->csv_sep);
|
|
|
|
else
|
|
|
|
fprintf(os->fh, "%*s ", config->metric_only_len, unit);
|
|
|
|
}
|
|
|
|
|
2022-11-14 15:02:14 -08:00
|
|
|
static void print_counter_value_std(struct perf_stat_config *config,
|
2022-11-14 15:02:15 -08:00
|
|
|
struct evsel *evsel, double avg, bool ok)
|
2018-08-30 08:32:52 +02:00
|
|
|
{
|
|
|
|
FILE *output = config->output;
|
|
|
|
double sc = evsel->scale;
|
|
|
|
const char *fmt;
|
2022-11-14 15:02:15 -08:00
|
|
|
const char *bad_count = evsel->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED;
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:14 -08:00
|
|
|
if (config->big_num)
|
2022-11-14 15:02:20 -08:00
|
|
|
fmt = floor(sc) != sc ? "%'*.2f " : "%'*.0f ";
|
2022-11-14 15:02:14 -08:00
|
|
|
else
|
2022-11-14 15:02:20 -08:00
|
|
|
fmt = floor(sc) != sc ? "%*.2f " : "%*.0f ";
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:15 -08:00
|
|
|
if (ok)
|
2022-11-14 15:02:20 -08:00
|
|
|
fprintf(output, fmt, COUNTS_LEN, avg);
|
2022-11-14 15:02:15 -08:00
|
|
|
else
|
2022-11-14 15:02:20 -08:00
|
|
|
fprintf(output, "%*s ", COUNTS_LEN, bad_count);
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:14 -08:00
|
|
|
if (evsel->unit)
|
|
|
|
fprintf(output, "%-*s ", config->unit_width, evsel->unit);
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:20 -08:00
|
|
|
fprintf(output, "%-*s", EVNAME_LEN, evsel__name(evsel));
|
2022-11-14 15:02:14 -08:00
|
|
|
}
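For reference, the "%'" flag selected above when config->big_num is set requests locale-dependent thousands grouping (a POSIX extension to ISO C printf, supported by glibc); a minimal sketch of the effect, assuming a locale that defines a grouping separator:

#include <locale.h>
#include <stdio.h>

int main(void)
{
	/* Grouping only appears if the current locale defines a separator. */
	setlocale(LC_NUMERIC, "");
	printf("%'*.0f\n", 15, 1234567.0);	/* e.g. "      1,234,567" under en_US */
	return 0;
}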
|
|
|
|
|
|
|
|
static void print_counter_value_csv(struct perf_stat_config *config,
|
2022-11-14 15:02:15 -08:00
|
|
|
struct evsel *evsel, double avg, bool ok)
|
2022-11-14 15:02:14 -08:00
|
|
|
{
|
|
|
|
FILE *output = config->output;
|
|
|
|
double sc = evsel->scale;
|
|
|
|
const char *sep = config->csv_sep;
|
|
|
|
const char *fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
|
2022-11-14 15:02:15 -08:00
|
|
|
const char *bad_count = evsel->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED;
|
2022-11-14 15:02:14 -08:00
|
|
|
|
2022-11-14 15:02:15 -08:00
|
|
|
if (ok)
|
|
|
|
fprintf(output, fmt, avg, sep);
|
|
|
|
else
|
|
|
|
fprintf(output, "%s%s", bad_count, sep);
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:14 -08:00
|
|
|
if (evsel->unit)
|
|
|
|
fprintf(output, "%s%s", evsel->unit, sep);
|
|
|
|
|
|
|
|
fprintf(output, "%s", evsel__name(evsel));
|
|
|
|
}
|
|
|
|
|
2024-11-12 16:00:41 +00:00
|
|
|
static void print_counter_value_json(struct outstate *os,
|
2022-11-14 15:02:15 -08:00
|
|
|
struct evsel *evsel, double avg, bool ok)
|
2022-11-14 15:02:14 -08:00
|
|
|
{
|
2022-11-14 15:02:15 -08:00
|
|
|
const char *bad_count = evsel->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED;
|
2022-11-14 15:02:14 -08:00
|
|
|
|
2022-11-14 15:02:15 -08:00
|
|
|
if (ok)
|
2024-11-12 16:00:41 +00:00
|
|
|
json_out(os, "\"counter-value\" : \"%f\"", avg);
|
2022-11-14 15:02:15 -08:00
|
|
|
else
|
2024-11-12 16:00:41 +00:00
|
|
|
json_out(os, "\"counter-value\" : \"%s\"", bad_count);
|
2022-11-14 15:02:14 -08:00
|
|
|
|
|
|
|
if (evsel->unit)
|
2024-11-12 16:00:41 +00:00
|
|
|
json_out(os, "\"unit\" : \"%s\"", evsel->unit);
|
2022-11-14 15:02:14 -08:00
|
|
|
|
2024-11-12 16:00:41 +00:00
|
|
|
json_out(os, "\"event\" : \"%s\"", evsel__name(evsel));
|
2022-11-14 15:02:14 -08:00
|
|
|
}
|
|
|
|
|
2024-11-12 16:00:41 +00:00
|
|
|
static void print_counter_value(struct perf_stat_config *config, struct outstate *os,
|
2022-11-14 15:02:15 -08:00
|
|
|
struct evsel *evsel, double avg, bool ok)
|
2022-11-14 15:02:14 -08:00
|
|
|
{
|
2022-08-05 13:01:04 -07:00
|
|
|
if (config->json_output)
|
2024-11-12 16:00:41 +00:00
|
|
|
print_counter_value_json(os, evsel, avg, ok);
|
2022-11-14 15:02:14 -08:00
|
|
|
else if (config->csv_output)
|
2022-11-14 15:02:15 -08:00
|
|
|
print_counter_value_csv(config, evsel, avg, ok);
|
2022-08-05 13:01:04 -07:00
|
|
|
else
|
2022-11-14 15:02:15 -08:00
|
|
|
print_counter_value_std(config, evsel, avg, ok);
|
2022-11-14 15:02:14 -08:00
|
|
|
}
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:14 -08:00
|
|
|
static void abs_printout(struct perf_stat_config *config,
|
2024-11-12 16:00:41 +00:00
|
|
|
struct outstate *os,
|
2023-02-19 01:28:45 -08:00
|
|
|
struct aggr_cpu_id id, int aggr_nr,
|
2022-11-14 15:02:15 -08:00
|
|
|
struct evsel *evsel, double avg, bool ok)
|
2022-11-14 15:02:14 -08:00
|
|
|
{
|
2024-11-12 16:00:41 +00:00
|
|
|
aggr_printout(config, os, evsel, id, aggr_nr);
|
|
|
|
print_counter_value(config, os, evsel, avg, ok);
|
|
|
|
print_cgroup(config, os, evsel->cgrp);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
|
2025-04-02 13:15:45 -07:00
|
|
|
static bool evlist__has_hybrid_pmus(struct evlist *evlist)
|
2023-05-27 00:21:50 -07:00
|
|
|
{
|
|
|
|
struct evsel *evsel;
|
2025-04-02 13:15:45 -07:00
|
|
|
struct perf_pmu *last_core_pmu = NULL;
|
2023-05-27 00:21:50 -07:00
|
|
|
|
2023-05-27 00:22:09 -07:00
|
|
|
if (perf_pmus__num_core_pmus() == 1)
|
2023-05-27 00:21:58 -07:00
|
|
|
return false;
|
|
|
|
|
2023-05-27 00:21:50 -07:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2025-04-02 13:15:45 -07:00
|
|
|
if (evsel->core.is_pmu_core) {
|
|
|
|
struct perf_pmu *pmu = evsel__find_pmu(evsel);
|
|
|
|
|
|
|
|
if (pmu == last_core_pmu)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (last_core_pmu == NULL) {
|
|
|
|
last_core_pmu = pmu;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* A distinct core PMU. */
|
2023-05-27 00:21:50 -07:00
|
|
|
return true;
|
2025-04-02 13:15:45 -07:00
|
|
|
}
|
2023-05-27 00:21:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
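evlist__has_hybrid_pmus() above reports true only when the event list spans at least two distinct core PMUs. The same first-seen/differs check in a self-contained form (illustrative sketch with hypothetical names, not the perf API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for "does this list span more than one core PMU?":
 * remember the first id seen and return true as soon as a different one appears. */
static bool spans_multiple_ids(const int *pmu_ids, size_t n)
{
	const int *first = NULL;

	for (size_t i = 0; i < n; i++) {
		if (!first) {
			first = &pmu_ids[i];
			continue;
		}
		if (pmu_ids[i] != *first)
			return true;	/* a distinct core PMU */
	}
	return false;
}

int main(void)
{
	int hybrid[] = { 4, 4, 8 };	/* e.g. cpu_core and cpu_atom PMU types */

	printf("%d\n", spans_multiple_ids(hybrid, 3));	/* prints 1 */
	return 0;
}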
|
|
|
|
|
2022-11-23 10:02:03 -08:00
|
|
|
static void printout(struct perf_stat_config *config, struct outstate *os,
|
2023-02-19 01:28:45 -08:00
|
|
|
double uval, u64 run, u64 ena, double noise, int aggr_idx)
|
2018-08-30 08:32:52 +02:00
|
|
|
{
|
|
|
|
struct perf_stat_output_ctx out;
|
2022-08-05 13:01:04 -07:00
|
|
|
print_metric_t pm;
|
2018-08-30 08:32:52 +02:00
|
|
|
new_line_t nl;
|
perf stat: New metricgroup output for the default mode
In the default mode, the current output of the metricgroup includes both
events and metrics, which is unnecessary and just makes the output hard to
read. Since different ARCHs (and even different generations of the same ARCH)
may use different events, the output also varies across platforms.
For a metricgroup, only outputting the value of each metric is good
enough.
Add a new field default_metricgroup in evsel to indicate an event of the
default metricgroup. For those events, printout() should print the
metricgroup name rather than each event.
Add perf_stat__skip_metric_event() to skip the evsel in the Default
metricgroup, if it's not running or not the metric event.
Add print_metricgroup_header_t to pass the functions which print the
display name of each metricgroup in the Default metricgroup. Support all
three output methods.
Factor out perf_stat__print_shadow_stats_metricgroup() to print out each
metrics.
On SPR:
Before:
./perf_old stat sleep 1
Performance counter stats for 'sleep 1':
0.54 msec task-clock:u # 0.001 CPUs utilized
0 context-switches:u # 0.000 /sec
0 cpu-migrations:u # 0.000 /sec
68 page-faults:u # 125.445 K/sec
540,970 cycles:u # 0.998 GHz
556,325 instructions:u # 1.03 insn per cycle
123,602 branches:u # 228.018 M/sec
6,889 branch-misses:u # 5.57% of all branches
3,245,820 TOPDOWN.SLOTS:u # 18.4 % tma_backend_bound
# 17.2 % tma_retiring
# 23.1 % tma_bad_speculation
# 41.4 % tma_frontend_bound
564,859 topdown-retiring:u
1,370,999 topdown-fe-bound:u
603,271 topdown-be-bound:u
744,874 topdown-bad-spec:u
12,661 INT_MISC.UOP_DROPPING:u # 23.357 M/sec
1.001798215 seconds time elapsed
0.000193000 seconds user
0.001700000 seconds sys
After:
$ ./perf stat sleep 1
Performance counter stats for 'sleep 1':
0.51 msec task-clock:u # 0.001 CPUs utilized
0 context-switches:u # 0.000 /sec
0 cpu-migrations:u # 0.000 /sec
68 page-faults:u # 132.683 K/sec
545,228 cycles:u # 1.064 GHz
555,509 instructions:u # 1.02 insn per cycle
123,574 branches:u # 241.120 M/sec
6,957 branch-misses:u # 5.63% of all branches
TopdownL1 # 17.5 % tma_backend_bound
# 22.6 % tma_bad_speculation
# 42.7 % tma_frontend_bound
# 17.1 % tma_retiring
TopdownL2 # 21.8 % tma_branch_mispredicts
# 11.5 % tma_core_bound
# 13.4 % tma_fetch_bandwidth
# 29.3 % tma_fetch_latency
# 2.7 % tma_heavy_operations
# 14.5 % tma_light_operations
# 0.8 % tma_machine_clears
# 6.1 % tma_memory_bound
1.001712086 seconds time elapsed
0.000151000 seconds user
0.001618000 seconds sys
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ahmad Yasin <ahmad.yasin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: https://lore.kernel.org/r/20230616031420.3751973-3-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2023-06-15 20:14:17 -07:00
|
|
|
print_metricgroup_header_t pmh;
|
2022-11-14 15:02:16 -08:00
|
|
|
bool ok = true;
|
2022-11-23 10:02:03 -08:00
|
|
|
struct evsel *counter = os->evsel;
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-08-05 13:01:04 -07:00
|
|
|
if (config->csv_output) {
|
|
|
|
pm = config->metric_only ? print_metric_only_csv : print_metric_csv;
|
2024-11-12 16:00:43 +00:00
|
|
|
nl = config->metric_only ? NULL : new_line_csv;
|
2023-06-15 20:14:17 -07:00
|
|
|
pmh = print_metricgroup_header_csv;
|
2024-11-12 16:00:45 +00:00
|
|
|
os->csv_col_pad = 4 + (counter->cgrp ? 1 : 0);
|
2022-08-05 13:01:04 -07:00
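Because the JSON field names above are stable, a consumer only needs to key on
them. A minimal, self-contained sketch (not part of the perf tree; the helper
name is made up, and a real consumer would use a proper JSON library) that
pulls the event name and counter value out of each 'perf stat -j' line:
```
/* parse-stat-json.c: sketch of a consumer of 'perf stat -j' output. */
#include <stdio.h>
#include <string.h>

/* Copy the value that follows "key" : into buf; return 0 on success. */
static int json_field(const char *line, const char *key, char *buf, size_t len)
{
	char pattern[64];
	const char *p, *end;

	snprintf(pattern, sizeof(pattern), "\"%s\" : ", key);
	p = strstr(line, pattern);
	if (!p)
		return -1;
	p += strlen(pattern);
	if (*p == '"') {			/* quoted string value */
		p++;
		end = strchr(p, '"');
	} else {				/* bare numeric value */
		end = p + strcspn(p, ",}");
	}
	if (!end || (size_t)(end - p) >= len)
		return -1;
	memcpy(buf, p, end - p);
	buf[end - p] = '\0';
	return 0;
}

int main(void)
{
	char line[4096], event[128], value[64];

	while (fgets(line, sizeof(line), stdin)) {
		if (!json_field(line, "event", event, sizeof(event)) &&
		    !json_field(line, "counter-value", value, sizeof(value)))
			printf("%s = %s\n", event, value);
	}
	return 0;
}
```
It could be fed as, for example, 'perf stat -j true 2>&1 | ./parse-stat-json',
since perf stat writes its statistics to stderr by default.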
|
|
|
} else if (config->json_output) {
|
|
|
|
pm = config->metric_only ? print_metric_only_json : print_metric_json;
|
2024-11-12 16:00:43 +00:00
|
|
|
nl = config->metric_only ? NULL : new_line_json;
|
perf stat: New metricgroup output for the default mode
In the default mode, the current output of a metricgroup includes both
events and metrics, which is not necessary and just makes the output
hard to read. Since different ARCHs (even different generations of the
same ARCH) may use different events, the output also varies across
platforms.
For a metricgroup, only outputting the value of each metric is good
enough.
Add a new field default_metricgroup in evsel to indicate an event of the
default metricgroup. For those events, printout() should print the
metricgroup name rather than each event.
Add perf_stat__skip_metric_event() to skip the evsel in the Default
metricgroup, if it's not running or not the metric event.
Add print_metricgroup_header_t to pass the functions which print the
display name of each metricgroup in the Default metricgroup. Support all
three output methods.
Factor out perf_stat__print_shadow_stats_metricgroup() to print out each
metric.
On SPR:
Before:
./perf_old stat sleep 1
Performance counter stats for 'sleep 1':
0.54 msec task-clock:u # 0.001 CPUs utilized
0 context-switches:u # 0.000 /sec
0 cpu-migrations:u # 0.000 /sec
68 page-faults:u # 125.445 K/sec
540,970 cycles:u # 0.998 GHz
556,325 instructions:u # 1.03 insn per cycle
123,602 branches:u # 228.018 M/sec
6,889 branch-misses:u # 5.57% of all branches
3,245,820 TOPDOWN.SLOTS:u # 18.4 % tma_backend_bound
# 17.2 % tma_retiring
# 23.1 % tma_bad_speculation
# 41.4 % tma_frontend_bound
564,859 topdown-retiring:u
1,370,999 topdown-fe-bound:u
603,271 topdown-be-bound:u
744,874 topdown-bad-spec:u
12,661 INT_MISC.UOP_DROPPING:u # 23.357 M/sec
1.001798215 seconds time elapsed
0.000193000 seconds user
0.001700000 seconds sys
After:
$ ./perf stat sleep 1
Performance counter stats for 'sleep 1':
0.51 msec task-clock:u # 0.001 CPUs utilized
0 context-switches:u # 0.000 /sec
0 cpu-migrations:u # 0.000 /sec
68 page-faults:u # 132.683 K/sec
545,228 cycles:u # 1.064 GHz
555,509 instructions:u # 1.02 insn per cycle
123,574 branches:u # 241.120 M/sec
6,957 branch-misses:u # 5.63% of all branches
TopdownL1 # 17.5 % tma_backend_bound
# 22.6 % tma_bad_speculation
# 42.7 % tma_frontend_bound
# 17.1 % tma_retiring
TopdownL2 # 21.8 % tma_branch_mispredicts
# 11.5 % tma_core_bound
# 13.4 % tma_fetch_bandwidth
# 29.3 % tma_fetch_latency
# 2.7 % tma_heavy_operations
# 14.5 % tma_light_operations
# 0.8 % tma_machine_clears
# 6.1 % tma_memory_bound
1.001712086 seconds time elapsed
0.000151000 seconds user
0.001618000 seconds sys
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ahmad Yasin <ahmad.yasin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: https://lore.kernel.org/r/20230616031420.3751973-3-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2023-06-15 20:14:17 -07:00
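The print_metricgroup_header_t mentioned above is what keeps printout()
format-agnostic: one callback per output mode is selected up front and called
wherever a Default metricgroup header has to be emitted. A standalone sketch
of that dispatch pattern (illustrative names only, not the perf declarations):
```
#include <stdio.h>

/* One printer per output mode; illustrative, not the perf signatures. */
typedef void (*header_printer_t)(const char *group);

static void header_std(const char *group)  { printf("%35s", group); }
static void header_csv(const char *group)  { printf("%s,", group); }
static void header_json(const char *group) { printf("\"metricgroup\" : \"%s\"", group); }

int main(void)
{
	enum { OUT_STD, OUT_CSV, OUT_JSON } mode = OUT_JSON;
	header_printer_t pmh = (mode == OUT_CSV)  ? header_csv :
			       (mode == OUT_JSON) ? header_json : header_std;

	pmh("TopdownL1");	/* prints the group name in the chosen format */
	putchar('\n');
	return 0;
}
```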
|
|
|
pmh = print_metricgroup_header_json;
|
2022-08-05 13:01:04 -07:00
|
|
|
} else {
|
|
|
|
pm = config->metric_only ? print_metric_only : print_metric_std;
|
2024-11-12 16:00:43 +00:00
|
|
|
nl = config->metric_only ? NULL : new_line_std;
|
2023-06-15 20:14:17 -07:00
|
|
|
pmh = print_metricgroup_header_std;
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
perf stat: Align CSV output for summary mode
The 'perf stat' subcommand supports requesting a summary of the interval
counter readings, but the summary lines break the CSV output, so it's
hard for scripts to parse the result.
Before:
# perf stat -x, -I1000 --interval-count 1 --summary
1.001323097,8013.48,msec,cpu-clock,8013483384,100.00,8.013,CPUs utilized
1.001323097,270,,context-switches,8013513297,100.00,0.034,K/sec
1.001323097,13,,cpu-migrations,8013530032,100.00,0.002,K/sec
1.001323097,184,,page-faults,8013546992,100.00,0.023,K/sec
1.001323097,20574191,,cycles,8013551506,100.00,0.003,GHz
1.001323097,10562267,,instructions,8013564958,100.00,0.51,insn per cycle
1.001323097,2019244,,branches,8013575673,100.00,0.252,M/sec
1.001323097,106152,,branch-misses,8013585776,100.00,5.26,of all branches
8013.48,msec,cpu-clock,8013483384,100.00,7.984,CPUs utilized
270,,context-switches,8013513297,100.00,0.034,K/sec
13,,cpu-migrations,8013530032,100.00,0.002,K/sec
184,,page-faults,8013546992,100.00,0.023,K/sec
20574191,,cycles,8013551506,100.00,0.003,GHz
10562267,,instructions,8013564958,100.00,0.51,insn per cycle
2019244,,branches,8013575673,100.00,0.252,M/sec
106152,,branch-misses,8013585776,100.00,5.26,of all branches
The summary line loses the timestamp column, which breaks the CSV
output.
We add a column at the original 'timestamp' position and it just says
'summary' for the summary line.
After:
# perf stat -x, -I1000 --interval-count 1 --summary
1.001196053,8012.72,msec,cpu-clock,8012722903,100.00,8.013,CPUs utilized
1.001196053,218,,context-switches,8012753271,100.00,0.027,K/sec
1.001196053,9,,cpu-migrations,8012769767,100.00,0.001,K/sec
1.001196053,0,,page-faults,8012786257,100.00,0.000,K/sec
1.001196053,15004518,,cycles,8012790637,100.00,0.002,GHz
1.001196053,7954691,,instructions,8012804027,100.00,0.53,insn per cycle
1.001196053,1590259,,branches,8012814766,100.00,0.198,M/sec
1.001196053,82601,,branch-misses,8012824365,100.00,5.19,of all branches
summary,8012.72,msec,cpu-clock,8012722903,100.00,7.986,CPUs utilized
summary,218,,context-switches,8012753271,100.00,0.027,K/sec
summary,9,,cpu-migrations,8012769767,100.00,0.001,K/sec
summary,0,,page-faults,8012786257,100.00,0.000,K/sec
summary,15004518,,cycles,8012790637,100.00,0.002,GHz
summary,7954691,,instructions,8012804027,100.00,0.53,insn per cycle
summary,1590259,,branches,8012814766,100.00,0.198,M/sec
summary,82601,,branch-misses,8012824365,100.00,5.19,of all branches
Now it's easy for scripts to analyse the summary lines.
Of course, we also take care not to break existing scripts, which can
continue to use the old, broken CSV format by using the new
'--no-csv-summary' option.
# perf stat -x, -I1000 --interval-count 1 --summary --no-csv-summary
1.001213261,8012.67,msec,cpu-clock,8012672327,100.00,8.013,CPUs utilized
1.001213261,197,,context-switches,8012703742,100.00,24.586,/sec
1.001213261,9,,cpu-migrations,8012720902,100.00,1.123,/sec
1.001213261,644,,page-faults,8012738266,100.00,80.373,/sec
1.001213261,18350698,,cycles,8012744109,100.00,0.002,GHz
1.001213261,12745021,,instructions,8012759001,100.00,0.69,insn per cycle
1.001213261,2458033,,branches,8012770864,100.00,306.768,K/sec
1.001213261,102107,,branch-misses,8012781751,100.00,4.15,of all branches
8012.67,msec,cpu-clock,8012672327,100.00,7.985,CPUs utilized
197,,context-switches,8012703742,100.00,24.586,/sec
9,,cpu-migrations,8012720902,100.00,1.123,/sec
644,,page-faults,8012738266,100.00,80.373,/sec
18350698,,cycles,8012744109,100.00,0.002,GHz
12745021,,instructions,8012759001,100.00,0.69,insn per cycle
2458033,,branches,8012770864,100.00,306.768,K/sec
102107,,branch-misses,8012781751,100.00,4.15,of all branches
This option can be enabled in perf config by setting the variable
'stat.no-csv-summary'.
# perf config stat.no-csv-summary=true
# perf config -l
stat.no-csv-summary=true
# perf stat -x, -I1000 --interval-count 1 --summary
1.001330198,8013.28,msec,cpu-clock,8013279201,100.00,8.013,CPUs utilized
1.001330198,205,,context-switches,8013308394,100.00,25.583,/sec
1.001330198,10,,cpu-migrations,8013324681,100.00,1.248,/sec
1.001330198,0,,page-faults,8013340926,100.00,0.000,/sec
1.001330198,8027742,,cycles,8013344503,100.00,0.001,GHz
1.001330198,2871717,,instructions,8013356501,100.00,0.36,insn per cycle
1.001330198,553564,,branches,8013366204,100.00,69.081,K/sec
1.001330198,54021,,branch-misses,8013375952,100.00,9.76,of all branches
8013.28,msec,cpu-clock,8013279201,100.00,7.985,CPUs utilized
205,,context-switches,8013308394,100.00,25.583,/sec
10,,cpu-migrations,8013324681,100.00,1.248,/sec
0,,page-faults,8013340926,100.00,0.000,/sec
8027742,,cycles,8013344503,100.00,0.001,GHz
2871717,,instructions,8013356501,100.00,0.36,insn per cycle
553564,,branches,8013366204,100.00,69.081,K/sec
54021,,branch-misses,8013375952,100.00,9.76,of all branches
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jin Yao <yao.jin@intel.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20210319070156.20394-1-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2021-03-19 15:01:55 +08:00
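With the fixed layout every row carries the same leading column, so a consumer
can simply branch on whether that column is a timestamp or the literal string
'summary'. A small sketch of such a consumer (an assumption for illustration:
the default ',' separator, rows fed via stdin):
```
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char line[1024];

	while (fgets(line, sizeof(line), stdin)) {
		char *comma;

		line[strcspn(line, "\n")] = '\0';	/* drop trailing newline */
		comma = strchr(line, ',');
		if (!comma)
			continue;
		*comma = '\0';				/* split off the first column */
		if (strcmp(line, "summary") == 0)
			printf("summary row:     %s\n", comma + 1);
		else
			printf("interval %.3fs:  %s\n", atof(line), comma + 1);
	}
	return 0;
}
```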
|
|
|
|
2018-08-30 08:32:52 +02:00
|
|
|
if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
|
|
|
|
if (config->metric_only) {
|
perf stat: Also hide metric-units from JSON when event didn't run
We decided to hide NULL metric-units rather than showing them as "(null)"
when a dependent event for a metric doesn't exist. But on hybrid systems,
if the process doesn't hit a PMU, you get an empty-string metric unit
instead. To make this consistent, change all empty strings to NULL.
Note that metric-threshold is already hidden in this case without this
change.
Where a process only runs on cpu_core and never hits cpu_atom:
Before:
$ perf stat -j -- true
...
{"counter-value" : "<not counted>", "unit" : "", "event" : "cpu_atom/branch-misses/", "event-runtime" : 0, "pcnt-running" : 0.00, "metric-value" : "0.000000", "metric-unit" : ""}
{"counter-value" : "6326.000000", "unit" : "", "event" : "cpu_core/branch-misses/", "event-runtime" : 293786, "pcnt-running" : 100.00, "metric-value" : "3.553394", "metric-unit" : "of all branches", "metric-threshold" : "good"}
...
After:
...
{"counter-value" : "<not counted>", "unit" : "", "event" : "cpu_atom/branch-misses/", "event-runtime" : 0, "pcnt-running" : 0.00}
{"counter-value" : "5778.000000", "unit" : "", "event" : "cpu_core/branch-misses/", "event-runtime" : 282240, "pcnt-running" : 100.00, "metric-value" : "3.226797", "metric-unit" : "of all branches", "metric-threshold" : "good"}
...
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: James Clark <james.clark@linaro.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Yicong Yang <yangyicong@hisilicon.com>
Link: https://lore.kernel.org/r/20241112160048.951213-3-james.clark@linaro.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2024-11-12 16:00:42 +00:00
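The normalization the message describes boils down to treating an empty unit
string exactly like a missing one, roughly like this sketch (the helper name
is assumed, not taken from the perf sources):
```
/* Sketch only: map "" to NULL so the writer omits the field entirely. */
static inline const char *metric_unit_or_null(const char *unit)
{
	return (unit && *unit) ? unit : NULL;
}
```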
|
|
|
pm(config, os, METRIC_THRESHOLD_UNKNOWN, /*format=*/NULL,
|
|
|
|
/*unit=*/NULL, /*val=*/0);
|
2018-08-30 08:32:52 +02:00
|
|
|
return;
|
|
|
|
}
|
2022-11-14 15:02:15 -08:00
|
|
|
|
2022-11-14 15:02:16 -08:00
|
|
|
ok = false;
|
2018-08-30 08:32:52 +02:00
|
|
|
|
|
|
|
if (counter->supported) {
|
2025-04-02 13:15:45 -07:00
|
|
|
if (!evlist__has_hybrid_pmus(counter->evlist)) {
|
perf stat: Disable the NMI watchdog message on hybrid
If we run a single workload that only runs on the big core, there is
always an ugly message about disabling the NMI watchdog because the atom
events are not counted.
Before:
# ./perf stat true
Performance counter stats for 'true':
0.43 msec task-clock # 0.396 CPUs utilized
0 context-switches # 0.000 /sec
0 cpu-migrations # 0.000 /sec
45 page-faults # 103.918 K/sec
639,634 cpu_core/cycles/ # 1.477 G/sec
<not counted> cpu_atom/cycles/ (0.00%)
643,498 cpu_core/instructions/ # 1.486 G/sec
<not counted> cpu_atom/instructions/ (0.00%)
123,715 cpu_core/branches/ # 285.694 M/sec
<not counted> cpu_atom/branches/ (0.00%)
4,094 cpu_core/branch-misses/ # 9.454 M/sec
<not counted> cpu_atom/branch-misses/ (0.00%)
0.001092407 seconds time elapsed
0.001144000 seconds user
0.000000000 seconds sys
Some events weren't counted. Try disabling the NMI watchdog:
echo 0 > /proc/sys/kernel/nmi_watchdog
perf stat ...
echo 1 > /proc/sys/kernel/nmi_watchdog
# ./perf stat -e '{cpu_atom/cycles/,msr/tsc/}' true
Performance counter stats for 'true':
<not counted> cpu_atom/cycles/ (0.00%)
<not counted> msr/tsc/ (0.00%)
0.001904106 seconds time elapsed
0.001947000 seconds user
0.000000000 seconds sys
Some events weren't counted. Try disabling the NMI watchdog:
echo 0 > /proc/sys/kernel/nmi_watchdog
perf stat ...
echo 1 > /proc/sys/kernel/nmi_watchdog
The events in group usually have to be from the same PMU. Try reorganizing the group.
Now we disable the NMI watchdog message on hybrid, otherwise there
are too many false positives.
After:
# ./perf stat true
Performance counter stats for 'true':
0.79 msec task-clock # 0.419 CPUs utilized
0 context-switches # 0.000 /sec
0 cpu-migrations # 0.000 /sec
48 page-faults # 60.889 K/sec
777,692 cpu_core/cycles/ # 986.519 M/sec
<not counted> cpu_atom/cycles/ (0.00%)
669,147 cpu_core/instructions/ # 848.828 M/sec
<not counted> cpu_atom/instructions/ (0.00%)
128,635 cpu_core/branches/ # 163.176 M/sec
<not counted> cpu_atom/branches/ (0.00%)
4,089 cpu_core/branch-misses/ # 5.187 M/sec
<not counted> cpu_atom/branch-misses/ (0.00%)
0.001880649 seconds time elapsed
0.001935000 seconds user
0.000000000 seconds sys
# ./perf stat -e '{cpu_atom/cycles/,msr/tsc/}' true
Performance counter stats for 'true':
<not counted> cpu_atom/cycles/ (0.00%)
<not counted> msr/tsc/ (0.00%)
0.000963319 seconds time elapsed
0.000999000 seconds user
0.000000000 seconds sys
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jin Yao <yao.jin@intel.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20210610034557.29766-1-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2021-06-10 11:45:57 +08:00
|
|
|
config->print_free_counters_hint = 1;
|
|
|
|
}
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out.print_metric = pm;
|
|
|
|
out.new_line = nl;
|
2023-06-15 20:14:17 -07:00
|
|
|
out.print_metricgroup_header = pmh;
|
2022-11-23 10:02:03 -08:00
|
|
|
out.ctx = os;
|
2018-08-30 08:32:52 +02:00
|
|
|
out.force_header = false;
|
|
|
|
|
2023-06-15 20:14:17 -07:00
|
|
|
if (!config->metric_only && !counter->default_metricgroup) {
|
2024-11-12 16:00:41 +00:00
|
|
|
abs_printout(config, os, os->id, os->aggr_nr, counter, uval, ok);
|
2022-11-14 15:02:16 -08:00
|
|
|
|
2024-11-12 16:00:41 +00:00
|
|
|
print_noise(config, os, counter, noise, /*before_metric=*/true);
|
|
|
|
print_running(config, os, run, ena, /*before_metric=*/true);
|
2022-11-14 15:02:16 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ok) {
|
2023-06-15 20:14:17 -07:00
|
|
|
if (!config->metric_only && counter->default_metricgroup) {
|
|
|
|
void *from = NULL;
|
|
|
|
|
2024-11-12 16:00:41 +00:00
|
|
|
aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
|
2023-06-15 20:14:17 -07:00
|
|
|
/* Print out all the metricgroup with the same metric event. */
|
|
|
|
do {
|
|
|
|
int num = 0;
|
|
|
|
|
|
|
|
/* Print out the new line for the next new metricgroup. */
|
|
|
|
if (from) {
|
|
|
|
if (config->json_output)
|
|
|
|
new_line_json(config, (void *)os);
|
|
|
|
else
|
|
|
|
__new_line_std_csv(config, os);
|
|
|
|
}
|
|
|
|
|
2024-11-12 16:00:41 +00:00
|
|
|
print_noise(config, os, counter, noise, /*before_metric=*/true);
|
|
|
|
print_running(config, os, run, ena, /*before_metric=*/true);
|
2023-06-15 20:14:17 -07:00
|
|
|
from = perf_stat__print_shadow_stats_metricgroup(config, counter, aggr_idx,
|
2025-07-10 16:51:19 -07:00
|
|
|
&num, from, &out);
|
2023-06-15 20:14:17 -07:00
|
|
|
} while (from != NULL);
|
2025-07-10 16:51:19 -07:00
|
|
|
} else {
|
|
|
|
perf_stat__print_shadow_stats(config, counter, uval, aggr_idx, &out);
|
|
|
|
}
|
2022-11-14 15:02:16 -08:00
|
|
|
} else {
|
2024-11-12 16:00:42 +00:00
|
|
|
pm(config, os, METRIC_THRESHOLD_UNKNOWN, /*format=*/NULL, /*unit=*/NULL, /*val=*/0);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
|
2022-11-14 15:02:16 -08:00
|
|
|
if (!config->metric_only) {
|
2024-11-12 16:00:41 +00:00
|
|
|
print_noise(config, os, counter, noise, /*before_metric=*/false);
|
|
|
|
print_running(config, os, run, ena, /*before_metric=*/false);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-25 11:24:31 -08:00
|
|
|
/**
|
|
|
|
 * should_skip_zero_counter() - Check if a zero count for the event should be skipped.
|
|
|
|
* @config: The perf stat configuration (including aggregation mode).
|
|
|
|
* @counter: The evsel with its associated cpumap.
|
|
|
|
* @id: The aggregation id that is being queried.
|
|
|
|
*
|
|
|
|
 * Due to a mismatch between the event cpumap or thread-map and the
|
|
|
|
 * aggregation mode, the counter is sometimes iterated over map entries
|
|
|
|
 * that do not contain any values.
|
|
|
|
*
|
|
|
|
* For example, uncore events have dedicated CPUs to manage them,
|
|
|
|
 * so the result for other CPUs should be zero and skipped.
|
|
|
|
*
|
|
|
|
* Return: %true if the value should NOT be printed, %false if the value
|
|
|
|
* needs to be printed like "<not counted>" or "<not supported>".
|
|
|
|
*/
|
|
|
|
static bool should_skip_zero_counter(struct perf_stat_config *config,
|
|
|
|
struct evsel *counter,
|
|
|
|
const struct aggr_cpu_id *id)
|
|
|
|
{
|
|
|
|
struct perf_cpu cpu;
|
|
|
|
int idx;
|
|
|
|
|
2024-09-26 15:48:34 +01:00
|
|
|
/*
|
|
|
|
* Skip unsupported default events when not verbose. (default events
|
|
|
|
* are all marked 'skippable').
|
|
|
|
*/
|
|
|
|
if (verbose == 0 && counter->skippable && !counter->supported)
|
|
|
|
return true;
|
|
|
|
|
2023-01-25 11:24:31 -08:00
|
|
|
/*
|
|
|
|
* Skip value 0 when enabling --per-thread globally,
|
|
|
|
 * otherwise there would be too much 0 output.
|
|
|
|
*/
|
|
|
|
if (config->aggr_mode == AGGR_THREAD && config->system_wide)
|
|
|
|
return true;
|
2023-08-01 13:54:52 -07:00
|
|
|
|
2024-10-01 20:20:07 -07:00
|
|
|
/*
|
2025-04-03 12:43:35 -07:00
|
|
|
 * In per-thread mode, aggr_map and aggr_get_id may be NULL; assume
|
|
|
|
 * all 0 values should be output in that case.
|
|
|
|
*/
|
|
|
|
if (!config->aggr_map || !config->aggr_get_id)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Tool events may be gathered on all logical CPUs, for example
|
|
|
|
* system_time, but for many the first index is the only one used, for
|
|
|
|
* example num_cores. Don't skip for the first index.
|
2024-10-01 20:20:07 -07:00
|
|
|
*/
|
perf tool_pmu: Move expr literals to tool_pmu
Add the expr literals like "#smt_on" as tool events, so that 'perf stat'
can report their values. On my laptop with hyperthreading enabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
16 num_cpus_online
1 num_dies
1 num_packages
1 smt_on
2,496,000,000 system_tsc_freq
0.001113637 seconds time elapsed
0.001218000 seconds user
0.000000000 seconds sys
```
And with hyperthreading disabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true
Performance counter stats for 'true':
0 has_pmem
8 num_cores
16 num_cpus
8 num_cpus_online
1 num_dies
1 num_packages
0 smt_on
2,496,000,000 system_tsc_freq
0.000802115 seconds time elapsed
0.000000000 seconds user
0.000806000 seconds sys
```
As zero matters for these values, in stat-display
should_skip_zero_counter() only skips a zero value if it is not the
first aggregation index.
The tool event implementations are used in expr but not evaluated as
events for simplicity. Also core_wide isn't made a tool event as it
requires command line parameters.
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20241002032016.333748-8-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2024-10-01 20:20:10 -07:00
|
|
|
if (evsel__is_tool(counter)) {
|
|
|
|
struct aggr_cpu_id own_id =
|
|
|
|
config->aggr_get_id(config, (struct perf_cpu){ .cpu = 0 });
|
|
|
|
|
|
|
|
return !aggr_cpu_id__equal(id, &own_id);
|
|
|
|
}
|
2023-01-25 11:24:31 -08:00
|
|
|
/*
|
2025-04-03 12:43:35 -07:00
|
|
|
* Skip value 0 when the counter's cpumask doesn't match the given aggr
|
|
|
|
* id.
|
2023-01-25 11:24:31 -08:00
|
|
|
*/
|
|
|
|
|
2025-04-03 12:43:35 -07:00
|
|
|
perf_cpu_map__for_each_cpu(cpu, idx, counter->core.cpus) {
|
2023-01-25 11:24:31 -08:00
|
|
|
struct aggr_cpu_id own_id = config->aggr_get_id(config, cpu);
|
|
|
|
|
|
|
|
if (aggr_cpu_id__equal(id, &own_id))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-04-12 21:59:48 +08:00
|
|
|
static void print_counter_aggrdata(struct perf_stat_config *config,
|
2023-02-19 01:28:45 -08:00
|
|
|
struct evsel *counter, int aggr_idx,
|
2022-11-23 10:02:05 -08:00
|
|
|
struct outstate *os)
|
2019-04-12 21:59:48 +08:00
|
|
|
{
|
|
|
|
FILE *output = config->output;
|
|
|
|
u64 ena, run, val;
|
|
|
|
double uval;
|
2022-10-17 19:02:25 -07:00
|
|
|
struct perf_stat_evsel *ps = counter->stats;
|
2023-02-19 01:28:45 -08:00
|
|
|
struct perf_stat_aggr *aggr = &ps->aggr[aggr_idx];
|
|
|
|
struct aggr_cpu_id id = config->aggr_map->map[aggr_idx];
|
2022-10-17 19:02:25 -07:00
|
|
|
double avg = aggr->counts.val;
|
2022-11-23 10:01:59 -08:00
|
|
|
bool metric_only = config->metric_only;
|
2022-11-23 10:02:05 -08:00
|
|
|
|
|
|
|
os->id = id;
|
2023-02-19 01:28:45 -08:00
|
|
|
os->aggr_nr = aggr->nr;
|
2022-11-23 10:02:05 -08:00
|
|
|
os->evsel = counter;
|
2019-04-12 21:59:48 +08:00
|
|
|
|
2022-12-06 09:58:04 -08:00
|
|
|
/* Skip already merged uncore/hybrid events */
|
2025-05-13 14:45:02 -07:00
|
|
|
if (config->aggr_mode != AGGR_NONE) {
|
|
|
|
if (evsel__is_hybrid(counter)) {
|
|
|
|
if (config->hybrid_merge && counter->first_wildcard_match != NULL)
|
|
|
|
return;
|
|
|
|
} else {
|
|
|
|
if (counter->first_wildcard_match != NULL)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2019-04-12 21:59:48 +08:00
|
|
|
|
2022-10-17 19:02:25 -07:00
|
|
|
val = aggr->counts.val;
|
|
|
|
ena = aggr->counts.ena;
|
|
|
|
run = aggr->counts.run;
|
perf stat: Filter out unmatched aggregation for hybrid event
perf stat supports several aggregation modes, such as --per-core,
--per-socket, etc. A hybrid event, however, may only be available on a
subset of CPUs. So for --per-core we need to filter out the unavailable
cores, for --per-socket the unavailable sockets, and so on.
Before:
# perf stat --per-core -e cpu_core/cycles/ -a -- sleep 1
Performance counter stats for 'system wide':
S0-D0-C0 2 479,530 cpu_core/cycles/
S0-D0-C4 2 175,007 cpu_core/cycles/
S0-D0-C8 2 166,240 cpu_core/cycles/
S0-D0-C12 2 704,673 cpu_core/cycles/
S0-D0-C16 2 865,835 cpu_core/cycles/
S0-D0-C20 2 2,958,461 cpu_core/cycles/
S0-D0-C24 2 163,988 cpu_core/cycles/
S0-D0-C28 2 164,729 cpu_core/cycles/
S0-D0-C32 0 <not counted> cpu_core/cycles/
S0-D0-C33 0 <not counted> cpu_core/cycles/
S0-D0-C34 0 <not counted> cpu_core/cycles/
S0-D0-C35 0 <not counted> cpu_core/cycles/
S0-D0-C36 0 <not counted> cpu_core/cycles/
S0-D0-C37 0 <not counted> cpu_core/cycles/
S0-D0-C38 0 <not counted> cpu_core/cycles/
S0-D0-C39 0 <not counted> cpu_core/cycles/
1.003597211 seconds time elapsed
After:
# perf stat --per-core -e cpu_core/cycles/ -a -- sleep 1
Performance counter stats for 'system wide':
S0-D0-C0 2 210,428 cpu_core/cycles/
S0-D0-C4 2 444,830 cpu_core/cycles/
S0-D0-C8 2 435,241 cpu_core/cycles/
S0-D0-C12 2 423,976 cpu_core/cycles/
S0-D0-C16 2 859,350 cpu_core/cycles/
S0-D0-C20 2 1,559,589 cpu_core/cycles/
S0-D0-C24 2 163,924 cpu_core/cycles/
S0-D0-C28 2 376,610 cpu_core/cycles/
1.003621290 seconds time elapsed
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Co-developed-by: Jiri Olsa <jolsa@redhat.com>
Reviewed-by: Jiri Olsa <jolsa@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210427070139.25256-16-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2021-04-27 15:01:28 +08:00
|
|
|
|
2025-07-10 16:51:19 -07:00
|
|
|
if (perf_stat__skip_metric_event(counter, ena, run))
|
perf stat: New metricgroup output for the default mode
In the default mode, the current output of the metricgroup include both
events and metrics, which is not necessary and just makes the output
hard to read. Since different ARCHs (even different generations in the
same ARCH) may use different events. The output also vary on different
platforms.
For a metricgroup, only outputting the value of each metric is good
enough.
Add a new field default_metricgroup in evsel to indicate an event of the
default metricgroup. For those events, printout() should print the
metricgroup name rather than each event.
Add perf_stat__skip_metric_event() to skip the evsel in the Default
metricgroup, if it's not running or not the metric event.
Add print_metricgroup_header_t to pass the functions which print the
display name of each metricgroup in the Default metricgroup. Support all
three output methods.
Factor out perf_stat__print_shadow_stats_metricgroup() to print out each
metric.
On SPR:
Before:
./perf_old stat sleep 1
Performance counter stats for 'sleep 1':
0.54 msec task-clock:u # 0.001 CPUs utilized
0 context-switches:u # 0.000 /sec
0 cpu-migrations:u # 0.000 /sec
68 page-faults:u # 125.445 K/sec
540,970 cycles:u # 0.998 GHz
556,325 instructions:u # 1.03 insn per cycle
123,602 branches:u # 228.018 M/sec
6,889 branch-misses:u # 5.57% of all branches
3,245,820 TOPDOWN.SLOTS:u # 18.4 % tma_backend_bound
# 17.2 % tma_retiring
# 23.1 % tma_bad_speculation
# 41.4 % tma_frontend_bound
564,859 topdown-retiring:u
1,370,999 topdown-fe-bound:u
603,271 topdown-be-bound:u
744,874 topdown-bad-spec:u
12,661 INT_MISC.UOP_DROPPING:u # 23.357 M/sec
1.001798215 seconds time elapsed
0.000193000 seconds user
0.001700000 seconds sys
After:
$ ./perf stat sleep 1
Performance counter stats for 'sleep 1':
0.51 msec task-clock:u # 0.001 CPUs utilized
0 context-switches:u # 0.000 /sec
0 cpu-migrations:u # 0.000 /sec
68 page-faults:u # 132.683 K/sec
545,228 cycles:u # 1.064 GHz
555,509 instructions:u # 1.02 insn per cycle
123,574 branches:u # 241.120 M/sec
6,957 branch-misses:u # 5.63% of all branches
TopdownL1 # 17.5 % tma_backend_bound
# 22.6 % tma_bad_speculation
# 42.7 % tma_frontend_bound
# 17.1 % tma_retiring
TopdownL2 # 21.8 % tma_branch_mispredicts
# 11.5 % tma_core_bound
# 13.4 % tma_fetch_bandwidth
# 29.3 % tma_fetch_latency
# 2.7 % tma_heavy_operations
# 14.5 % tma_light_operations
# 0.8 % tma_machine_clears
# 6.1 % tma_memory_bound
1.001712086 seconds time elapsed
0.000151000 seconds user
0.001618000 seconds sys
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ahmad Yasin <ahmad.yasin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: https://lore.kernel.org/r/20230616031420.3751973-3-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2023-06-15 20:14:17 -07:00
|
|
|
return;
|
|
|
|
|
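The skip rule described in the 'New metricgroup output' message above can be
sketched roughly as follows. This is a hedged illustration rather than the
stat-shadow.c body, and evsel__is_metric_event() is a hypothetical helper
standing in for the real metric-event lookup:

static bool sketch_skip_metric_event(struct evsel *counter, u64 ena, u64 run)
{
	/* Only events that belong to the default metricgroup are candidates. */
	if (!counter->default_metricgroup)
		return false;

	/* Never scheduled: nothing useful to print for the group. */
	if (ena == 0 || run == 0)
		return true;

	/* Hypothetical helper: keep only the events a metric actually reads. */
	return !evsel__is_metric_event(counter);
}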
2023-01-25 11:24:31 -08:00
|
|
|
if (val == 0 && should_skip_zero_counter(config, counter, &id))
|
2022-12-06 09:58:04 -08:00
|
|
|
return;
|
|
|
|
|
2022-11-23 10:01:55 -08:00
|
|
|
if (!metric_only) {
|
2024-11-12 16:00:41 +00:00
|
|
|
if (config->json_output) {
|
|
|
|
os->first = true;
|
2022-11-23 10:02:06 -08:00
|
|
|
fputc('{', output);
|
2024-11-12 16:00:41 +00:00
|
|
|
}
|
2024-11-12 16:00:44 +00:00
|
|
|
if (config->interval) {
|
2024-11-12 16:00:41 +00:00
|
|
|
if (config->json_output)
|
2024-11-12 16:00:44 +00:00
|
|
|
json_out(os, "%s", os->timestamp);
|
2024-11-12 16:00:41 +00:00
|
|
|
else
|
2024-11-12 16:00:44 +00:00
|
|
|
fprintf(output, "%s", os->timestamp);
|
2024-11-12 16:00:41 +00:00
|
|
|
} else if (config->summary && config->csv_output &&
|
2024-11-12 16:00:44 +00:00
|
|
|
!config->no_csv_summary)
|
perf stat: Do not align time prefix in CSV output
We don't care about the alignment in the CSV output as it's intended for machine
processing. Let's get rid of it to make the output more compact.
Before:
# perf stat -a --summary -I 1 -x, true
0.001149309,219.20,msec,cpu-clock,219322251,100.00,219.200,CPUs utilized
0.001149309,144,,context-switches,219241902,100.00,656.935,/sec
0.001149309,38,,cpu-migrations,219173705,100.00,173.358,/sec
0.001149309,61,,page-faults,219093635,100.00,278.285,/sec
0.001149309,10679310,,cycles,218746228,100.00,0.049,GHz
0.001149309,6288296,,instructions,218589869,100.00,0.59,insn per cycle
0.001149309,1386904,,branches,218428851,100.00,6.327,M/sec
0.001149309,56863,,branch-misses,218219951,100.00,4.10,of all branches
summary,219.20,msec,cpu-clock,219322251,100.00,20.025,CPUs utilized
summary,144,,context-switches,219241902,100.00,656.935,/sec
summary,38,,cpu-migrations,219173705,100.00,173.358,/sec
summary,61,,page-faults,219093635,100.00,278.285,/sec
summary,10679310,,cycles,218746228,100.00,0.049,GHz
summary,6288296,,instructions,218589869,100.00,0.59,insn per cycle
summary,1386904,,branches,218428851,100.00,6.327,M/sec
summary,56863,,branch-misses,218219951,100.00,4.10,of all branches
After:
0.001148449,224.75,msec,cpu-clock,224870589,100.00,224.747,CPUs utilized
0.001148449,176,,context-switches,224775564,100.00,783.103,/sec
0.001148449,38,,cpu-migrations,224707428,100.00,169.079,/sec
0.001148449,61,,page-faults,224629326,100.00,271.416,/sec
0.001148449,12172071,,cycles,224266368,100.00,0.054,GHz
0.001148449,6901907,,instructions,224108764,100.00,0.57,insn per cycle
0.001148449,1515655,,branches,223946693,100.00,6.744,M/sec
0.001148449,70027,,branch-misses,223735385,100.00,4.62,of all branches
summary,224.75,msec,cpu-clock,224870589,100.00,21.066,CPUs utilized
summary,176,,context-switches,224775564,100.00,783.103,/sec
summary,38,,cpu-migrations,224707428,100.00,169.079,/sec
summary,61,,page-faults,224629326,100.00,271.416,/sec
summary,12172071,,cycles,224266368,100.00,0.054,GHz
summary,6901907,,instructions,224108764,100.00,0.57,insn per cycle
summary,1515655,,branches,223946693,100.00,6.744,M/sec
summary,70027,,branch-misses,223735385,100.00,4.62,of all branches
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20221123180208.2068936-4-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-11-23 10:01:56 -08:00
|
|
|
fprintf(output, "%s%s", "summary", config->csv_sep);
|
2022-11-23 10:01:55 -08:00
|
|
|
}
|
2019-04-12 21:59:48 +08:00
|
|
|
|
|
|
|
uval = val * counter->scale;
|
2022-01-04 22:13:28 -08:00
|
|
|
|
2023-02-19 01:28:45 -08:00
|
|
|
printout(config, os, uval, run, ena, avg, aggr_idx);
|
2022-10-17 19:02:25 -07:00
|
|
|
|
2019-04-12 21:59:48 +08:00
|
|
|
if (!metric_only)
|
|
|
|
fputc('\n', output);
|
|
|
|
}
|
|
|
|
|
2022-11-14 15:02:25 -08:00
|
|
|
static void print_metric_begin(struct perf_stat_config *config,
|
|
|
|
struct evlist *evlist,
|
2022-11-23 10:02:02 -08:00
|
|
|
struct outstate *os, int aggr_idx)
|
2022-11-14 15:02:25 -08:00
|
|
|
{
|
|
|
|
struct perf_stat_aggr *aggr;
|
|
|
|
struct aggr_cpu_id id;
|
|
|
|
struct evsel *evsel;
|
|
|
|
|
2022-11-23 10:02:06 -08:00
|
|
|
os->first = true;
|
2022-11-14 15:02:25 -08:00
|
|
|
if (!config->metric_only)
|
|
|
|
return;
|
|
|
|
|
2022-11-23 10:02:06 -08:00
|
|
|
if (config->json_output)
|
|
|
|
fputc('{', config->output);
|
2022-11-14 15:02:25 -08:00
|
|
|
|
2024-11-12 16:00:44 +00:00
|
|
|
if (config->interval) {
|
2024-11-12 16:00:41 +00:00
|
|
|
if (config->json_output)
|
2024-11-12 16:00:44 +00:00
|
|
|
json_out(os, "%s", os->timestamp);
|
2024-11-12 16:00:41 +00:00
|
|
|
else
|
2024-11-12 16:00:44 +00:00
|
|
|
fprintf(config->output, "%s", os->timestamp);
|
2024-11-12 16:00:41 +00:00
|
|
|
}
|
2022-11-14 15:02:25 -08:00
|
|
|
evsel = evlist__first(evlist);
|
|
|
|
id = config->aggr_map->map[aggr_idx];
|
|
|
|
aggr = &evsel->stats->aggr[aggr_idx];
|
2024-11-12 16:00:41 +00:00
|
|
|
aggr_printout(config, os, evsel, id, aggr->nr);
|
2022-11-14 15:02:26 -08:00
|
|
|
|
2024-11-12 16:00:41 +00:00
|
|
|
print_cgroup(config, os, os->cgrp ? : evsel->cgrp);
|
2022-11-14 15:02:25 -08:00
|
|
|
}
|
|
|
|
|
perf stat: Tidy up JSON metric-only output when no metrics
It printed empty strings for each metric. I guess it's needed for CSV
output to match the column number. We could just ignore the empty
metrics in JSON but it ended up with a broken JSON object with a
trailing comma.
So I added a dummy '"metric-value" : "none"' part. To do that, it
needs to pass struct outstate to print_metric_end() to check if any
metric value is printed or not.
Before:
# perf stat -aj --metric-only --per-socket --for-each-cgroup system.slice true
{"socket" : "S0", "cpu-count" : 8, "cgroup" : "system.slice", "" : "", "" : "", "" : "", "" : "", "" : "", "" : "", "" : "", "" : ""}
After:
# perf stat -aj --metric-only --per-socket --for-each-cgroup system.slice true
{"socket" : "S0", "cpu-count" : 8, "cgroup" : "system.slice", "metric-value" : "none"}
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20221123180208.2068936-16-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-11-23 10:02:08 -08:00
|
|
|
static void print_metric_end(struct perf_stat_config *config, struct outstate *os)
|
2022-11-14 15:02:25 -08:00
|
|
|
{
|
2022-11-23 10:02:08 -08:00
|
|
|
FILE *output = config->output;
|
|
|
|
|
2022-11-14 15:02:25 -08:00
|
|
|
if (!config->metric_only)
|
|
|
|
return;
|
|
|
|
|
2022-11-23 10:02:08 -08:00
|
|
|
if (config->json_output) {
|
|
|
|
if (os->first)
|
|
|
|
fputs("\"metric-value\" : \"none\"", output);
|
|
|
|
fputc('}', output);
|
|
|
|
}
|
|
|
|
fputc('\n', output);
|
2022-11-14 15:02:25 -08:00
|
|
|
}
|
|
|
|
|
2018-08-30 08:32:52 +02:00
|
|
|
static void print_aggr(struct perf_stat_config *config,
|
2019-07-21 13:23:52 +02:00
|
|
|
struct evlist *evlist,
|
2022-11-23 10:02:05 -08:00
|
|
|
struct outstate *os)
|
2018-08-30 08:32:52 +02:00
|
|
|
{
|
2019-07-21 13:23:51 +02:00
|
|
|
struct evsel *counter;
|
2023-02-19 01:28:45 -08:00
|
|
|
int aggr_idx;
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2020-06-05 17:17:40 +08:00
|
|
|
if (!config->aggr_map || !config->aggr_get_id)
|
2018-08-30 08:32:52 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* With metric_only everything is on a single line.
|
|
|
|
* Without it, each counter has its own line.
|
|
|
|
*/
|
2023-02-19 01:28:45 -08:00
|
|
|
cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
|
|
|
|
print_metric_begin(config, evlist, os, aggr_idx);
|
2018-08-30 08:32:52 +02:00
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, counter) {
|
2023-02-19 01:28:45 -08:00
|
|
|
print_counter_aggrdata(config, counter, aggr_idx, os);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
2022-11-23 10:02:08 -08:00
|
|
|
print_metric_end(config, os);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-14 15:02:27 -08:00
|
|
|
static void print_aggr_cgroup(struct perf_stat_config *config,
|
|
|
|
struct evlist *evlist,
|
2022-11-23 10:02:05 -08:00
|
|
|
struct outstate *os)
|
2022-11-14 15:02:27 -08:00
|
|
|
{
|
|
|
|
struct evsel *counter, *evsel;
|
2023-02-19 01:28:45 -08:00
|
|
|
int aggr_idx;
|
2022-11-14 15:02:27 -08:00
|
|
|
|
|
|
|
if (!config->aggr_map || !config->aggr_get_id)
|
|
|
|
return;
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2022-11-23 10:02:05 -08:00
|
|
|
if (os->cgrp == evsel->cgrp)
|
2022-11-14 15:02:27 -08:00
|
|
|
continue;
|
|
|
|
|
2022-11-23 10:02:05 -08:00
|
|
|
os->cgrp = evsel->cgrp;
|
2022-11-14 15:02:27 -08:00
|
|
|
|
2023-02-19 01:28:45 -08:00
|
|
|
cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
|
|
|
|
print_metric_begin(config, evlist, os, aggr_idx);
|
2022-11-14 15:02:27 -08:00
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, counter) {
|
2022-11-23 10:02:05 -08:00
|
|
|
if (counter->cgrp != os->cgrp)
|
2022-11-14 15:02:27 -08:00
|
|
|
continue;
|
|
|
|
|
2023-02-19 01:28:45 -08:00
|
|
|
print_counter_aggrdata(config, counter, aggr_idx, os);
|
2022-11-14 15:02:27 -08:00
|
|
|
}
|
2022-11-23 10:02:08 -08:00
|
|
|
print_metric_end(config, os);
|
2022-11-14 15:02:27 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-30 08:32:52 +02:00
|
|
|
static void print_counter(struct perf_stat_config *config,
|
2022-11-23 10:02:05 -08:00
|
|
|
struct evsel *counter, struct outstate *os)
|
2018-08-30 08:32:52 +02:00
|
|
|
{
|
2023-02-19 01:28:45 -08:00
|
|
|
int aggr_idx;
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-10-17 19:02:25 -07:00
|
|
|
/* AGGR_THREAD doesn't have config->aggr_get_id */
|
|
|
|
if (!config->aggr_map)
|
|
|
|
return;
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2023-02-19 01:28:45 -08:00
|
|
|
cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
|
|
|
|
print_counter_aggrdata(config, counter, aggr_idx, os);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void print_no_aggr_metric(struct perf_stat_config *config,
|
2019-07-21 13:23:52 +02:00
|
|
|
struct evlist *evlist,
|
2022-11-23 10:02:05 -08:00
|
|
|
struct outstate *os)
|
2018-08-30 08:32:52 +02:00
|
|
|
{
|
2022-01-04 22:13:51 -08:00
|
|
|
int all_idx;
|
|
|
|
struct perf_cpu cpu;
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-03-28 16:26:44 -07:00
|
|
|
perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
|
2022-01-04 22:13:28 -08:00
|
|
|
struct evsel *counter;
|
2018-08-30 08:32:52 +02:00
|
|
|
bool first = true;
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, counter) {
|
2022-01-04 22:13:28 -08:00
|
|
|
u64 ena, run, val;
|
|
|
|
double uval;
|
2022-10-17 19:02:25 -07:00
|
|
|
struct perf_stat_evsel *ps = counter->stats;
|
2024-02-20 23:07:54 -08:00
|
|
|
int aggr_idx = 0;
|
2022-01-04 22:13:28 -08:00
|
|
|
|
2024-02-20 23:07:54 -08:00
|
|
|
if (!perf_cpu_map__has(evsel__cpus(counter), cpu))
|
2022-01-04 22:13:28 -08:00
|
|
|
continue;
|
|
|
|
|
2024-02-20 23:07:54 -08:00
|
|
|
cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
|
|
|
|
if (config->aggr_map->map[aggr_idx].cpu.cpu == cpu.cpu)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2022-11-23 10:02:05 -08:00
|
|
|
os->evsel = counter;
|
|
|
|
os->id = aggr_cpu_id__cpu(cpu, /*data=*/NULL);
|
2018-08-30 08:32:52 +02:00
|
|
|
if (first) {
|
2023-02-19 01:28:45 -08:00
|
|
|
print_metric_begin(config, evlist, os, aggr_idx);
|
2018-08-30 08:32:52 +02:00
|
|
|
first = false;
|
|
|
|
}
|
2023-02-19 01:28:45 -08:00
|
|
|
val = ps->aggr[aggr_idx].counts.val;
|
|
|
|
ena = ps->aggr[aggr_idx].counts.ena;
|
|
|
|
run = ps->aggr[aggr_idx].counts.run;
|
2018-08-30 08:32:52 +02:00
|
|
|
|
|
|
|
uval = val * counter->scale;
|
2023-02-19 01:28:45 -08:00
|
|
|
printout(config, os, uval, run, ena, 1.0, aggr_idx);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
2022-05-02 21:17:54 -07:00
|
|
|
if (!first)
|
2022-11-23 10:02:08 -08:00
|
|
|
print_metric_end(config, os);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-14 15:02:18 -08:00
|
|
|
static void print_metric_headers_std(struct perf_stat_config *config,
|
2022-11-23 10:01:58 -08:00
|
|
|
bool no_indent)
|
2022-11-14 15:02:18 -08:00
|
|
|
{
|
2022-11-23 10:01:58 -08:00
|
|
|
fputc(' ', config->output);
|
2022-11-14 15:02:20 -08:00
|
|
|
|
2022-11-14 15:02:18 -08:00
|
|
|
if (!no_indent) {
|
2022-11-14 15:02:20 -08:00
|
|
|
int len = aggr_header_lens[config->aggr_mode];
|
|
|
|
|
2022-11-14 15:02:26 -08:00
|
|
|
if (nr_cgroups || config->cgroup_list)
|
2022-11-14 15:02:20 -08:00
|
|
|
len += CGROUP_LEN + 1;
|
|
|
|
|
|
|
|
fprintf(config->output, "%*s", len, "");
|
2022-11-14 15:02:18 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void print_metric_headers_csv(struct perf_stat_config *config,
|
|
|
|
bool no_indent __maybe_unused)
|
|
|
|
{
|
2024-06-27 17:06:04 -07:00
|
|
|
const char *p;
|
|
|
|
|
2022-11-14 15:02:18 -08:00
|
|
|
if (config->interval)
|
2024-06-27 17:06:04 -07:00
|
|
|
fprintf(config->output, "time%s", config->csv_sep);
|
|
|
|
if (config->iostat_run)
|
|
|
|
return;
|
|
|
|
|
|
|
|
p = aggr_header_csv[config->aggr_mode];
|
|
|
|
while (*p) {
|
|
|
|
if (*p == ',')
|
|
|
|
fputs(config->csv_sep, config->output);
|
|
|
|
else
|
|
|
|
fputc(*p, config->output);
|
|
|
|
p++;
|
|
|
|
}
|
2022-11-14 15:02:18 -08:00
|
|
|
}
|
|
|
|
|
2022-11-23 10:02:06 -08:00
|
|
|
static void print_metric_headers_json(struct perf_stat_config *config __maybe_unused,
|
2022-11-14 15:02:18 -08:00
|
|
|
bool no_indent __maybe_unused)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2018-08-30 08:32:52 +02:00
|
|
|
static void print_metric_headers(struct perf_stat_config *config,
|
2022-11-23 10:01:58 -08:00
|
|
|
struct evlist *evlist, bool no_indent)
|
2018-08-30 08:32:52 +02:00
|
|
|
{
|
2019-07-21 13:23:51 +02:00
|
|
|
struct evsel *counter;
|
2018-08-30 08:32:52 +02:00
|
|
|
struct outstate os = {
|
|
|
|
.fh = config->output
|
|
|
|
};
|
2022-11-07 13:33:09 -08:00
|
|
|
struct perf_stat_output_ctx out = {
|
|
|
|
.ctx = &os,
|
|
|
|
.print_metric = print_metric_header,
|
2024-11-12 16:00:43 +00:00
|
|
|
.new_line = NULL,
|
2022-11-07 13:33:09 -08:00
|
|
|
.force_header = true,
|
|
|
|
};
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:18 -08:00
|
|
|
if (config->json_output)
|
2022-11-23 10:01:58 -08:00
|
|
|
print_metric_headers_json(config, no_indent);
|
2022-11-14 15:02:18 -08:00
|
|
|
else if (config->csv_output)
|
2022-11-23 10:01:58 -08:00
|
|
|
print_metric_headers_csv(config, no_indent);
|
2022-11-14 15:02:18 -08:00
|
|
|
else
|
2022-11-23 10:01:58 -08:00
|
|
|
print_metric_headers_std(config, no_indent);
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2021-04-19 12:41:44 +03:00
|
|
|
if (config->iostat_run)
|
|
|
|
iostat_print_header_prefix(config);
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:26 -08:00
|
|
|
if (config->cgroup_list)
|
|
|
|
os.cgrp = evlist__first(evlist)->cgrp;
|
|
|
|
|
2018-08-30 08:32:52 +02:00
|
|
|
/* Print metrics headers only */
|
|
|
|
evlist__for_each_entry(evlist, counter) {
|
2024-08-02 14:58:00 +08:00
|
|
|
if (!config->iostat_run &&
|
|
|
|
config->aggr_mode != AGGR_NONE && counter->metric_leader != counter)
|
2024-05-09 22:13:09 -07:00
|
|
|
continue;
|
|
|
|
|
2018-08-30 08:32:52 +02:00
|
|
|
os.evsel = counter;
|
2022-11-07 13:33:09 -08:00
|
|
|
|
2025-07-10 16:51:19 -07:00
|
|
|
perf_stat__print_shadow_stats(config, counter, 0, 0, &out);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
2022-11-23 10:02:06 -08:00
|
|
|
|
|
|
|
if (!config->json_output)
|
|
|
|
fputc('\n', config->output);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
|
2024-11-12 16:00:44 +00:00
|
|
|
static void prepare_timestamp(struct perf_stat_config *config,
|
|
|
|
struct outstate *os, struct timespec *ts)
|
2022-11-14 15:02:19 -08:00
|
|
|
{
|
|
|
|
if (config->iostat_run)
|
|
|
|
return;
|
|
|
|
|
2022-11-23 10:02:06 -08:00
|
|
|
if (config->json_output)
|
2024-11-12 16:00:44 +00:00
|
|
|
scnprintf(os->timestamp, sizeof(os->timestamp), "\"interval\" : %lu.%09lu",
|
2022-11-23 10:02:06 -08:00
|
|
|
(unsigned long) ts->tv_sec, ts->tv_nsec);
|
|
|
|
else if (config->csv_output)
|
2024-11-12 16:00:44 +00:00
|
|
|
scnprintf(os->timestamp, sizeof(os->timestamp), "%lu.%09lu%s",
|
2022-11-23 10:01:57 -08:00
|
|
|
(unsigned long) ts->tv_sec, ts->tv_nsec, config->csv_sep);
|
2022-11-14 15:02:19 -08:00
|
|
|
else
|
2024-11-12 16:00:44 +00:00
|
|
|
scnprintf(os->timestamp, sizeof(os->timestamp), "%6lu.%09lu ",
|
2022-11-23 10:01:57 -08:00
|
|
|
(unsigned long) ts->tv_sec, ts->tv_nsec);
|
2022-11-14 15:02:19 -08:00
|
|
|
}
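For a concrete feel of the three timestamp shapes produced above, a small
standalone sketch (the sample value 1.000425421 s is only an assumption):

#include <stdio.h>

int main(void)
{
	unsigned long sec = 1, nsec = 425421;	/* 1.000425421 seconds */
	char buf[64];

	snprintf(buf, sizeof(buf), "\"interval\" : %lu.%09lu", sec, nsec);
	puts(buf);	/* JSON: "interval" : 1.000425421 */

	snprintf(buf, sizeof(buf), "%lu.%09lu%s", sec, nsec, ",");
	puts(buf);	/* CSV: 1.000425421,  (assuming csv_sep is ',') */

	snprintf(buf, sizeof(buf), "%6lu.%09lu ", sec, nsec);
	puts(buf);	/* std: "     1.000425421 " (seconds right-aligned) */

	return 0;
}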
|
|
|
|
|
2022-11-14 15:02:22 -08:00
|
|
|
static void print_header_interval_std(struct perf_stat_config *config,
|
|
|
|
struct target *_target __maybe_unused,
|
|
|
|
struct evlist *evlist,
|
|
|
|
int argc __maybe_unused,
|
|
|
|
const char **argv __maybe_unused)
|
2018-08-30 08:32:52 +02:00
|
|
|
{
|
|
|
|
FILE *output = config->output;
|
|
|
|
|
2022-11-14 15:02:22 -08:00
|
|
|
switch (config->aggr_mode) {
|
|
|
|
case AGGR_NODE:
|
|
|
|
case AGGR_SOCKET:
|
|
|
|
case AGGR_DIE:
|
perf stat: Support per-cluster aggregation
Some platforms have 'cluster' topology and CPUs in the cluster will
share resources like L3 Cache Tag (for HiSilicon Kunpeng SoC) or L2
cache (for Intel Jacobsville). Parsing and building the cluster
topology has been supported since [1].
perf stat already supports aggregation for other topologies like die or
socket, etc. It'll be useful to aggregate per-cluster to find problems
like L3T bandwidth contention.
This patch adds support for a "--per-cluster" option for per-cluster
aggregation. It also updates the docs and the related test. The output
will look like:
[root@localhost tmp]# perf stat -a -e LLC-load --per-cluster -- sleep 5
Performance counter stats for 'system wide':
S56-D0-CLS158 4 1,321,521,570 LLC-load
S56-D0-CLS594 4 794,211,453 LLC-load
S56-D0-CLS1030 4 41,623 LLC-load
S56-D0-CLS1466 4 41,646 LLC-load
S56-D0-CLS1902 4 16,863 LLC-load
S56-D0-CLS2338 4 15,721 LLC-load
S56-D0-CLS2774 4 22,671 LLC-load
[...]
On a legacy system without clusters or cluster support, the output will
look like:
[root@localhost perf]# perf stat -a -e cycles --per-cluster -- sleep 1
Performance counter stats for 'system wide':
S56-D0-CLS0 64 18,011,485 cycles
S7182-D0-CLS0 64 16,548,835 cycles
Note that this patch doesn't mix the cluster information into the output
of --per-core, to avoid breaking any tools/scripts using it.
Note that perf recently supports "--per-cache" aggregation, but it's not
the same as the cluster, although cluster CPUs may share some cache
resources. For example, on my machine all clusters within a die share
the same L3 cache:
$ cat /sys/devices/system/cpu/cpu0/cache/index3/shared_cpu_list
0-31
$ cat /sys/devices/system/cpu/cpu0/topology/cluster_cpus_list
0-3
[1] commit c5e22feffdd7 ("topology: Represent clusters of CPUs within a die")
Tested-by: Jie Zhan <zhanjie9@hisilicon.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Cc: james.clark@arm.com
Cc: 21cnbao@gmail.com
Cc: prime.zeng@hisilicon.com
Cc: Jonathan.Cameron@huawei.com
Cc: fanghao11@huawei.com
Cc: linuxarm@huawei.com
Cc: tim.c.chen@intel.com
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240208024026.2691-1-yangyicong@huawei.com
2024-02-08 10:40:26 +08:00
|
|
|
case AGGR_CLUSTER:
|
2023-05-17 22:57:42 +05:30
|
|
|
case AGGR_CACHE:
|
2022-11-14 15:02:22 -08:00
|
|
|
case AGGR_CORE:
|
2025-06-27 13:16:41 -07:00
|
|
|
fprintf(output, "#%*s %-*s ctrs",
|
2022-11-14 15:02:22 -08:00
|
|
|
INTERVAL_LEN - 1, "time",
|
|
|
|
aggr_header_lens[config->aggr_mode],
|
|
|
|
aggr_header_std[config->aggr_mode]);
|
|
|
|
break;
|
|
|
|
case AGGR_NONE:
|
|
|
|
fprintf(output, "#%*s %-*s",
|
|
|
|
INTERVAL_LEN - 1, "time",
|
|
|
|
aggr_header_lens[config->aggr_mode],
|
|
|
|
aggr_header_std[config->aggr_mode]);
|
|
|
|
break;
|
|
|
|
case AGGR_THREAD:
|
|
|
|
fprintf(output, "#%*s %*s-%-*s",
|
|
|
|
INTERVAL_LEN - 1, "time",
|
|
|
|
COMM_LEN, "comm", PID_LEN, "pid");
|
|
|
|
break;
|
|
|
|
case AGGR_GLOBAL:
|
|
|
|
default:
|
|
|
|
if (!config->iostat_run)
|
|
|
|
fprintf(output, "#%*s",
|
|
|
|
INTERVAL_LEN - 1, "time");
|
|
|
|
case AGGR_UNSET:
|
|
|
|
case AGGR_MAX:
|
|
|
|
break;
|
|
|
|
}
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:22 -08:00
|
|
|
if (config->metric_only)
|
2022-11-23 10:01:58 -08:00
|
|
|
print_metric_headers(config, evlist, true);
|
2022-11-14 15:02:22 -08:00
|
|
|
else
|
|
|
|
fprintf(output, " %*s %*s events\n",
|
|
|
|
COUNTS_LEN, "counts", config->unit_width, "unit");
|
|
|
|
}
|
2022-11-14 15:02:20 -08:00
|
|
|
|
2022-11-14 15:02:22 -08:00
|
|
|
static void print_header_std(struct perf_stat_config *config,
|
|
|
|
struct target *_target, struct evlist *evlist,
|
|
|
|
int argc, const char **argv)
|
|
|
|
{
|
|
|
|
FILE *output = config->output;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
fprintf(output, "\n");
|
|
|
|
fprintf(output, " Performance counter stats for ");
|
|
|
|
if (_target->bpf_str)
|
|
|
|
fprintf(output, "\'BPF program(s) %s", _target->bpf_str);
|
|
|
|
else if (_target->system_wide)
|
|
|
|
fprintf(output, "\'system wide");
|
|
|
|
else if (_target->cpu_list)
|
|
|
|
fprintf(output, "\'CPU(s) %s", _target->cpu_list);
|
|
|
|
else if (!target__has_task(_target)) {
|
|
|
|
fprintf(output, "\'%s", argv ? argv[0] : "pipe");
|
|
|
|
for (i = 1; argv && (i < argc); i++)
|
|
|
|
fprintf(output, " %s", argv[i]);
|
|
|
|
} else if (_target->pid)
|
|
|
|
fprintf(output, "process id \'%s", _target->pid);
|
|
|
|
else
|
|
|
|
fprintf(output, "thread id \'%s", _target->tid);
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:22 -08:00
|
|
|
fprintf(output, "\'");
|
|
|
|
if (config->run_count > 1)
|
|
|
|
fprintf(output, " (%d runs)", config->run_count);
|
|
|
|
fprintf(output, ":\n\n");
|
|
|
|
|
|
|
|
if (config->metric_only)
|
2022-11-23 10:01:58 -08:00
|
|
|
print_metric_headers(config, evlist, false);
|
2022-11-14 15:02:22 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void print_header_csv(struct perf_stat_config *config,
|
|
|
|
struct target *_target __maybe_unused,
|
|
|
|
struct evlist *evlist,
|
|
|
|
int argc __maybe_unused,
|
|
|
|
const char **argv __maybe_unused)
|
|
|
|
{
|
|
|
|
if (config->metric_only)
|
2022-11-23 10:01:58 -08:00
|
|
|
print_metric_headers(config, evlist, true);
|
2022-11-14 15:02:22 -08:00
|
|
|
}
|
|
|
|
static void print_header_json(struct perf_stat_config *config,
|
|
|
|
struct target *_target __maybe_unused,
|
|
|
|
struct evlist *evlist,
|
|
|
|
int argc __maybe_unused,
|
|
|
|
const char **argv __maybe_unused)
|
|
|
|
{
|
|
|
|
if (config->metric_only)
|
2022-11-23 10:01:58 -08:00
|
|
|
print_metric_headers(config, evlist, true);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void print_header(struct perf_stat_config *config,
|
|
|
|
struct target *_target,
|
2022-11-14 15:02:22 -08:00
|
|
|
struct evlist *evlist,
|
2018-08-30 08:32:52 +02:00
|
|
|
int argc, const char **argv)
|
|
|
|
{
|
2022-11-14 15:02:22 -08:00
|
|
|
static int num_print_iv;
|
2018-08-30 08:32:52 +02:00
|
|
|
|
|
|
|
fflush(stdout);
|
|
|
|
|
2022-11-14 15:02:22 -08:00
|
|
|
if (config->interval_clear)
|
|
|
|
puts(CONSOLE_CLEAR);
|
2018-08-30 08:32:52 +02:00
|
|
|
|
2022-11-14 15:02:22 -08:00
|
|
|
if (num_print_iv == 0 || config->interval_clear) {
|
|
|
|
if (config->json_output)
|
|
|
|
print_header_json(config, _target, evlist, argc, argv);
|
|
|
|
else if (config->csv_output)
|
|
|
|
print_header_csv(config, _target, evlist, argc, argv);
|
|
|
|
else if (config->interval)
|
|
|
|
print_header_interval_std(config, _target, evlist, argc, argv);
|
|
|
|
else
|
|
|
|
print_header_std(config, _target, evlist, argc, argv);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
2022-11-14 15:02:22 -08:00
|
|
|
|
|
|
|
if (num_print_iv++ == 25)
|
|
|
|
num_print_iv = 0;
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static int get_precision(double num)
|
|
|
|
{
|
|
|
|
if (num > 1)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return lround(ceil(-log10(num)));
|
|
|
|
}
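As a worked example of the precision rule above (the stddev value is an
assumption for illustration): a run-to-run stddev of 0.004 s needs
ceil(-log10(0.004)) = 3 decimal places to become visible, and print_footer()
below then shows two extra digits:

#include <assert.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
	double sd = 0.004;	/* assumed stddev of the elapsed times, in seconds */
	int precision = (sd > 1) ? 0 : (int)lround(ceil(-log10(sd)));

	assert(precision == 3);	/* -log10(0.004) ~= 2.4, rounded up to 3 */

	/* Mirrors the footer format: two more digits than the inaccuracy. */
	printf(" %17.*f +- %.*f seconds time elapsed\n",
	       precision + 2, 1.00142, precision + 2, sd);
	return 0;
}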
|
|
|
|
|
|
|
|
static void print_table(struct perf_stat_config *config,
|
|
|
|
FILE *output, int precision, double avg)
|
|
|
|
{
|
|
|
|
char tmp[64];
|
|
|
|
int idx, indent = 0;
|
|
|
|
|
|
|
|
scnprintf(tmp, 64, " %17.*f", precision, avg);
|
|
|
|
while (tmp[indent] == ' ')
|
|
|
|
indent++;
|
|
|
|
|
|
|
|
fprintf(output, "%*s# Table of individual measurements:\n", indent, "");
|
|
|
|
|
|
|
|
for (idx = 0; idx < config->run_count; idx++) {
|
|
|
|
double run = (double) config->walltime_run[idx] / NSEC_PER_SEC;
|
|
|
|
int h, n = 1 + abs((int) (100.0 * (run - avg)/run) / 5);
|
|
|
|
|
|
|
|
fprintf(output, " %17.*f (%+.*f) ",
|
|
|
|
precision, run, precision, run - avg);
|
|
|
|
|
|
|
|
for (h = 0; h < n; h++)
|
|
|
|
fprintf(output, "#");
|
|
|
|
|
|
|
|
fprintf(output, "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
fprintf(output, "\n%*s# Final result:\n", indent, "");
|
|
|
|
}
|
|
|
|
|
|
|
|
static double timeval2double(struct timeval *t)
|
|
|
|
{
|
|
|
|
return t->tv_sec + (double) t->tv_usec/USEC_PER_SEC;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void print_footer(struct perf_stat_config *config)
|
|
|
|
{
|
|
|
|
double avg = avg_stats(config->walltime_nsecs_stats) / NSEC_PER_SEC;
|
|
|
|
FILE *output = config->output;
|
|
|
|
|
2022-11-14 15:02:23 -08:00
|
|
|
if (config->interval || config->csv_output || config->json_output)
|
|
|
|
return;
|
|
|
|
|
2018-08-30 08:32:52 +02:00
|
|
|
if (!config->null_run)
|
|
|
|
fprintf(output, "\n");
|
|
|
|
|
|
|
|
if (config->run_count == 1) {
|
|
|
|
fprintf(output, " %17.9f seconds time elapsed", avg);
|
|
|
|
|
|
|
|
if (config->ru_display) {
|
|
|
|
double ru_utime = timeval2double(&config->ru_data.ru_utime);
|
|
|
|
double ru_stime = timeval2double(&config->ru_data.ru_stime);
|
|
|
|
|
|
|
|
fprintf(output, "\n\n");
|
|
|
|
fprintf(output, " %17.9f seconds user\n", ru_utime);
|
|
|
|
fprintf(output, " %17.9f seconds sys\n", ru_stime);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
double sd = stddev_stats(config->walltime_nsecs_stats) / NSEC_PER_SEC;
|
|
|
|
/*
|
|
|
|
* Display at most 2 more significant
|
|
|
|
* digits than the stddev inaccuracy.
|
|
|
|
*/
|
|
|
|
int precision = get_precision(sd) + 2;
|
|
|
|
|
|
|
|
if (config->walltime_run_table)
|
|
|
|
print_table(config, output, precision, avg);
|
|
|
|
|
|
|
|
fprintf(output, " %17.*f +- %.*f seconds time elapsed",
|
|
|
|
precision, avg, precision, sd);
|
|
|
|
|
2024-11-12 16:00:41 +00:00
|
|
|
print_noise_pct(config, NULL, sd, avg, /*before_metric=*/false);
|
2018-08-30 08:32:52 +02:00
|
|
|
}
|
|
|
|
fprintf(output, "\n\n");
|
|
|
|
|
2020-02-24 13:59:22 -08:00
|
|
|
if (config->print_free_counters_hint && sysctl__nmi_watchdog_enabled())
|
2018-08-30 08:32:52 +02:00
|
|
|
fprintf(output,
|
|
|
|
"Some events weren't counted. Try disabling the NMI watchdog:\n"
|
|
|
|
" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
|
|
|
|
" perf stat ...\n"
|
|
|
|
" echo 1 > /proc/sys/kernel/nmi_watchdog\n");
|
|
|
|
}
|
|
|
|
|
perf stat: Support 'percore' event qualifier
With this patch, we can use the 'percore' event qualifier in perf-stat.
root@skl:/tmp# perf stat -e cpu/event=0,umask=0x3,percore=1/,cpu/event=0,umask=0x3/ -a -A -I1000
1.000773050 S0-C0 98,352,832 cpu/event=0,umask=0x3,percore=1/ (50.01%)
1.000773050 S0-C1 103,763,057 cpu/event=0,umask=0x3,percore=1/ (50.02%)
1.000773050 S0-C2 196,776,995 cpu/event=0,umask=0x3,percore=1/ (50.02%)
1.000773050 S0-C3 176,493,779 cpu/event=0,umask=0x3,percore=1/ (50.02%)
1.000773050 CPU0 47,699,641 cpu/event=0,umask=0x3/ (50.02%)
1.000773050 CPU1 49,052,451 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU2 102,771,422 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU3 100,784,662 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU4 43,171,342 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU5 54,152,158 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU6 93,618,410 cpu/event=0,umask=0x3/ (49.98%)
1.000773050 CPU7 74,477,589 cpu/event=0,umask=0x3/ (49.99%)
In this example, we count the event 'ref-cycles' per-core and per-CPU in
one perf stat command-line. From the output, we can see:
S0-C0 = CPU0 + CPU4
S0-C1 = CPU1 + CPU5
S0-C2 = CPU2 + CPU6
S0-C3 = CPU3 + CPU7
So the result is as expected (the tiny difference can be ignored).
Note that the 'percore' event qualifier needs to be used with the '-A'
option.
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Tested-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jin Yao <yao.jin@intel.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1555077590-27664-4-git-send-email-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-04-12 21:59:49 +08:00
|
|
|
static void print_percore(struct perf_stat_config *config,
|
2022-11-23 10:02:05 -08:00
|
|
|
struct evsel *counter, struct outstate *os)
|
2019-04-12 21:59:49 +08:00
|
|
|
{
|
|
|
|
bool metric_only = config->metric_only;
|
|
|
|
FILE *output = config->output;
|
2022-10-17 19:02:26 -07:00
|
|
|
struct cpu_aggr_map *core_map;
|
2023-02-19 01:28:45 -08:00
|
|
|
int aggr_idx, core_map_len = 0;
|
2019-04-12 21:59:49 +08:00
|
|
|
|
2020-06-05 17:17:40 +08:00
|
|
|
if (!config->aggr_map || !config->aggr_get_id)
|
2019-04-12 21:59:49 +08:00
|
|
|
return;
|
|
|
|
|
perf stat: Show percore counts in per CPU output
We have supported the event modifier "percore", which sums up the event
counts for all hardware threads in a core and shows the counts per core.
For example,
# perf stat -e cpu/event=cpu-cycles,percore/ -a -A -- sleep 1
Performance counter stats for 'system wide':
S0-D0-C0 395,072 cpu/event=cpu-cycles,percore/
S0-D0-C1 851,248 cpu/event=cpu-cycles,percore/
S0-D0-C2 954,226 cpu/event=cpu-cycles,percore/
S0-D0-C3 1,233,659 cpu/event=cpu-cycles,percore/
This patch provides a new option "--percore-show-thread". It is used
together with the event modifier "percore" to sum up the event counts
for all hardware threads in a core but show the counts per hardware
thread.
This is essentially a replacement for the any bit (which is gone in
Icelake). Per core counts are useful for some formulas, e.g. CoreIPC.
The original percore version was inconvenient to post process. This
variant matches the output of the any bit.
With this patch, for example,
# perf stat -e cpu/event=cpu-cycles,percore/ -a -A --percore-show-thread -- sleep 1
Performance counter stats for 'system wide':
CPU0 2,453,061 cpu/event=cpu-cycles,percore/
CPU1 1,823,921 cpu/event=cpu-cycles,percore/
CPU2 1,383,166 cpu/event=cpu-cycles,percore/
CPU3 1,102,652 cpu/event=cpu-cycles,percore/
CPU4 2,453,061 cpu/event=cpu-cycles,percore/
CPU5 1,823,921 cpu/event=cpu-cycles,percore/
CPU6 1,383,166 cpu/event=cpu-cycles,percore/
CPU7 1,102,652 cpu/event=cpu-cycles,percore/
We can see counts are duplicated in CPU pairs (CPU0/CPU4, CPU1/CPU5,
CPU2/CPU6, CPU3/CPU7).
The interval mode also works. For example,
# perf stat -e cpu/event=cpu-cycles,percore/ -a -A --percore-show-thread -I 1000
# time CPU counts unit events
1.000425421 CPU0 925,032 cpu/event=cpu-cycles,percore/
1.000425421 CPU1 430,202 cpu/event=cpu-cycles,percore/
1.000425421 CPU2 436,843 cpu/event=cpu-cycles,percore/
1.000425421 CPU3 1,192,504 cpu/event=cpu-cycles,percore/
1.000425421 CPU4 925,032 cpu/event=cpu-cycles,percore/
1.000425421 CPU5 430,202 cpu/event=cpu-cycles,percore/
1.000425421 CPU6 436,843 cpu/event=cpu-cycles,percore/
1.000425421 CPU7 1,192,504 cpu/event=cpu-cycles,percore/
If we offline CPU5, the result is:
# perf stat -e cpu/event=cpu-cycles,percore/ -a -A --percore-show-thread -- sleep 1
Performance counter stats for 'system wide':
CPU0 2,752,148 cpu/event=cpu-cycles,percore/
CPU1 1,009,312 cpu/event=cpu-cycles,percore/
CPU2 2,784,072 cpu/event=cpu-cycles,percore/
CPU3 2,427,922 cpu/event=cpu-cycles,percore/
CPU4 2,752,148 cpu/event=cpu-cycles,percore/
CPU6 2,784,072 cpu/event=cpu-cycles,percore/
CPU7 2,427,922 cpu/event=cpu-cycles,percore/
1.001416041 seconds time elapsed
v4:
---
Ravi Bangoria reported an issue in v3: once we offline a CPU,
the output is not correct. The issue is that we should use the cpu
idx in print_percore_thread rather than the cpu value.
v3:
---
1. Fix the interval mode output error
2. Use cpu value (not cpu index) in config->aggr_get_id().
3. Refine the code according to Jiri's comments.
v2:
---
Add the explanation in change log. This is essentially a replacement
for the any bit. No code change.
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Tested-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20200214080452.26402-1-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-02-14 16:04:52 +08:00
|
|
|
if (config->percore_show_thread)
|
2022-11-23 10:02:05 -08:00
|
|
|
return print_counter(config, counter, os);
|
2020-02-14 16:04:52 +08:00
|
|
|
|
2023-02-19 01:28:45 -08:00
|
|
|
/*
|
|
|
|
* core_map will hold the aggr_cpu_id for the cores that have been
|
|
|
|
* printed so that each core is printed just once.
|
|
|
|
*/
|
2022-10-17 19:02:26 -07:00
|
|
|
core_map = cpu_aggr_map__empty_new(config->aggr_map->nr);
|
|
|
|
if (core_map == NULL) {
|
|
|
|
fprintf(output, "Cannot allocate per-core aggr map for display\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-02-19 01:28:45 -08:00
|
|
|
cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
|
|
|
|
struct perf_cpu curr_cpu = config->aggr_map->map[aggr_idx].cpu;
|
2022-10-17 19:02:26 -07:00
|
|
|
struct aggr_cpu_id core_id = aggr_cpu_id__core(curr_cpu, NULL);
|
|
|
|
bool found = false;
|
|
|
|
|
2023-02-19 01:28:45 -08:00
|
|
|
for (int i = 0; i < core_map_len; i++) {
|
2022-10-17 19:02:26 -07:00
|
|
|
if (aggr_cpu_id__equal(&core_map->map[i], &core_id)) {
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (found)
|
|
|
|
continue;
|
|
|
|
|
2023-02-19 01:28:45 -08:00
|
|
|
print_counter_aggrdata(config, counter, aggr_idx, os);
|
2022-10-17 19:02:26 -07:00
|
|
|
|
2023-02-19 01:28:45 -08:00
|
|
|
core_map->map[core_map_len++] = core_id;
|
2019-04-12 21:59:49 +08:00
|
|
|
}
|
2022-10-17 19:02:26 -07:00
|
|
|
free(core_map);
|
2019-04-12 21:59:49 +08:00
|
|
|
|
|
|
|
	if (metric_only)
		fputc('\n', output);
}
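
The 'percore' annotation above shows per-core values that correspond to the
sum of the SMT sibling CPUs on each core (S0-C0 = CPU0 + CPU4, and so on).
A minimal stand-alone sketch of that summation idea, using a made-up
core_of[] mapping and synthetic counts rather than perf's real topology and
counter code:

#include <inttypes.h>
#include <stdio.h>

/* Illustrative only: sum per-CPU counts into per-core buckets. */
static void sum_per_core(const uint64_t *cpu_count, const int *core_of,
			 int ncpus, uint64_t *core_sum, int ncores)
{
	for (int c = 0; c < ncores; c++)
		core_sum[c] = 0;
	for (int cpu = 0; cpu < ncpus; cpu++)
		core_sum[core_of[cpu]] += cpu_count[cpu];
}

int main(void)
{
	/* Hypothetical 8-CPU, 4-core box: CPU0/CPU4 form core 0, CPU1/CPU5 core 1, ... */
	const int core_of[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };
	const uint64_t cpu_count[8] = { 100, 200, 300, 400, 110, 210, 310, 410 };
	uint64_t core_sum[4];

	sum_per_core(cpu_count, core_of, 8, core_sum, 4);
	for (int c = 0; c < 4; c++)
		printf("S0-C%d %" PRIu64 "\n", c, core_sum[c]);
	return 0;
}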

static void print_cgroup_counter(struct perf_stat_config *config, struct evlist *evlist,
				 struct outstate *os)
{
	struct evsel *counter;

	evlist__for_each_entry(evlist, counter) {
		if (os->cgrp != counter->cgrp) {
			if (os->cgrp != NULL)
perf stat: Tidy up JSON metric-only output when no metrics
The JSON metric-only output printed an empty string for each metric; that
padding is presumably needed for CSV output to keep the column count
consistent. Simply ignoring the empty metrics in JSON, however, left a broken
JSON object with a trailing comma.
So I added a dummy '"metric-value" : "none"' field. To do that, struct
outstate needs to be passed to print_metric_end() so it can check whether any
metric value has been printed.
Before:
# perf stat -aj --metric-only --per-socket --for-each-cgroup system.slice true
{"socket" : "S0", "cpu-count" : 8, "cgroup" : "system.slice", "" : "", "" : "", "" : "", "" : "", "" : "", "" : "", "" : "", "" : ""}
After:
# perf stat -aj --metric-only --per-socket --for-each-cgroup system.slice true
{"socket" : "S0", "cpu-count" : 8, "cgroup" : "system.slice", "metric-value" : "none"}
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20221123180208.2068936-16-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
				print_metric_end(config, os);

			os->cgrp = counter->cgrp;
			print_metric_begin(config, evlist, os, /*aggr_idx=*/0);
		}

		print_counter(config, counter, os);
	}
	if (os->cgrp)
		print_metric_end(config, os);
}
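
A stand-alone sketch of the trailing-comma fix described in the 'Tidy up JSON
metric-only output' annotation above: track whether any metric field was
emitted and close the JSON object with a dummy '"metric-value" : "none"' field
otherwise. The struct and function names here are made up for illustration;
this is not the perf implementation.

#include <stdbool.h>
#include <stdio.h>

struct json_out {
	FILE *fh;
	bool printed_metric;	/* did we emit any metric field yet? */
};

/* Open an object with a fixed prefix field. */
static void metric_begin(struct json_out *os, const char *cgroup)
{
	fprintf(os->fh, "{\"cgroup\" : \"%s\"", cgroup);
	os->printed_metric = false;
}

/* Emit one metric; the leading ", " avoids any trailing-comma problem. */
static void metric_print(struct json_out *os, const char *name, double val)
{
	fprintf(os->fh, ", \"%s\" : %f", name, val);
	os->printed_metric = true;
}

/* Close the object; emit a placeholder if no metric was printed. */
static void metric_end(struct json_out *os)
{
	if (!os->printed_metric)
		fprintf(os->fh, ", \"metric-value\" : \"none\"");
	fprintf(os->fh, "}\n");
}

int main(void)
{
	struct json_out os = { .fh = stdout };

	metric_begin(&os, "system.slice");
	metric_end(&os);		/* -> {..., "metric-value" : "none"} */

	metric_begin(&os, "user.slice");
	metric_print(&os, "ipc", 1.23);
	metric_end(&os);
	return 0;
}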

void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config,
			    struct target *_target, struct timespec *ts,
			    int argc, const char **argv)
{
	bool metric_only = config->metric_only;
	struct evsel *counter;
	struct outstate os = {
		.fh = config->output,
		.first = true,
	};

	evlist__uniquify_evsel_names(evlist, config);

	if (config->iostat_run)
		evlist->selected = evlist__first(evlist);

	if (config->interval)
		prepare_timestamp(config, &os, ts);

	print_header(config, _target, evlist, argc, argv);

	switch (config->aggr_mode) {
	case AGGR_CORE:
	case AGGR_CACHE:
perf stat: Support per-cluster aggregation
Some platforms have 'cluster' topology and CPUs in the cluster will
share resources like L3 Cache Tag (for HiSilicon Kunpeng SoC) or L2
cache (for Intel Jacobsville). Parsing and building the cluster
topology has been supported since [1].
perf stat already supports aggregation for other topologies like
die or socket. It'll be useful to aggregate per-cluster to find
problems like L3T bandwidth contention.
This patch adds support for the "--per-cluster" option for per-cluster
aggregation. It also updates the docs and the related test. The output will
look like:
[root@localhost tmp]# perf stat -a -e LLC-load --per-cluster -- sleep 5
Performance counter stats for 'system wide':
S56-D0-CLS158 4 1,321,521,570 LLC-load
S56-D0-CLS594 4 794,211,453 LLC-load
S56-D0-CLS1030 4 41,623 LLC-load
S56-D0-CLS1466 4 41,646 LLC-load
S56-D0-CLS1902 4 16,863 LLC-load
S56-D0-CLS2338 4 15,721 LLC-load
S56-D0-CLS2774 4 22,671 LLC-load
[...]
On a legacy system without clusters, or without cluster support, the output
will look like:
[root@localhost perf]# perf stat -a -e cycles --per-cluster -- sleep 1
Performance counter stats for 'system wide':
S56-D0-CLS0 64 18,011,485 cycles
S7182-D0-CLS0 64 16,548,835 cycles
Note that this patch doesn't mix the cluster information into the --per-core
output, to avoid breaking any tools/scripts using it.
Note that perf recently gained "--per-cache" aggregation, but that is not the
same as per-cluster aggregation, although cluster CPUs may share some cache
resources. For example, on my machine all clusters within a die share the
same L3 cache:
$ cat /sys/devices/system/cpu/cpu0/cache/index3/shared_cpu_list
0-31
$ cat /sys/devices/system/cpu/cpu0/topology/cluster_cpus_list
0-3
[1] commit c5e22feffdd7 ("topology: Represent clusters of CPUs within a die")
Tested-by: Jie Zhan <zhanjie9@hisilicon.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Cc: james.clark@arm.com
Cc: 21cnbao@gmail.com
Cc: prime.zeng@hisilicon.com
Cc: Jonathan.Cameron@huawei.com
Cc: fanghao11@huawei.com
Cc: linuxarm@huawei.com
Cc: tim.c.chen@intel.com
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240208024026.2691-1-yangyicong@huawei.com
	case AGGR_CLUSTER:
	case AGGR_DIE:
	case AGGR_SOCKET:
perf stat: Add --per-node aggregation support
Add a new --per-node option to aggregate counts per NUMA
node for system-wide mode measurements.
You can specify --per-node in live mode:
# perf stat -a -I 1000 -e cycles --per-node
# time node cpus counts unit events
1.000542550 N0 20 6,202,097 cycles
1.000542550 N1 20 639,559 cycles
2.002040063 N0 20 7,412,495 cycles
2.002040063 N1 20 2,185,577 cycles
3.003451699 N0 20 6,508,917 cycles
3.003451699 N1 20 765,607 cycles
...
Or in the record/report stat session:
# perf stat record -a -I 1000 -e cycles
# time counts unit events
1.000536937 10,008,468 cycles
2.002090152 9,578,539 cycles
3.003625233 7,647,869 cycles
4.005135036 7,032,086 cycles
^C 4.340902364 3,923,893 cycles
# perf stat report --per-node
# time node cpus counts unit events
1.000536937 N0 20 9,355,086 cycles
1.000536937 N1 20 653,382 cycles
2.002090152 N0 20 7,712,838 cycles
2.002090152 N1 20 1,865,701 cycles
3.003625233 N0 20 6,604,441 cycles
3.003625233 N1 20 1,043,428 cycles
4.005135036 N0 20 6,350,522 cycles
4.005135036 N1 20 681,564 cycles
4.340902364 N0 20 3,403,188 cycles
4.340902364 N1 20 520,705 cycles
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Joe Mario <jmario@redhat.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20190904073415.723-4-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
	case AGGR_NODE:
		if (config->cgroup_list)
			print_aggr_cgroup(config, evlist, &os);
		else
			print_aggr(config, evlist, &os);
		break;
	case AGGR_THREAD:
	case AGGR_GLOBAL:
		if (config->iostat_run) {
			iostat_print_counters(evlist, config, ts, os.timestamp,
					      (iostat_print_counter_t)print_counter, &os);
		} else if (config->cgroup_list) {
			print_cgroup_counter(config, evlist, &os);
		} else {
			print_metric_begin(config, evlist, &os, /*aggr_idx=*/0);
			evlist__for_each_entry(evlist, counter) {
				print_counter(config, counter, &os);
			}
			print_metric_end(config, &os);
		}
		break;
	case AGGR_NONE:
		if (metric_only)
			print_no_aggr_metric(config, evlist, &os);
		else {
			evlist__for_each_entry(evlist, counter) {
				if (counter->percore)
					print_percore(config, counter, &os);
				else
					print_counter(config, counter, &os);
			}
		}
		break;

perf stat: Add JSON output option
CSV output is tricky to format, and column layout changes easily break
parsers. The new JSON-formatted output identifies each field with a
consistent, informative name, making the output reliably parseable.
CSV output example:
1.20,msec,task-clock:u,1204272,100.00,0.697,CPUs utilized
0,,context-switches:u,1204272,100.00,0.000,/sec
0,,cpu-migrations:u,1204272,100.00,0.000,/sec
70,,page-faults:u,1204272,100.00,58.126,K/sec
JSON output example:
{"counter-value" : "3805.723968", "unit" : "msec", "event" :
"cpu-clock", "event-runtime" : 3805731510100.00, "pcnt-running"
: 100.00, "metric-value" : 4.007571, "metric-unit" : "CPUs utilized"}
{"counter-value" : "6166.000000", "unit" : "", "event" :
"context-switches", "event-runtime" : 3805723045100.00, "pcnt-running"
: 100.00, "metric-value" : 1.620191, "metric-unit" : "K/sec"}
{"counter-value" : "466.000000", "unit" : "", "event" :
"cpu-migrations", "event-runtime" : 3805727613100.00, "pcnt-running"
: 100.00, "metric-value" : 122.447136, "metric-unit" : "/sec"}
{"counter-value" : "208.000000", "unit" : "", "event" :
"page-faults", "event-runtime" : 3805726799100.00, "pcnt-running"
: 100.00, "metric-value" : 54.654516, "metric-unit" : "/sec"}
Documentation for the JSON option is also added.
There is some tidying up of the CSV code, including fixing a potential memory
overrun in the os.nfields setup. To facilitate this, an AGGR_MAX value is added.
Committer notes:
Fixed up using PRIu64 to format u64 values, not %lu.
Committer testing:
⬢[acme@toolbox perf]$ perf stat -j sleep 1
{"counter-value" : "0.731750", "unit" : "msec", "event" : "task-clock:u", "event-runtime" : 731750, "pcnt-running" : 100.00, "metric-value" : 0.000731, "metric-unit" : "CPUs utilized"}
{"counter-value" : "0.000000", "unit" : "", "event" : "context-switches:u", "event-runtime" : 731750, "pcnt-running" : 100.00, "metric-value" : 0.000000, "metric-unit" : "/sec"}
{"counter-value" : "0.000000", "unit" : "", "event" : "cpu-migrations:u", "event-runtime" : 731750, "pcnt-running" : 100.00, "metric-value" : 0.000000, "metric-unit" : "/sec"}
{"counter-value" : "75.000000", "unit" : "", "event" : "page-faults:u", "event-runtime" : 731750, "pcnt-running" : 100.00, "metric-value" : 102.494021, "metric-unit" : "K/sec"}
{"counter-value" : "578765.000000", "unit" : "", "event" : "cycles:u", "event-runtime" : 379366, "pcnt-running" : 49.00, "metric-value" : 0.790933, "metric-unit" : "GHz"}
{"counter-value" : "1298.000000", "unit" : "", "event" : "stalled-cycles-frontend:u", "event-runtime" : 768020, "pcnt-running" : 100.00, "metric-value" : 0.224271, "metric-unit" : "frontend cycles idle"}
{"counter-value" : "21984.000000", "unit" : "", "event" : "stalled-cycles-backend:u", "event-runtime" : 768020, "pcnt-running" : 100.00, "metric-value" : 3.798433, "metric-unit" : "backend cycles idle"}
{"counter-value" : "468197.000000", "unit" : "", "event" : "instructions:u", "event-runtime" : 768020, "pcnt-running" : 100.00, "metric-value" : 0.808959, "metric-unit" : "insn per cycle"}
{"metric-value" : 0.046955, "metric-unit" : "stalled cycles per insn"}
{"counter-value" : "103335.000000", "unit" : "", "event" : "branches:u", "event-runtime" : 768020, "pcnt-running" : 100.00, "metric-value" : 141.216262, "metric-unit" : "M/sec"}
{"counter-value" : "2381.000000", "unit" : "", "event" : "branch-misses:u", "event-runtime" : 388654, "pcnt-running" : 50.00, "metric-value" : 2.304156, "metric-unit" : "of all branches"}
⬢[acme@toolbox perf]$
Signed-off-by: Claire Jensen <cjense@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alyssa Ross <hi@alyssa.is>
Cc: Claire Jensen <clairej735@gmail.com>
Cc: Florian Fischer <florian.fischer@muhq.space>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Like Xu <likexu@tencent.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20220805200105.2020995-2-irogers@google.com
Signed-off-by: Ian Rogers <irogers@google.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
	case AGGR_MAX:
	case AGGR_UNSET:
	default:
		break;
	}

	print_footer(config);

	fflush(config->output);
}
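
The per-cluster and per-node annotations above rely on the CPU topology the
kernel exposes under sysfs (for example
/sys/devices/system/cpu/cpu0/topology/cluster_cpus_list). A minimal
stand-alone sketch of reading such a list, independent of perf's own cpumap
code; older kernels may not expose cluster topology at all:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu0/topology/cluster_cpus_list";
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* e.g. kernel without cluster topology support */
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 cluster CPUs: %s", buf);
	fclose(f);
	return 0;
}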
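
For reference, a stand-alone sketch that formats one counter record with the
field names shown in the 'Add JSON output option' annotation above; the sample
values are taken from that example, and this is an illustrative formatter, not
the code perf uses:

#include <stdio.h>

/*
 * Illustrative only: emit one record with the field names used by the JSON
 * output ("counter-value", "unit", "event", "event-runtime", "pcnt-running",
 * "metric-value", "metric-unit").
 */
static void print_json_counter(FILE *fh, double counter_val, const char *unit,
			       const char *event, unsigned long long runtime,
			       double pcnt_running, double metric_val,
			       const char *metric_unit)
{
	fprintf(fh, "{\"counter-value\" : \"%f\", \"unit\" : \"%s\", "
		    "\"event\" : \"%s\", \"event-runtime\" : %llu, "
		    "\"pcnt-running\" : %.2f, \"metric-value\" : %f, "
		    "\"metric-unit\" : \"%s\"}\n",
		counter_val, unit, event, runtime, pcnt_running, metric_val,
		metric_unit);
}

int main(void)
{
	/* Sample values from the task-clock line in the CSV example above. */
	print_json_counter(stdout, 1.204272, "msec", "task-clock:u",
			   1204272ULL, 100.0, 0.697, "CPUs utilized");
	return 0;
}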