2019-05-29 07:18:02 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2015-07-17 19:33:41 +03:00
|
|
|
/*
|
|
|
|
* intel_pt.c: Intel Processor Trace support
|
|
|
|
* Copyright (c) 2013-2015, Intel Corporation.
|
|
|
|
*/
|
|
|
|
|
2017-04-18 10:46:11 -03:00
|
|
|
#include <errno.h>
|
2015-07-17 19:33:41 +03:00
|
|
|
#include <stdbool.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/bitops.h>
|
|
|
|
#include <linux/log2.h>
|
2019-07-04 11:32:27 -03:00
|
|
|
#include <linux/zalloc.h>
|
2022-10-03 13:46:46 -07:00
|
|
|
#include <linux/err.h>
|
2015-07-17 19:33:54 +03:00
|
|
|
#include <cpuid.h>
|
2015-07-17 19:33:41 +03:00
|
|
|
|
2020-03-05 23:11:08 -08:00
|
|
|
#include "../../../util/session.h"
|
|
|
|
#include "../../../util/event.h"
|
|
|
|
#include "../../../util/evlist.h"
|
|
|
|
#include "../../../util/evsel.h"
|
|
|
|
#include "../../../util/evsel_config.h"
|
2025-05-12 12:39:31 +03:00
|
|
|
#include "../../../util/config.h"
|
2020-03-05 23:11:08 -08:00
|
|
|
#include "../../../util/cpumap.h"
|
|
|
|
#include "../../../util/mmap.h"
|
2015-12-15 09:39:39 -06:00
|
|
|
#include <subcmd/parse-options.h>
|
2020-03-05 23:11:08 -08:00
|
|
|
#include "../../../util/parse-events.h"
|
2023-05-27 00:22:03 -07:00
|
|
|
#include "../../../util/pmus.h"
|
2020-03-05 23:11:08 -08:00
|
|
|
#include "../../../util/debug.h"
|
|
|
|
#include "../../../util/auxtrace.h"
|
2020-05-05 11:49:08 -03:00
|
|
|
#include "../../../util/perf_api_probe.h"
|
2020-03-05 23:11:08 -08:00
|
|
|
#include "../../../util/record.h"
|
|
|
|
#include "../../../util/target.h"
|
|
|
|
#include "../../../util/tsc.h"
|
2019-08-06 15:25:25 +02:00
|
|
|
#include <internal/lib.h> // page_size
|
2020-03-05 23:11:08 -08:00
|
|
|
#include "../../../util/intel-pt.h"
|
2024-06-25 13:45:32 +03:00
|
|
|
#include <api/fs/fs.h>
|
2015-07-17 19:33:41 +03:00
|
|
|
|
|
|
|
#define KiB(x) ((x) * 1024)
|
|
|
|
#define MiB(x) ((x) * 1024 * 1024)
|
|
|
|
#define KiB_MASK(x) (KiB(x) - 1)
|
|
|
|
#define MiB_MASK(x) (MiB(x) - 1)
|
|
|
|
|
|
|
|
#define INTEL_PT_PSB_PERIOD_NEAR 256
|
|
|
|
|
|
|
|
/*
 * Per-AUX-buffer reference data used in snapshot mode to detect whether
 * the buffer has wrapped since the previous snapshot was taken.
 */
struct intel_pt_snapshot_ref {
	void *ref_buf;		/* saved copy of trace data for comparison */
	size_t ref_offset;	/* buffer offset the copy was taken from */
	bool wrapped;		/* buffer had already wrapped at last snapshot */
};
|
|
|
|
|
|
|
|
/*
 * Intel PT recording state.  The generic auxtrace_record callbacks (itr)
 * are embedded first so container_of() can recover this structure.
 */
struct intel_pt_recording {
	struct auxtrace_record itr;		/* generic auxtrace interface */
	struct perf_pmu *intel_pt_pmu;		/* the intel_pt PMU */
	int have_sched_switch;			/* context-switch tracking method */
	struct evlist *evlist;
	bool all_switch_events;
	bool snapshot_mode;			/* -S/--snapshot recording */
	bool snapshot_init_done;		/* lazy snapshot setup completed */
	size_t snapshot_size;			/* requested size, 0 = default */
	size_t snapshot_ref_buf_size;		/* size of each ref_buf copy */
	int snapshot_ref_cnt;			/* number of snapshot_refs */
	struct intel_pt_snapshot_ref *snapshot_refs;	/* one per AUX mmap */
	size_t priv_size;			/* auxtrace_info priv data size */
};
|
|
|
|
|
2023-10-12 10:56:45 -07:00
|
|
|
/*
 * Parse a comma-separated list of config terms (e.g. "tsc,mtc_period=3")
 * for @pmu and merge the resulting bits into *@config.  *@config supplies
 * the starting (default) value and receives the result.
 *
 * Returns 0 on success or a negative error code from term parsing or
 * PMU configuration.
 */
static int intel_pt_parse_terms_with_default(const struct perf_pmu *pmu,
					     const char *str,
					     u64 *config)
{
	struct parse_events_terms terms;
	struct perf_event_attr attr = { .size = 0, };
	int err;

	parse_events_terms__init(&terms);
	err = parse_events_terms(&terms, str, /*input=*/ NULL);
	if (err)
		goto out_free;

	/* Start from the caller-provided default config */
	attr.config = *config;
	err = perf_pmu__config_terms(pmu, &attr, &terms, /*zero=*/true, /*apply_hardcoded=*/false,
				     /*err=*/NULL);
	if (err)
		goto out_free;

	*config = attr.config;
out_free:
	parse_events_terms__exit(&terms);
	return err;
}
|
|
|
|
|
2023-10-12 10:56:45 -07:00
|
|
|
/*
 * Like intel_pt_parse_terms_with_default() but starting from a zero
 * config, so *@config ends up holding exactly the bits selected by the
 * terms in @str.  *@config is zero even on error.
 */
static int intel_pt_parse_terms(const struct perf_pmu *pmu, const char *str, u64 *config)
{
	*config = 0;
	return intel_pt_parse_terms_with_default(pmu, str, config);
}
|
|
|
|
|
2015-07-17 19:33:52 +03:00
|
|
|
/*
 * Gather the bits of @bits selected by @mask and pack them together at
 * the low end of the result, preserving their relative order (most
 * significant first).  Used to extract a format field's value out of a
 * perf_event_attr config word.
 */
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
	u64 res = 0;
	int i;

	/* Walk from bit 63 down, appending each selected bit to the result */
	for (i = 63; i >= 0; i--) {
		if ((mask >> i) & 1)
			res = (res << 1) | ((bits >> i) & 1);
	}

	return res;
}
|
|
|
|
|
|
|
|
/*
 * Read back the value of the PMU format field named @str (e.g.
 * "psb_period") from the Intel PT event already configured in @evlist.
 *
 * Returns 0 and stores the field value (shifted down to bit 0) in *@res,
 * or -EINVAL if the format field does not exist or @evlist has no
 * Intel PT event.
 */
static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
				struct evlist *evlist, u64 *res)
{
	struct evsel *evsel;
	u64 mask;

	*res = 0;

	/* Bit mask of the named format field within attr.config */
	mask = perf_pmu__format_bits(intel_pt_pmu, str);
	if (!mask)
		return -EINVAL;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			/* Extract the field's bits, packed down to bit 0 */
			*res = intel_pt_masked_bits(mask, evsel->core.attr.config);
			return 0;
		}
	}

	return -EINVAL;
}
|
|
|
|
|
|
|
|
/*
 * Estimate the PSB (Packet Stream Boundary) period in bytes of trace for
 * the configuration in @evlist.  A psb_period format value of v means
 * approximately 2K << v bytes, i.e. 2^(v + 11).
 */
static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
				  struct evlist *evlist)
{
	u64 val;
	int err, topa_multiple_entries;
	size_t psb_period;

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
				"%d", &topa_multiple_entries) != 1)
		topa_multiple_entries = 0;

	/*
	 * Use caps/topa_multiple_entries to indicate early hardware that had
	 * extra frequent PSBs.
	 */
	if (!topa_multiple_entries) {
		psb_period = 256;
		goto out;
	}

	/* Fall back to the minimum period (val = 0) if it cannot be read */
	err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
	if (err)
		val = 0;

	psb_period = 1 << (val + 11);
out:
	pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
	return psb_period;
}
|
|
|
|
|
|
|
|
/*
 * Choose a bit position from the mask @bits, preferring @target: the
 * target itself if set, otherwise the highest set bit below it,
 * otherwise the lowest set bit above it.
 *
 * Returns the chosen position, or -1 if @bits is zero.
 */
static int intel_pt_pick_bit(int bits, int target)
{
	int pos = 0;
	int pick = -1;

	while (bits) {
		if (bits & 1) {
			/* Take any bit at or below target, or the first one seen */
			if (pick < 0 || pos <= target)
				pick = pos;
			/* Nothing at or below target can improve beyond here */
			if (pos >= target)
				break;
		}
		bits >>= 1;
		pos++;
	}

	return pick;
}
|
|
|
|
|
2023-10-12 10:56:45 -07:00
|
|
|
/*
 * Build the default attr.config for Intel PT by probing the PMU's sysfs
 * capability files: always enable TSC, add MTC and PSB periods close to
 * a preferred value when the hardware supports them, and add pt=1 and
 * branch=1 when those format fields exist.
 */
static u64 intel_pt_default_config(const struct perf_pmu *intel_pt_pmu)
{
	char buf[256];
	int mtc, mtc_periods = 0, mtc_period;
	int psb_cyc, psb_periods, psb_period;
	int pos = 0;
	u64 config;
	char c;
	int dirfd;

	dirfd = perf_pmu__event_source_devices_fd();

	/* Build a config term string, starting with timestamps */
	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

	/* Assume MTC is supported when the caps file is absent */
	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc", "%d",
				   &mtc) != 1)
		mtc = 1;
	if (mtc) {
		if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc_periods", "%x",
					   &mtc_periods) != 1)
			mtc_periods = 0;
		if (mtc_periods) {
			/* Prefer a period value near 3 from the supported set */
			mtc_period = intel_pt_pick_bit(mtc_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",mtc,mtc_period=%d", mtc_period);
		}
	}

	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_cyc", "%d",
				   &psb_cyc) != 1)
		psb_cyc = 1;

	/* PSB period selection only applies when cycle-eccurate PSB and MTC exist */
	if (psb_cyc && mtc_periods) {
		if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_periods", "%x",
					   &psb_periods) != 1)
			psb_periods = 0;
		if (psb_periods) {
			psb_period = intel_pt_pick_bit(psb_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",psb_period=%d", psb_period);
		}
	}

	/* Request pass-through and branch tracing when the format fields exist */
	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
	    perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/branch", "%c", &c) == 1)
		pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");

	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);

	/* buf contains only terms probed above; config is zero on parse failure */
	intel_pt_parse_terms(intel_pt_pmu, buf, &config);

	close(dirfd);
	return config;
}
|
|
|
|
|
|
|
|
/*
 * Parse the argument of the snapshot record option: an optional buffer
 * size accepted in any strtoull() base (decimal, octal or hex).  A
 * missing argument leaves the size at 0, which selects a default later.
 *
 * Returns 0 on success, -1 if the string is not a valid size.
 */
static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
					   struct record_opts *opts,
					   const char *str)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		/* Reject trailing junk and values that do not fit in size_t */
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	ptr->snapshot_size = snapshot_size;

	return 0;
}
|
|
|
|
|
2023-10-12 10:56:45 -07:00
|
|
|
/*
 * Set the default config value in @attr for the Intel PT PMU.
 *
 * The default is computed once and cached in function-local statics.
 * NOTE(review): the cache is not thread-safe and assumes a single
 * Intel PT PMU instance - confirm callers invoke this from one thread.
 */
void intel_pt_pmu_default_config(const struct perf_pmu *intel_pt_pmu,
				 struct perf_event_attr *attr)
{
	static u64 config;
	static bool initialized;

	if (!initialized) {
		config = intel_pt_default_config(intel_pt_pmu);
		initialized = true;
	}
	attr->config = config;
}
|
|
|
|
|
2019-07-21 13:23:52 +02:00
|
|
|
/*
 * Return the address filter string of the Intel PT event in @evlist, or
 * NULL if there is no Intel PT event (or its filter is unset).
 */
static const char *intel_pt_find_filter(struct evlist *evlist,
					struct perf_pmu *intel_pt_pmu)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type)
			return evsel->filter;
	}

	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * Bytes needed to store @filter in the auxtrace info private data:
 * string length plus NUL terminator, rounded up to a multiple of 8
 * (u64-sized slots).  Zero when there is no filter.
 */
static size_t intel_pt_filter_bytes(const char *filter)
{
	size_t len;

	if (!filter || !*filter)
		return 0;

	len = strlen(filter) + 1;
	/* Round up to the next multiple of 8 (power-of-two round-up) */
	return (len + 7) & ~(size_t)7;
}
|
|
|
|
|
2016-01-14 14:46:15 -07:00
|
|
|
/*
 * Size of the private data that intel_pt_info_fill() will write into the
 * PERF_RECORD_AUXTRACE_INFO event: the fixed u64 array, the (8-byte
 * padded) filter string, and one u64 for the event trace capability.
 * The result is cached in ptr->priv_size so intel_pt_info_fill() can
 * verify it is handed a matching buffer size.
 */
static size_t
intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	const char *filter = intel_pt_find_filter(evlist, ptr->intel_pt_pmu);

	ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) +
			 intel_pt_filter_bytes(filter);
	ptr->priv_size += sizeof(u64); /* Cap Event Trace */

	return ptr->priv_size;
}
|
|
|
|
|
2015-07-17 19:33:54 +03:00
|
|
|
/*
 * Read the TSC to Core Crystal Clock (CTC) ratio from CPUID leaf 15H:
 * EBX holds the numerator and EAX the denominator.  Either may be zero
 * when the CPU does not enumerate the ratio.
 */
static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	__get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
	*n = ebx;
	*d = eax;
}
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
/*
 * Fill the PERF_RECORD_AUXTRACE_INFO private data that the decoder needs:
 * PMU type, TSC conversion parameters, the config bits for the various
 * packet types, the TSC/CTC ratio, the address filter string and the
 * event trace capability.
 *
 * @priv_size must match what intel_pt_info_priv_size() computed.
 * Returns 0 on success or a negative error code.
 */
static int intel_pt_info_fill(struct auxtrace_record *itr,
			      struct perf_session *session,
			      struct perf_record_auxtrace_info *auxtrace_info,
			      size_t priv_size)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	struct perf_event_mmap_page *pc;
	struct perf_tsc_conversion tc = { .time_mult = 0, };
	bool cap_user_time_zero = false, per_cpu_mmaps;
	u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
	u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
	unsigned long max_non_turbo_ratio;
	size_t filter_str_len;
	const char *filter;
	int event_trace;
	__u64 *info;
	int err;

	if (priv_size != ptr->priv_size)
		return -EINVAL;

	/* Resolve the config bit(s) for each packet-enabling term */
	intel_pt_parse_terms(intel_pt_pmu, "tsc", &tsc_bit);
	intel_pt_parse_terms(intel_pt_pmu, "noretcomp", &noretcomp_bit);
	intel_pt_parse_terms(intel_pt_pmu, "mtc", &mtc_bit);
	mtc_freq_bits = perf_pmu__format_bits(intel_pt_pmu, "mtc_period");
	intel_pt_parse_terms(intel_pt_pmu, "cyc", &cyc_bit);

	intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);

	/* Optional sysfs values: default to 0 when absent/unreadable */
	if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio",
				"%lu", &max_non_turbo_ratio) != 1)
		max_non_turbo_ratio = 0;
	if (perf_pmu__scan_file(intel_pt_pmu, "caps/event_trace",
				"%d", &event_trace) != 1)
		event_trace = 0;

	filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
	filter_str_len = filter ? strlen(filter) : 0;

	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	/* Read TSC conversion parameters from the first mmap'd page */
	pc = session->evlist->mmap[0].core.base;
	if (pc) {
		err = perf_read_tsc_conversion(pc, &tc);
		if (err) {
			/* -EOPNOTSUPP just means no conversion is available */
			if (err != -EOPNOTSUPP)
				return err;
		} else {
			cap_user_time_zero = tc.time_mult != 0;
		}
		if (!cap_user_time_zero)
			ui__warning("Intel Processor Trace: TSC not available\n");
	}

	per_cpu_mmaps = !perf_cpu_map__is_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);

	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
	auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
	auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
	auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
	auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
	auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
	auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
	auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
	auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
	auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
	auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
	auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
	auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
	auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO] = max_non_turbo_ratio;
	auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] = filter_str_len;

	/* Variable-length data follows the fixed priv array */
	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;

	if (filter_str_len) {
		/* len is a multiple of 8 and > strlen(filter), so strncpy
		 * both NUL-terminates and zero-pads the slot here */
		size_t len = intel_pt_filter_bytes(filter);

		strncpy((char *)info, filter, len);
		info += len >> 3;
	}

	*info++ = event_trace;

	return 0;
}
|
|
|
|
|
perf build: Use libtraceevent from the system
Remove the LIBTRACEEVENT_DYNAMIC and LIBTRACEFS_DYNAMIC make command
line variables.
If libtraceevent isn't installed or NO_LIBTRACEEVENT=1 is passed to the
build, don't compile in libtraceevent and libtracefs support.
This also disables CONFIG_TRACE that controls "perf trace".
CONFIG_LIBTRACEEVENT is used to control enablement in Build/Makefiles,
HAVE_LIBTRACEEVENT is used in C code.
Without HAVE_LIBTRACEEVENT tracepoints are disabled and as such the
commands kmem, kwork, lock, sched and timechart are removed. The
majority of commands continue to work including "perf test".
Committer notes:
Fixed up a tools/perf/util/Build reject and added:
#include <traceevent/event-parse.h>
to tools/perf/util/scripting-engines/trace-event-perl.c.
Committer testing:
$ rpm -qi libtraceevent-devel
Name : libtraceevent-devel
Version : 1.5.3
Release : 2.fc36
Architecture: x86_64
Install Date: Mon 25 Jul 2022 03:20:19 PM -03
Group : Unspecified
Size : 27728
License : LGPLv2+ and GPLv2+
Signature : RSA/SHA256, Fri 15 Apr 2022 02:11:58 PM -03, Key ID 999f7cbf38ab71f4
Source RPM : libtraceevent-1.5.3-2.fc36.src.rpm
Build Date : Fri 15 Apr 2022 10:57:01 AM -03
Build Host : buildvm-x86-05.iad2.fedoraproject.org
Packager : Fedora Project
Vendor : Fedora Project
URL : https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/
Bug URL : https://bugz.fedoraproject.org/libtraceevent
Summary : Development headers of libtraceevent
Description :
Development headers of libtraceevent-libs
$
Default build:
$ ldd ~/bin/perf | grep tracee
libtraceevent.so.1 => /lib64/libtraceevent.so.1 (0x00007f1dcaf8f000)
$
# perf trace -e sched:* --max-events 10
0.000 migration/0/17 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, dest_cpu: 1)
0.005 migration/0/17 sched:sched_wake_idle_without_ipi(cpu: 1)
0.011 migration/0/17 sched:sched_switch(prev_comm: "", prev_pid: 17 (migration/0), prev_state: 1, next_comm: "", next_prio: 120)
1.173 :0/0 sched:sched_wakeup(comm: "", pid: 3138 (gnome-terminal-), prio: 120)
1.180 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 3138 (gnome-terminal-), next_prio: 120)
0.156 migration/1/21 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, orig_cpu: 1, dest_cpu: 2)
0.160 migration/1/21 sched:sched_wake_idle_without_ipi(cpu: 2)
0.166 migration/1/21 sched:sched_switch(prev_comm: "", prev_pid: 21 (migration/1), prev_state: 1, next_comm: "", next_prio: 120)
1.183 :0/0 sched:sched_wakeup(comm: "", pid: 1602985 (kworker/u16:0-f), prio: 120, target_cpu: 1)
1.186 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 1602985 (kworker/u16:0-f), next_prio: 120)
#
Had to tweak tools/perf/util/setup.py to make sure the python binding
shared object links with libtraceevent if -DHAVE_LIBTRACEEVENT is
present in CFLAGS.
Building with NO_LIBTRACEEVENT=1 uncovered some more build failures:
- Make building of data-convert-bt.c to CONFIG_LIBTRACEEVENT=y
- perf-$(CONFIG_LIBTRACEEVENT) += scripts/
- bpf_kwork.o needs also to be dependent on CONFIG_LIBTRACEEVENT=y
- The python binding needed some fixups and util/trace-event.c can't be
built and linked with the python binding shared object, so remove it
in tools/perf/util/setup.py and exclude it from the list of
dependencies in the python/perf.so Makefile.perf target.
Building without libtraceevent-devel installed uncovered more build
failures:
- The python binding tools/perf/util/python.c was assuming that
traceevent/parse-events.h was always available, which was the case
when we defaulted to using the in-kernel tools/lib/traceevent/ files,
now we need to enclose it under ifdef HAVE_LIBTRACEEVENT, just like
the other parts of it that deal with tracepoints.
- We have to ifdef the rules in the Build files with
CONFIG_LIBTRACEEVENT=y to build builtin-trace.c and
tools/perf/trace/beauty/ as we only ifdef setting CONFIG_TRACE=y when
setting NO_LIBTRACEEVENT=1 in the make command line, not when we don't
detect libtraceevent-devel installed in the system. Simplification here
to avoid these two ways of disabling builtin-trace.c and not having
CONFIG_TRACE=y when libtraceevent-devel isn't installed is the clean
way.
From Athira:
<quote>
tools/perf/arch/powerpc/util/Build
-perf-y += kvm-stat.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
</quote>
Then, ditto for arm64 and s390, detected by container cross build tests.
- s/390 uses test__checkevent_tracepoint() that is now only available if
HAVE_LIBTRACEEVENT is defined, enclose the callsite with ifder HAVE_LIBTRACEEVENT.
Also from Athira:
<quote>
With this change, I could successfully compile in these environment:
- Without libtraceevent-devel installed
- With libtraceevent-devel installed
- With “make NO_LIBTRACEEVENT=1”
</quote>
Then, finally rename CONFIG_TRACEEVENT to CONFIG_LIBTRACEEVENT for
consistency with other libraries detected in tools/perf/.
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: http://lore.kernel.org/lkml/20221205225940.3079667-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-12-05 14:59:39 -08:00
|
|
|
#ifdef HAVE_LIBTRACEEVENT
|
2019-07-21 13:23:52 +02:00
|
|
|
/*
 * Add a sched:sched_switch tracepoint event so decoding can follow
 * context switches.
 *
 * Returns 0 on success, -EPERM when the tracepoint cannot be selected,
 * or a negative error from event creation.
 */
static int intel_pt_track_switches(struct evlist *evlist)
{
	const char *sched_switch = "sched:sched_switch";
	struct evsel *evsel;
	int err;

	if (!evlist__can_select_event(evlist, sched_switch))
		return -EPERM;

	evsel = evlist__add_sched_switch(evlist, true);
	if (IS_ERR(evsel)) {
		err = PTR_ERR(evsel);
		pr_debug2("%s: failed to create %s, error = %d\n",
			  __func__, sched_switch, err);
		return err;
	}

	/* Enable immediately, not when the main events are enabled */
	evsel->immediate = true;

	return 0;
}
|
perf build: Use libtraceevent from the system
Remove the LIBTRACEEVENT_DYNAMIC and LIBTRACEFS_DYNAMIC make command
line variables.
If libtraceevent isn't installed or NO_LIBTRACEEVENT=1 is passed to the
build, don't compile in libtraceevent and libtracefs support.
This also disables CONFIG_TRACE that controls "perf trace".
CONFIG_LIBTRACEEVENT is used to control enablement in Build/Makefiles,
HAVE_LIBTRACEEVENT is used in C code.
Without HAVE_LIBTRACEEVENT tracepoints are disabled and as such the
commands kmem, kwork, lock, sched and timechart are removed. The
majority of commands continue to work including "perf test".
Committer notes:
Fixed up a tools/perf/util/Build reject and added:
#include <traceevent/event-parse.h>
to tools/perf/util/scripting-engines/trace-event-perl.c.
Committer testing:
$ rpm -qi libtraceevent-devel
Name : libtraceevent-devel
Version : 1.5.3
Release : 2.fc36
Architecture: x86_64
Install Date: Mon 25 Jul 2022 03:20:19 PM -03
Group : Unspecified
Size : 27728
License : LGPLv2+ and GPLv2+
Signature : RSA/SHA256, Fri 15 Apr 2022 02:11:58 PM -03, Key ID 999f7cbf38ab71f4
Source RPM : libtraceevent-1.5.3-2.fc36.src.rpm
Build Date : Fri 15 Apr 2022 10:57:01 AM -03
Build Host : buildvm-x86-05.iad2.fedoraproject.org
Packager : Fedora Project
Vendor : Fedora Project
URL : https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/
Bug URL : https://bugz.fedoraproject.org/libtraceevent
Summary : Development headers of libtraceevent
Description :
Development headers of libtraceevent-libs
$
Default build:
$ ldd ~/bin/perf | grep tracee
libtraceevent.so.1 => /lib64/libtraceevent.so.1 (0x00007f1dcaf8f000)
$
# perf trace -e sched:* --max-events 10
0.000 migration/0/17 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, dest_cpu: 1)
0.005 migration/0/17 sched:sched_wake_idle_without_ipi(cpu: 1)
0.011 migration/0/17 sched:sched_switch(prev_comm: "", prev_pid: 17 (migration/0), prev_state: 1, next_comm: "", next_prio: 120)
1.173 :0/0 sched:sched_wakeup(comm: "", pid: 3138 (gnome-terminal-), prio: 120)
1.180 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 3138 (gnome-terminal-), next_prio: 120)
0.156 migration/1/21 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, orig_cpu: 1, dest_cpu: 2)
0.160 migration/1/21 sched:sched_wake_idle_without_ipi(cpu: 2)
0.166 migration/1/21 sched:sched_switch(prev_comm: "", prev_pid: 21 (migration/1), prev_state: 1, next_comm: "", next_prio: 120)
1.183 :0/0 sched:sched_wakeup(comm: "", pid: 1602985 (kworker/u16:0-f), prio: 120, target_cpu: 1)
1.186 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 1602985 (kworker/u16:0-f), next_prio: 120)
#
Had to tweak tools/perf/util/setup.py to make sure the python binding
shared object links with libtraceevent if -DHAVE_LIBTRACEEVENT is
present in CFLAGS.
Building with NO_LIBTRACEEVENT=1 uncovered some more build failures:
- Make building of data-convert-bt.c to CONFIG_LIBTRACEEVENT=y
- perf-$(CONFIG_LIBTRACEEVENT) += scripts/
- bpf_kwork.o needs also to be dependent on CONFIG_LIBTRACEEVENT=y
- The python binding needed some fixups and util/trace-event.c can't be
built and linked with the python binding shared object, so remove it
in tools/perf/util/setup.py and exclude it from the list of
dependencies in the python/perf.so Makefile.perf target.
Building without libtraceevent-devel installed uncovered more build
failures:
- The python binding tools/perf/util/python.c was assuming that
traceevent/parse-events.h was always available, which was the case
when we defaulted to using the in-kernel tools/lib/traceevent/ files,
now we need to enclose it under ifdef HAVE_LIBTRACEEVENT, just like
the other parts of it that deal with tracepoints.
- We have to ifdef the rules in the Build files with
CONFIG_LIBTRACEEVENT=y to build builtin-trace.c and
tools/perf/trace/beauty/ as we only ifdef setting CONFIG_TRACE=y when
setting NO_LIBTRACEEVENT=1 in the make command line, not when we don't
detect libtraceevent-devel installed in the system. Simplification here
to avoid these two ways of disabling builtin-trace.c and not having
CONFIG_TRACE=y when libtraceevent-devel isn't installed is the clean
way.
From Athira:
<quote>
tools/perf/arch/powerpc/util/Build
-perf-y += kvm-stat.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
</quote>
Then, ditto for arm64 and s390, detected by container cross build tests.
- s/390 uses test__checkevent_tracepoint() that is now only available if
HAVE_LIBTRACEEVENT is defined, enclose the callsite with ifder HAVE_LIBTRACEEVENT.
Also from Athira:
<quote>
With this change, I could successfully compile in these environment:
- Without libtraceevent-devel installed
- With libtraceevent-devel installed
- With “make NO_LIBTRACEEVENT=1”
</quote>
Then, finally rename CONFIG_TRACEEVENT to CONFIG_LIBTRACEEVENT for
consistency with other libraries detected in tools/perf/.
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: http://lore.kernel.org/lkml/20221205225940.3079667-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-12-05 14:59:39 -08:00
|
|
|
#endif
|
2015-07-17 19:33:41 +03:00
|
|
|
|
2024-06-25 13:45:32 +03:00
|
|
|
/*
 * Whether guest execution must be excluded from tracing: true when
 * kvm_intel's pt_mode parameter is 1, in which case the guest owns
 * Intel PT while it runs.  A missing or unreadable sysfs file is
 * treated the same as pt_mode == 0.
 */
static bool intel_pt_exclude_guest(void)
{
	int pt_mode;

	if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode))
		return false;

	return pt_mode == 1;
}
|
|
|
|
|
2015-07-17 19:33:52 +03:00
|
|
|
/*
 * Format the set bit positions of @valid into @str as a comma-separated
 * list with runs of 3 or more collapsed into ranges, e.g. 0x1d -> "0,2-4".
 *
 * Implemented as a state machine over bit positions:
 *   state 1: nothing printed yet          state 0: between groups (',' needed)
 *   state 2: one value in current run     state 3: two consecutive values
 *   state 4: three or more consecutive values (close with "-last")
 * The loop deliberately runs to val == 64 (valid shifted fully out) so a
 * run ending at bit 63 is still flushed.
 */
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
	unsigned int val, last = 0, state = 1;
	int p = 0;

	str[0] = '\0';

	for (val = 0; val <= 64; val++, valid >>= 1) {
		if (valid & 1) {
			last = val;
			switch (state) {
			case 0:
				p += scnprintf(str + p, len - p, ",");
				/* Fall through */
			case 1:
				p += scnprintf(str + p, len - p, "%u", val);
				state = 2;
				break;
			case 2:
				state = 3;
				break;
			case 3:
				state = 4;
				break;
			default:
				break;
			}
		} else {
			switch (state) {
			case 3:
				/* Run of exactly two: print second value */
				p += scnprintf(str + p, len - p, ",%u", last);
				state = 0;
				break;
			case 4:
				/* Run of three or more: close the range */
				p += scnprintf(str + p, len - p, "-%u", last);
				state = 0;
				break;
			default:
				break;
			}
			if (state != 1)
				state = 0;
		}
	}
}
|
|
|
|
|
2023-03-31 13:29:49 -07:00
|
|
|
/*
 * Validate one config term value against the PMU's capability bitmap.
 *
 * @caps:      sysfs caps file holding a bitmap of valid values
 *             (e.g. "caps/psb_periods")
 * @name:      format field name within attr.config (e.g. "psb_period")
 * @supported: optional sysfs file that must read non-zero for the
 *             capability to apply at all, or NULL
 * @config:    the user-supplied attr.config
 *
 * Returns 0 if the field's value is permitted, -EINVAL otherwise (after
 * printing an error listing the valid values).
 */
static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu, int dirfd,
				    const char *caps, const char *name,
				    const char *supported, u64 config)
{
	char valid_str[256];
	unsigned int shift;
	unsigned long long valid;
	u64 bits;
	int ok;

	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, caps, "%llx", &valid) != 1)
		valid = 0;

	if (supported &&
	    perf_pmu__scan_file_at(intel_pt_pmu, dirfd, supported, "%d", &ok) == 1 && !ok)
		valid = 0;

	/* Zero is always a valid value */
	valid |= 1;

	bits = perf_pmu__format_bits(intel_pt_pmu, name);

	config &= bits;

	/* Shift the field's value down to bit 0 */
	for (shift = 0; bits && !(bits & 1); shift++)
		bits >>= 1;

	config >>= shift;

	if (config > 63)
		goto out_err;

	/*
	 * Use a 64-bit shift: config can be up to 63, and "1 << config"
	 * would overflow/invoke UB for values >= the width of int.
	 */
	if (valid & (1ULL << config))
		return 0;
out_err:
	intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
	pr_err("Invalid %s for %s. Valid values are: %s\n",
	       name, INTEL_PT_PMU_NAME, valid_str);
	return -EINVAL;
}
|
|
|
|
|
|
|
|
/*
 * Validate the evsel's attr.config against the Intel PT PMU's
 * capabilities (cycle threshold, MTC period, PSB period), forcing the
 * pass-through bit (pt=1) first when the PMU supports it.
 *
 * Returns 0 on success (including a NULL @evsel) or a negative error.
 */
static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
				    struct evsel *evsel)
{
	int err, dirfd;
	char c;

	if (!evsel)
		return 0;

	dirfd = perf_pmu__event_source_devices_fd();
	if (dirfd < 0)
		return dirfd;

	/*
	 * If supported, force pass-through config term (pt=1) even if user
	 * sets pt=0, which avoids senseless kernel errors.
	 */
	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
	    !(evsel->core.attr.config & 1)) {
		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
		evsel->core.attr.config |= 1;
	}

	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/cycle_thresholds",
				       "cyc_thresh", "caps/psb_cyc",
				       evsel->core.attr.config);
	if (err)
		goto out;

	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/mtc_periods",
				       "mtc_period", "caps/mtc",
				       evsel->core.attr.config);
	if (err)
		goto out;

	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/psb_periods",
				       "psb_period", "caps/psb_cyc",
				       evsel->core.attr.config);

out:
	close(dirfd);
	return err;
}
|
|
|
|
|
2019-11-15 14:42:23 +02:00
|
|
|
/*
 * Find the smallest and largest non-zero aux_sample_size across all
 * events in @evlist. Either output pointer may be NULL to skip that
 * result; *min_sz / *max_sz are only ever updated, never reset.
 */
static void intel_pt_min_max_sample_sz(struct evlist *evlist,
				       size_t *min_sz, size_t *max_sz)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		size_t sample_sz = pos->core.attr.aux_sample_size;

		/* Events without AUX sampling do not count. */
		if (!sample_sz)
			continue;
		if (min_sz && (!*min_sz || sample_sz < *min_sz))
			*min_sz = sample_sz;
		if (max_sz && sample_sz > *max_sz)
			*max_sz = sample_sz;
	}
}
|
|
|
|
|
2019-08-06 11:46:04 +03:00
|
|
|
/*
|
|
|
|
* Currently, there is not enough information to disambiguate different PEBS
|
|
|
|
* events, so only allow one.
|
|
|
|
*/
|
|
|
|
static bool intel_pt_too_many_aux_output(struct evlist *evlist)
|
|
|
|
{
|
|
|
|
struct evsel *evsel;
|
|
|
|
int aux_output_cnt = 0;
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel)
|
|
|
|
aux_output_cnt += !!evsel->core.attr.aux_output;
|
|
|
|
|
|
|
|
if (aux_output_cnt > 1) {
|
|
|
|
pr_err(INTEL_PT_PMU_NAME " supports at most one event with aux-output\n");
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
static int intel_pt_recording_options(struct auxtrace_record *itr,
|
2019-07-21 13:23:52 +02:00
|
|
|
struct evlist *evlist,
|
2015-07-17 19:33:41 +03:00
|
|
|
struct record_opts *opts)
|
|
|
|
{
|
|
|
|
struct intel_pt_recording *ptr =
|
|
|
|
container_of(itr, struct intel_pt_recording, itr);
|
|
|
|
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
|
2016-08-15 10:23:04 +03:00
|
|
|
bool have_timing_info, need_immediate = false;
|
2019-07-21 13:23:51 +02:00
|
|
|
struct evsel *evsel, *intel_pt_evsel = NULL;
|
2022-03-28 16:26:44 -07:00
|
|
|
const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
|
2019-08-26 21:39:13 -04:00
|
|
|
bool privileged = perf_event_paranoid_check(-1);
|
2015-07-17 19:33:41 +03:00
|
|
|
u64 tsc_bit;
|
2015-07-17 19:33:52 +03:00
|
|
|
int err;
|
2015-07-17 19:33:41 +03:00
|
|
|
|
|
|
|
ptr->evlist = evlist;
|
|
|
|
ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
|
|
|
|
|
2016-06-23 11:26:15 -03:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2019-07-21 13:24:29 +02:00
|
|
|
if (evsel->core.attr.type == intel_pt_pmu->type) {
|
2015-07-17 19:33:41 +03:00
|
|
|
if (intel_pt_evsel) {
|
|
|
|
pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2019-07-21 13:24:29 +02:00
|
|
|
evsel->core.attr.freq = 0;
|
|
|
|
evsel->core.attr.sample_period = 1;
|
2024-06-25 13:45:32 +03:00
|
|
|
evsel->core.attr.exclude_guest = intel_pt_exclude_guest();
|
2020-06-30 16:39:33 +03:00
|
|
|
evsel->no_aux_samples = true;
|
2022-05-06 15:25:47 +03:00
|
|
|
evsel->needs_auxtrace_mmap = true;
|
2015-07-17 19:33:41 +03:00
|
|
|
intel_pt_evsel = evsel;
|
|
|
|
opts->full_auxtrace = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
|
|
|
|
pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-11-15 14:42:23 +02:00
|
|
|
if (opts->auxtrace_snapshot_mode && opts->auxtrace_sample_mode) {
|
|
|
|
pr_err("Snapshot mode (" INTEL_PT_PMU_NAME " PMU) and sample trace cannot be used together\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
if (opts->use_clockid) {
|
|
|
|
pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-08-06 11:46:04 +03:00
|
|
|
if (intel_pt_too_many_aux_output(evlist))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
if (!opts->full_auxtrace)
|
|
|
|
return 0;
|
|
|
|
|
2019-11-15 14:42:23 +02:00
|
|
|
if (opts->auxtrace_sample_mode)
|
2023-04-24 14:47:42 +01:00
|
|
|
evsel__set_config_if_unset(intel_pt_pmu, intel_pt_evsel,
|
|
|
|
"psb_period", 0);
|
2019-11-15 14:42:23 +02:00
|
|
|
|
2015-07-17 19:33:52 +03:00
|
|
|
err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
/* Set default sizes for snapshot mode */
|
|
|
|
if (opts->auxtrace_snapshot_mode) {
|
|
|
|
size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
|
|
|
|
|
|
|
|
if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
|
|
|
|
if (privileged) {
|
|
|
|
opts->auxtrace_mmap_pages = MiB(4) / page_size;
|
|
|
|
} else {
|
|
|
|
opts->auxtrace_mmap_pages = KiB(128) / page_size;
|
|
|
|
if (opts->mmap_pages == UINT_MAX)
|
|
|
|
opts->mmap_pages = KiB(256) / page_size;
|
|
|
|
}
|
|
|
|
} else if (!opts->auxtrace_mmap_pages && !privileged &&
|
|
|
|
opts->mmap_pages == UINT_MAX) {
|
|
|
|
opts->mmap_pages = KiB(256) / page_size;
|
|
|
|
}
|
|
|
|
if (!opts->auxtrace_snapshot_size)
|
|
|
|
opts->auxtrace_snapshot_size =
|
|
|
|
opts->auxtrace_mmap_pages * (size_t)page_size;
|
|
|
|
if (!opts->auxtrace_mmap_pages) {
|
|
|
|
size_t sz = opts->auxtrace_snapshot_size;
|
|
|
|
|
|
|
|
sz = round_up(sz, page_size) / page_size;
|
|
|
|
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
|
|
|
|
}
|
|
|
|
if (opts->auxtrace_snapshot_size >
|
|
|
|
opts->auxtrace_mmap_pages * (size_t)page_size) {
|
|
|
|
pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
|
|
|
|
opts->auxtrace_snapshot_size,
|
|
|
|
opts->auxtrace_mmap_pages * (size_t)page_size);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
|
|
|
|
pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
pr_debug2("Intel PT snapshot size: %zu\n",
|
|
|
|
opts->auxtrace_snapshot_size);
|
|
|
|
if (psb_period &&
|
|
|
|
opts->auxtrace_snapshot_size <= psb_period +
|
|
|
|
INTEL_PT_PSB_PERIOD_NEAR)
|
|
|
|
ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
|
|
|
|
opts->auxtrace_snapshot_size, psb_period);
|
|
|
|
}
|
|
|
|
|
2019-11-15 14:42:23 +02:00
|
|
|
/* Set default sizes for sample mode */
|
|
|
|
if (opts->auxtrace_sample_mode) {
|
|
|
|
size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
|
|
|
|
size_t min_sz = 0, max_sz = 0;
|
|
|
|
|
|
|
|
intel_pt_min_max_sample_sz(evlist, &min_sz, &max_sz);
|
|
|
|
if (!opts->auxtrace_mmap_pages && !privileged &&
|
|
|
|
opts->mmap_pages == UINT_MAX)
|
|
|
|
opts->mmap_pages = KiB(256) / page_size;
|
|
|
|
if (!opts->auxtrace_mmap_pages) {
|
|
|
|
size_t sz = round_up(max_sz, page_size) / page_size;
|
|
|
|
|
|
|
|
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
|
|
|
|
}
|
|
|
|
if (max_sz > opts->auxtrace_mmap_pages * (size_t)page_size) {
|
|
|
|
pr_err("Sample size %zu must not be greater than AUX area tracing mmap size %zu\n",
|
|
|
|
max_sz,
|
|
|
|
opts->auxtrace_mmap_pages * (size_t)page_size);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
pr_debug2("Intel PT min. sample size: %zu max. sample size: %zu\n",
|
|
|
|
min_sz, max_sz);
|
|
|
|
if (psb_period &&
|
|
|
|
min_sz <= psb_period + INTEL_PT_PSB_PERIOD_NEAR)
|
|
|
|
ui__warning("Intel PT sample size (%zu) may be too small for PSB period (%zu)\n",
|
|
|
|
min_sz, psb_period);
|
|
|
|
}
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
/* Set default sizes for full trace mode */
|
|
|
|
if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
|
|
|
|
if (privileged) {
|
|
|
|
opts->auxtrace_mmap_pages = MiB(4) / page_size;
|
|
|
|
} else {
|
|
|
|
opts->auxtrace_mmap_pages = KiB(128) / page_size;
|
|
|
|
if (opts->mmap_pages == UINT_MAX)
|
|
|
|
opts->mmap_pages = KiB(256) / page_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Validate auxtrace_mmap_pages */
|
|
|
|
if (opts->auxtrace_mmap_pages) {
|
|
|
|
size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
|
|
|
|
size_t min_sz;
|
|
|
|
|
2019-11-15 14:42:23 +02:00
|
|
|
if (opts->auxtrace_snapshot_mode || opts->auxtrace_sample_mode)
|
2015-07-17 19:33:41 +03:00
|
|
|
min_sz = KiB(4);
|
|
|
|
else
|
|
|
|
min_sz = KiB(8);
|
|
|
|
|
|
|
|
if (sz < min_sz || !is_power_of_2(sz)) {
|
|
|
|
pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
|
|
|
|
min_sz / 1024);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
perf intel-pt: Use aux_watermark
Turns out, the default setting of attr.aux_watermark to half of the total
buffer size is not very useful, especially with smaller buffers. The
problem is that, after half of the buffer is filled up, the kernel updates
->aux_head and sets up the next "transaction", while observing that
->aux_tail is still zero (as userspace haven't had the chance to update
it), meaning that the trace will have to stop at the end of this second
"transaction". This means, for example, that the second PERF_RECORD_AUX in
every trace comes with TRUNCATED flag set.
Setting attr.aux_watermark to quarter of the buffer gives enough space for
the ->aux_tail update to be observed and prevents the data loss.
The obligatory before/after showcase:
> # perf_before record -e intel_pt//u -m,8 uname
> Linux
> [ perf record: Woken up 6 times to write data ]
> Warning:
> AUX data lost 4 times out of 10!
>
> [ perf record: Captured and wrote 0.099 MB perf.data ]
> # perf record -e intel_pt//u -m,8 uname
> Linux
> [ perf record: Woken up 4 times to write data ]
> [ perf record: Captured and wrote 0.039 MB perf.data ]
The effect is still visible with large workloads and large buffers,
although less pronounced.
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210414154955.49603-3-alexander.shishkin@linux.intel.com
2021-04-14 18:49:55 +03:00
|
|
|
if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
|
2024-06-25 13:45:31 +03:00
|
|
|
size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4;
|
|
|
|
u32 aux_watermark = aw > UINT_MAX ? UINT_MAX : aw;
|
perf intel-pt: Use aux_watermark
Turns out, the default setting of attr.aux_watermark to half of the total
buffer size is not very useful, especially with smaller buffers. The
problem is that, after half of the buffer is filled up, the kernel updates
->aux_head and sets up the next "transaction", while observing that
->aux_tail is still zero (as userspace haven't had the chance to update
it), meaning that the trace will have to stop at the end of this second
"transaction". This means, for example, that the second PERF_RECORD_AUX in
every trace comes with TRUNCATED flag set.
Setting attr.aux_watermark to quarter of the buffer gives enough space for
the ->aux_tail update to be observed and prevents the data loss.
The obligatory before/after showcase:
> # perf_before record -e intel_pt//u -m,8 uname
> Linux
> [ perf record: Woken up 6 times to write data ]
> Warning:
> AUX data lost 4 times out of 10!
>
> [ perf record: Captured and wrote 0.099 MB perf.data ]
> # perf record -e intel_pt//u -m,8 uname
> Linux
> [ perf record: Woken up 4 times to write data ]
> [ perf record: Captured and wrote 0.039 MB perf.data ]
The effect is still visible with large workloads and large buffers,
although less pronounced.
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210414154955.49603-3-alexander.shishkin@linux.intel.com
2021-04-14 18:49:55 +03:00
|
|
|
|
|
|
|
intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
|
|
|
|
}
|
|
|
|
|
2023-08-23 01:08:08 -07:00
|
|
|
intel_pt_parse_terms(intel_pt_pmu, "tsc", &tsc_bit);
|
2015-07-17 19:33:41 +03:00
|
|
|
|
2019-07-21 13:24:29 +02:00
|
|
|
if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
|
2015-07-17 19:33:41 +03:00
|
|
|
have_timing_info = true;
|
|
|
|
else
|
|
|
|
have_timing_info = false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Per-cpu recording needs sched_switch events to distinguish different
|
|
|
|
* threads.
|
|
|
|
*/
|
2024-02-02 15:40:53 -08:00
|
|
|
if (have_timing_info && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
|
2020-05-28 15:08:58 +03:00
|
|
|
!record_opts__no_switch_events(opts)) {
|
2015-08-13 12:40:57 +03:00
|
|
|
if (perf_can_record_switch_events()) {
|
|
|
|
bool cpu_wide = !target__none(&opts->target) &&
|
|
|
|
!target__has_task(&opts->target);
|
|
|
|
|
2025-05-12 12:39:31 +03:00
|
|
|
if (ptr->all_switch_events && !cpu_wide && perf_can_record_cpu_wide()) {
|
2019-07-21 13:23:51 +02:00
|
|
|
struct evsel *switch_evsel;
|
2015-08-13 12:40:57 +03:00
|
|
|
|
2022-05-24 10:54:28 +03:00
|
|
|
switch_evsel = evlist__add_dummy_on_all_cpus(evlist);
|
|
|
|
if (!switch_evsel)
|
|
|
|
return -ENOMEM;
|
2015-08-13 12:40:57 +03:00
|
|
|
|
2019-07-21 13:24:29 +02:00
|
|
|
switch_evsel->core.attr.context_switch = 1;
|
2015-08-13 12:40:57 +03:00
|
|
|
switch_evsel->immediate = true;
|
|
|
|
|
2020-04-29 16:12:15 -03:00
|
|
|
evsel__set_sample_bit(switch_evsel, TID);
|
|
|
|
evsel__set_sample_bit(switch_evsel, TIME);
|
|
|
|
evsel__set_sample_bit(switch_evsel, CPU);
|
|
|
|
evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);
|
2015-08-13 12:40:57 +03:00
|
|
|
|
|
|
|
opts->record_switch_events = false;
|
|
|
|
ptr->have_sched_switch = 3;
|
|
|
|
} else {
|
|
|
|
opts->record_switch_events = true;
|
2016-08-15 10:23:04 +03:00
|
|
|
need_immediate = true;
|
2015-08-13 12:40:57 +03:00
|
|
|
if (cpu_wide)
|
|
|
|
ptr->have_sched_switch = 3;
|
|
|
|
else
|
|
|
|
ptr->have_sched_switch = 2;
|
|
|
|
}
|
|
|
|
} else {
|
perf build: Use libtraceevent from the system
Remove the LIBTRACEEVENT_DYNAMIC and LIBTRACEFS_DYNAMIC make command
line variables.
If libtraceevent isn't installed or NO_LIBTRACEEVENT=1 is passed to the
build, don't compile in libtraceevent and libtracefs support.
This also disables CONFIG_TRACE that controls "perf trace".
CONFIG_LIBTRACEEVENT is used to control enablement in Build/Makefiles,
HAVE_LIBTRACEEVENT is used in C code.
Without HAVE_LIBTRACEEVENT tracepoints are disabled and as such the
commands kmem, kwork, lock, sched and timechart are removed. The
majority of commands continue to work including "perf test".
Committer notes:
Fixed up a tools/perf/util/Build reject and added:
#include <traceevent/event-parse.h>
to tools/perf/util/scripting-engines/trace-event-perl.c.
Committer testing:
$ rpm -qi libtraceevent-devel
Name : libtraceevent-devel
Version : 1.5.3
Release : 2.fc36
Architecture: x86_64
Install Date: Mon 25 Jul 2022 03:20:19 PM -03
Group : Unspecified
Size : 27728
License : LGPLv2+ and GPLv2+
Signature : RSA/SHA256, Fri 15 Apr 2022 02:11:58 PM -03, Key ID 999f7cbf38ab71f4
Source RPM : libtraceevent-1.5.3-2.fc36.src.rpm
Build Date : Fri 15 Apr 2022 10:57:01 AM -03
Build Host : buildvm-x86-05.iad2.fedoraproject.org
Packager : Fedora Project
Vendor : Fedora Project
URL : https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/
Bug URL : https://bugz.fedoraproject.org/libtraceevent
Summary : Development headers of libtraceevent
Description :
Development headers of libtraceevent-libs
$
Default build:
$ ldd ~/bin/perf | grep tracee
libtraceevent.so.1 => /lib64/libtraceevent.so.1 (0x00007f1dcaf8f000)
$
# perf trace -e sched:* --max-events 10
0.000 migration/0/17 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, dest_cpu: 1)
0.005 migration/0/17 sched:sched_wake_idle_without_ipi(cpu: 1)
0.011 migration/0/17 sched:sched_switch(prev_comm: "", prev_pid: 17 (migration/0), prev_state: 1, next_comm: "", next_prio: 120)
1.173 :0/0 sched:sched_wakeup(comm: "", pid: 3138 (gnome-terminal-), prio: 120)
1.180 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 3138 (gnome-terminal-), next_prio: 120)
0.156 migration/1/21 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, orig_cpu: 1, dest_cpu: 2)
0.160 migration/1/21 sched:sched_wake_idle_without_ipi(cpu: 2)
0.166 migration/1/21 sched:sched_switch(prev_comm: "", prev_pid: 21 (migration/1), prev_state: 1, next_comm: "", next_prio: 120)
1.183 :0/0 sched:sched_wakeup(comm: "", pid: 1602985 (kworker/u16:0-f), prio: 120, target_cpu: 1)
1.186 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 1602985 (kworker/u16:0-f), next_prio: 120)
#
Had to tweak tools/perf/util/setup.py to make sure the python binding
shared object links with libtraceevent if -DHAVE_LIBTRACEEVENT is
present in CFLAGS.
Building with NO_LIBTRACEEVENT=1 uncovered some more build failures:
- Make building of data-convert-bt.c to CONFIG_LIBTRACEEVENT=y
- perf-$(CONFIG_LIBTRACEEVENT) += scripts/
- bpf_kwork.o needs also to be dependent on CONFIG_LIBTRACEEVENT=y
- The python binding needed some fixups and util/trace-event.c can't be
built and linked with the python binding shared object, so remove it
in tools/perf/util/setup.py and exclude it from the list of
dependencies in the python/perf.so Makefile.perf target.
Building without libtraceevent-devel installed uncovered more build
failures:
- The python binding tools/perf/util/python.c was assuming that
traceevent/parse-events.h was always available, which was the case
when we defaulted to using the in-kernel tools/lib/traceevent/ files,
now we need to enclose it under ifdef HAVE_LIBTRACEEVENT, just like
the other parts of it that deal with tracepoints.
- We have to ifdef the rules in the Build files with
CONFIG_LIBTRACEEVENT=y to build builtin-trace.c and
tools/perf/trace/beauty/ as we only ifdef setting CONFIG_TRACE=y when
setting NO_LIBTRACEEVENT=1 in the make command line, not when we don't
detect libtraceevent-devel installed in the system. Simplification here
to avoid these two ways of disabling builtin-trace.c and not having
CONFIG_TRACE=y when libtraceevent-devel isn't installed is the clean
way.
From Athira:
<quote>
tools/perf/arch/powerpc/util/Build
-perf-y += kvm-stat.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
</quote>
Then, ditto for arm64 and s390, detected by container cross build tests.
- s/390 uses test__checkevent_tracepoint() that is now only available if
HAVE_LIBTRACEEVENT is defined, enclose the callsite with ifder HAVE_LIBTRACEEVENT.
Also from Athira:
<quote>
With this change, I could successfully compile in these environment:
- Without libtraceevent-devel installed
- With libtraceevent-devel installed
- With “make NO_LIBTRACEEVENT=1”
</quote>
Then, finally rename CONFIG_TRACEEVENT to CONFIG_LIBTRACEEVENT for
consistency with other libraries detected in tools/perf/.
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: http://lore.kernel.org/lkml/20221205225940.3079667-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-12-05 14:59:39 -08:00
|
|
|
#ifdef HAVE_LIBTRACEEVENT
|
2015-08-13 12:40:57 +03:00
|
|
|
err = intel_pt_track_switches(evlist);
|
|
|
|
if (err == -EPERM)
|
|
|
|
pr_debug2("Unable to select sched:sched_switch\n");
|
|
|
|
else if (err)
|
|
|
|
return err;
|
|
|
|
else
|
|
|
|
ptr->have_sched_switch = 1;
|
perf build: Use libtraceevent from the system
Remove the LIBTRACEEVENT_DYNAMIC and LIBTRACEFS_DYNAMIC make command
line variables.
If libtraceevent isn't installed or NO_LIBTRACEEVENT=1 is passed to the
build, don't compile in libtraceevent and libtracefs support.
This also disables CONFIG_TRACE that controls "perf trace".
CONFIG_LIBTRACEEVENT is used to control enablement in Build/Makefiles,
HAVE_LIBTRACEEVENT is used in C code.
Without HAVE_LIBTRACEEVENT tracepoints are disabled and as such the
commands kmem, kwork, lock, sched and timechart are removed. The
majority of commands continue to work including "perf test".
Committer notes:
Fixed up a tools/perf/util/Build reject and added:
#include <traceevent/event-parse.h>
to tools/perf/util/scripting-engines/trace-event-perl.c.
Committer testing:
$ rpm -qi libtraceevent-devel
Name : libtraceevent-devel
Version : 1.5.3
Release : 2.fc36
Architecture: x86_64
Install Date: Mon 25 Jul 2022 03:20:19 PM -03
Group : Unspecified
Size : 27728
License : LGPLv2+ and GPLv2+
Signature : RSA/SHA256, Fri 15 Apr 2022 02:11:58 PM -03, Key ID 999f7cbf38ab71f4
Source RPM : libtraceevent-1.5.3-2.fc36.src.rpm
Build Date : Fri 15 Apr 2022 10:57:01 AM -03
Build Host : buildvm-x86-05.iad2.fedoraproject.org
Packager : Fedora Project
Vendor : Fedora Project
URL : https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/
Bug URL : https://bugz.fedoraproject.org/libtraceevent
Summary : Development headers of libtraceevent
Description :
Development headers of libtraceevent-libs
$
Default build:
$ ldd ~/bin/perf | grep tracee
libtraceevent.so.1 => /lib64/libtraceevent.so.1 (0x00007f1dcaf8f000)
$
# perf trace -e sched:* --max-events 10
0.000 migration/0/17 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, dest_cpu: 1)
0.005 migration/0/17 sched:sched_wake_idle_without_ipi(cpu: 1)
0.011 migration/0/17 sched:sched_switch(prev_comm: "", prev_pid: 17 (migration/0), prev_state: 1, next_comm: "", next_prio: 120)
1.173 :0/0 sched:sched_wakeup(comm: "", pid: 3138 (gnome-terminal-), prio: 120)
1.180 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 3138 (gnome-terminal-), next_prio: 120)
0.156 migration/1/21 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, orig_cpu: 1, dest_cpu: 2)
0.160 migration/1/21 sched:sched_wake_idle_without_ipi(cpu: 2)
0.166 migration/1/21 sched:sched_switch(prev_comm: "", prev_pid: 21 (migration/1), prev_state: 1, next_comm: "", next_prio: 120)
1.183 :0/0 sched:sched_wakeup(comm: "", pid: 1602985 (kworker/u16:0-f), prio: 120, target_cpu: 1)
1.186 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 1602985 (kworker/u16:0-f), next_prio: 120)
#
Had to tweak tools/perf/util/setup.py to make sure the python binding
shared object links with libtraceevent if -DHAVE_LIBTRACEEVENT is
present in CFLAGS.
Building with NO_LIBTRACEEVENT=1 uncovered some more build failures:
- Make building of data-convert-bt.c to CONFIG_LIBTRACEEVENT=y
- perf-$(CONFIG_LIBTRACEEVENT) += scripts/
- bpf_kwork.o needs also to be dependent on CONFIG_LIBTRACEEVENT=y
- The python binding needed some fixups and util/trace-event.c can't be
built and linked with the python binding shared object, so remove it
in tools/perf/util/setup.py and exclude it from the list of
dependencies in the python/perf.so Makefile.perf target.
Building without libtraceevent-devel installed uncovered more build
failures:
- The python binding tools/perf/util/python.c was assuming that
traceevent/parse-events.h was always available, which was the case
when we defaulted to using the in-kernel tools/lib/traceevent/ files,
now we need to enclose it under ifdef HAVE_LIBTRACEEVENT, just like
the other parts of it that deal with tracepoints.
- We have to ifdef the rules in the Build files with
CONFIG_LIBTRACEEVENT=y to build builtin-trace.c and
tools/perf/trace/beauty/ as we only ifdef setting CONFIG_TRACE=y when
setting NO_LIBTRACEEVENT=1 in the make command line, not when we don't
detect libtraceevent-devel installed in the system. Simplification here
to avoid these two ways of disabling builtin-trace.c and not having
CONFIG_TRACE=y when libtraceevent-devel isn't installed is the clean
way.
From Athira:
<quote>
tools/perf/arch/powerpc/util/Build
-perf-y += kvm-stat.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
</quote>
Then, ditto for arm64 and s390, detected by container cross build tests.
- s/390 uses test__checkevent_tracepoint() that is now only available if
HAVE_LIBTRACEEVENT is defined, enclose the callsite with ifder HAVE_LIBTRACEEVENT.
Also from Athira:
<quote>
With this change, I could successfully compile in these environment:
- Without libtraceevent-devel installed
- With libtraceevent-devel installed
- With “make NO_LIBTRACEEVENT=1”
</quote>
Then, finally rename CONFIG_TRACEEVENT to CONFIG_LIBTRACEEVENT for
consistency with other libraries detected in tools/perf/.
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: http://lore.kernel.org/lkml/20221205225940.3079667-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-12-05 14:59:39 -08:00
|
|
|
#endif
|
2015-08-13 12:40:57 +03:00
|
|
|
}
|
2015-07-17 19:33:41 +03:00
|
|
|
}
|
|
|
|
|
perf intel-pt: Add support for text poke events
Select text poke events when available and the kernel is being traced.
Process text poke events to invalidate entries in Intel PT's instruction
cache.
Example:
The example requires kernel config:
CONFIG_PROC_SYSCTL=y
CONFIG_SCHED_DEBUG=y
CONFIG_SCHEDSTATS=y
Before:
# perf record -o perf.data.before --kcore -a -e intel_pt//k -m,64M &
# cat /proc/sys/kernel/sched_schedstats
0
# echo 1 > /proc/sys/kernel/sched_schedstats
# cat /proc/sys/kernel/sched_schedstats
1
# echo 0 > /proc/sys/kernel/sched_schedstats
# cat /proc/sys/kernel/sched_schedstats
0
# kill %1
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 3.341 MB perf.data.before ]
[1]+ Terminated perf record -o perf.data.before --kcore -a -e intel_pt//k -m,64M
# perf script -i perf.data.before --itrace=e >/dev/null
Warning:
474 instruction trace errors
After:
# perf record -o perf.data.after --kcore -a -e intel_pt//k -m,64M &
# cat /proc/sys/kernel/sched_schedstats
0
# echo 1 > /proc/sys/kernel/sched_schedstats
# cat /proc/sys/kernel/sched_schedstats
1
# echo 0 > /proc/sys/kernel/sched_schedstats
# cat /proc/sys/kernel/sched_schedstats
0
# kill %1
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 2.646 MB perf.data.after ]
[1]+ Terminated perf record -o perf.data.after --kcore -a -e intel_pt//k -m,64M
# perf script -i perf.data.after --itrace=e >/dev/null
Example:
The example requires kernel config:
# CONFIG_FUNCTION_TRACER is not set
Before:
# perf record --kcore -m,64M -o t1 -a -e intel_pt//k &
# perf probe __schedule
Added new event:
probe:__schedule (on __schedule)
You can now use it in all perf tools, such as:
perf record -e probe:__schedule -aR sleep 1
# perf record -e probe:__schedule -aR sleep 1
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.026 MB perf.data (68 samples) ]
# perf probe -d probe:__schedule
Removed event: probe:__schedule
# kill %1
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 41.268 MB t1 ]
[1]+ Terminated perf record --kcore -m,64M -o t1 -a -e intel_pt//k
# perf script -i t1 --itrace=e >/dev/null
Warning:
207 instruction trace errors
After:
# perf record --kcore -m,64M -o t1 -a -e intel_pt//k &
# perf probe __schedule
Added new event:
probe:__schedule (on __schedule)
You can now use it in all perf tools, such as:
perf record -e probe:__schedule -aR sleep 1
# perf record -e probe:__schedule -aR sleep 1
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.028 MB perf.data (107 samples) ]
# perf probe -d probe:__schedule
Removed event: probe:__schedule
# kill %1
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 39.978 MB t1 ]
[1]+ Terminated perf record --kcore -m,64M -o t1 -a -e intel_pt//k
# perf script -i t1 --itrace=e >/dev/null
# perf script -i t1 --no-itrace -D | grep 'POKE\|KSYMBOL'
6 565303693547 0x291f18 [0x50]: PERF_RECORD_KSYMBOL addr ffffffffc027a000 len 4096 type 2 flags 0x0 name kprobe_insn_page
6 565303697010 0x291f68 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffc027a000 old len 0 new len 6
6 565303838278 0x291fa8 [0x50]: PERF_RECORD_KSYMBOL addr ffffffffc027c000 len 4096 type 2 flags 0x0 name kprobe_optinsn_page
6 565303848286 0x291ff8 [0xa0]: PERF_RECORD_TEXT_POKE addr 0xffffffffc027c000 old len 0 new len 106
6 565369336743 0x292af8 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffff88ab8890 old len 5 new len 5
7 566434327704 0x217c208 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffff88ab8890 old len 5 new len 5
6 566456313475 0x293198 [0xa0]: PERF_RECORD_TEXT_POKE addr 0xffffffffc027c000 old len 106 new len 0
6 566456314935 0x293238 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffc027a000 old len 6 new len 0
Example:
The example requires kernel config:
CONFIG_FUNCTION_TRACER=y
Before:
# perf record --kcore -m,64M -o t1 -a -e intel_pt//k &
# perf probe __kmalloc
Added new event:
probe:__kmalloc (on __kmalloc)
You can now use it in all perf tools, such as:
perf record -e probe:__kmalloc -aR sleep 1
# perf record -e probe:__kmalloc -aR sleep 1
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.022 MB perf.data (6 samples) ]
# perf probe -d probe:__kmalloc
Removed event: probe:__kmalloc
# kill %1
[ perf record: Woken up 2 times to write data ]
[ perf record: Captured and wrote 43.850 MB t1 ]
[1]+ Terminated perf record --kcore -m,64M -o t1 -a -e intel_pt//k
# perf script -i t1 --itrace=e >/dev/null
Warning:
8 instruction trace errors
After:
# perf record --kcore -m,64M -o t1 -a -e intel_pt//k &
# perf probe __kmalloc
Added new event:
probe:__kmalloc (on __kmalloc)
You can now use it in all perf tools, such as:
perf record -e probe:__kmalloc -aR sleep 1
# perf record -e probe:__kmalloc -aR sleep 1
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.037 MB perf.data (206 samples) ]
# perf probe -d probe:__kmalloc
Removed event: probe:__kmalloc
# kill %1
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 41.442 MB t1 ]
[1]+ Terminated perf record --kcore -m,64M -o t1 -a -e intel_pt//k
# perf script -i t1 --itrace=e >/dev/null
# perf script -i t1 --no-itrace -D | grep 'POKE\|KSYMBOL'
5 312216133258 0x8bafe0 [0x50]: PERF_RECORD_KSYMBOL addr ffffffffc0360000 len 415 type 2 flags 0x0 name ftrace_trampoline
5 312216133494 0x8bb030 [0x1d8]: PERF_RECORD_TEXT_POKE addr 0xffffffffc0360000 old len 0 new len 415
5 312216229563 0x8bb208 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffac6016f5 old len 5 new len 5
5 312216239063 0x8bb248 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffac601803 old len 5 new len 5
5 312216727230 0x8bb288 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffabbea190 old len 5 new len 5
5 312216739322 0x8bb2c8 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffac6016f5 old len 5 new len 5
5 312216748321 0x8bb308 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffac601803 old len 5 new len 5
7 313287163462 0x2817430 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffac6016f5 old len 5 new len 5
7 313287174890 0x2817470 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffac601803 old len 5 new len 5
7 313287818979 0x28174b0 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffabbea190 old len 5 new len 5
7 313287829357 0x28174f0 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffac6016f5 old len 5 new len 5
7 313287841246 0x2817530 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffac601803 old len 5 new len 5
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: x86@kernel.org
Link: http://lore.kernel.org/lkml/20200512121922.8997-14-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-05-12 15:19:20 +03:00
|
|
|
if (have_timing_info && !intel_pt_evsel->core.attr.exclude_kernel &&
|
|
|
|
perf_can_record_text_poke_events() && perf_can_record_cpu_wide())
|
|
|
|
opts->text_poke = true;
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
if (intel_pt_evsel) {
|
|
|
|
/*
|
|
|
|
* To obtain the auxtrace buffer file descriptor, the auxtrace
|
|
|
|
* event must come first.
|
|
|
|
*/
|
2020-11-30 14:52:44 -03:00
|
|
|
evlist__to_front(evlist, intel_pt_evsel);
|
2015-07-17 19:33:41 +03:00
|
|
|
/*
|
|
|
|
* In the case of per-cpu mmaps, we need the CPU on the
|
|
|
|
* AUX event.
|
|
|
|
*/
|
2024-02-02 15:40:53 -08:00
|
|
|
if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
|
2020-04-29 16:12:15 -03:00
|
|
|
evsel__set_sample_bit(intel_pt_evsel, CPU);
|
2015-07-17 19:33:41 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Add dummy event to keep tracking */
|
|
|
|
if (opts->full_auxtrace) {
|
2022-05-24 10:54:29 +03:00
|
|
|
bool need_system_wide_tracking;
|
2019-07-21 13:23:51 +02:00
|
|
|
struct evsel *tracking_evsel;
|
2015-07-17 19:33:41 +03:00
|
|
|
|
2022-05-24 10:54:29 +03:00
|
|
|
/*
|
|
|
|
* User space tasks can migrate between CPUs, so when tracing
|
|
|
|
* selected CPUs, sideband for all CPUs is still needed.
|
|
|
|
*/
|
2022-10-12 11:22:59 +03:00
|
|
|
need_system_wide_tracking = opts->target.cpu_list &&
|
2022-05-24 10:54:29 +03:00
|
|
|
!intel_pt_evsel->core.attr.exclude_user;
|
2015-07-17 19:33:41 +03:00
|
|
|
|
2022-05-24 10:54:29 +03:00
|
|
|
tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking);
|
|
|
|
if (!tracking_evsel)
|
|
|
|
return -ENOMEM;
|
2015-07-17 19:33:41 +03:00
|
|
|
|
2020-11-30 14:39:41 -03:00
|
|
|
evlist__set_tracking_event(evlist, tracking_evsel);
|
2015-07-17 19:33:41 +03:00
|
|
|
|
2016-08-15 10:23:04 +03:00
|
|
|
if (need_immediate)
|
|
|
|
tracking_evsel->immediate = true;
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
/* In per-cpu case, always need the time of mmap events etc */
|
2024-02-02 15:40:53 -08:00
|
|
|
if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
|
2020-04-29 16:12:15 -03:00
|
|
|
evsel__set_sample_bit(tracking_evsel, TIME);
|
2015-08-13 12:40:57 +03:00
|
|
|
/* And the CPU for switch events */
|
2020-04-29 16:12:15 -03:00
|
|
|
evsel__set_sample_bit(tracking_evsel, CPU);
|
2015-08-13 12:40:57 +03:00
|
|
|
}
|
2020-04-29 16:12:15 -03:00
|
|
|
evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
|
2015-07-17 19:33:41 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Warn the user when we do not have enough information to decode i.e.
|
|
|
|
* per-cpu with no sched_switch (except workload-only).
|
|
|
|
*/
|
2024-02-02 15:40:53 -08:00
|
|
|
if (!ptr->have_sched_switch && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
|
2020-05-28 15:08:59 +03:00
|
|
|
!target__none(&opts->target) &&
|
|
|
|
!intel_pt_evsel->core.attr.exclude_user)
|
2015-07-17 19:33:41 +03:00
|
|
|
ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int intel_pt_snapshot_start(struct auxtrace_record *itr)
|
|
|
|
{
|
|
|
|
struct intel_pt_recording *ptr =
|
|
|
|
container_of(itr, struct intel_pt_recording, itr);
|
2019-07-21 13:23:51 +02:00
|
|
|
struct evsel *evsel;
|
2015-07-17 19:33:41 +03:00
|
|
|
|
2016-06-23 11:26:15 -03:00
|
|
|
evlist__for_each_entry(ptr->evlist, evsel) {
|
2019-07-21 13:24:29 +02:00
|
|
|
if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
|
2019-07-21 13:24:03 +02:00
|
|
|
return evsel__disable(evsel);
|
2015-07-17 19:33:41 +03:00
|
|
|
}
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
|
|
|
|
{
|
|
|
|
struct intel_pt_recording *ptr =
|
|
|
|
container_of(itr, struct intel_pt_recording, itr);
|
2019-07-21 13:23:51 +02:00
|
|
|
struct evsel *evsel;
|
2015-07-17 19:33:41 +03:00
|
|
|
|
2016-06-23 11:26:15 -03:00
|
|
|
evlist__for_each_entry(ptr->evlist, evsel) {
|
2019-07-21 13:24:29 +02:00
|
|
|
if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
|
2019-07-21 13:24:02 +02:00
|
|
|
return evsel__enable(evsel);
|
2015-07-17 19:33:41 +03:00
|
|
|
}
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
|
|
|
|
{
|
|
|
|
const size_t sz = sizeof(struct intel_pt_snapshot_ref);
|
|
|
|
int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
|
|
|
|
struct intel_pt_snapshot_ref *refs;
|
|
|
|
|
|
|
|
if (!new_cnt)
|
|
|
|
new_cnt = 16;
|
|
|
|
|
|
|
|
while (new_cnt <= idx)
|
|
|
|
new_cnt *= 2;
|
|
|
|
|
|
|
|
refs = calloc(new_cnt, sz);
|
|
|
|
if (!refs)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
memcpy(refs, ptr->snapshot_refs, cnt * sz);
|
|
|
|
|
|
|
|
ptr->snapshot_refs = refs;
|
|
|
|
ptr->snapshot_ref_cnt = new_cnt;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ptr->snapshot_ref_cnt; i++)
|
|
|
|
zfree(&ptr->snapshot_refs[i].ref_buf);
|
|
|
|
zfree(&ptr->snapshot_refs);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void intel_pt_recording_free(struct auxtrace_record *itr)
|
|
|
|
{
|
|
|
|
struct intel_pt_recording *ptr =
|
|
|
|
container_of(itr, struct intel_pt_recording, itr);
|
|
|
|
|
|
|
|
intel_pt_free_snapshot_refs(ptr);
|
|
|
|
free(ptr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
|
|
|
|
size_t snapshot_buf_size)
|
|
|
|
{
|
|
|
|
size_t ref_buf_size = ptr->snapshot_ref_buf_size;
|
|
|
|
void *ref_buf;
|
|
|
|
|
|
|
|
ref_buf = zalloc(ref_buf_size);
|
|
|
|
if (!ref_buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ptr->snapshot_refs[idx].ref_buf = ref_buf;
|
|
|
|
ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
|
|
|
|
size_t snapshot_buf_size)
|
|
|
|
{
|
|
|
|
const size_t max_size = 256 * 1024;
|
|
|
|
size_t buf_size = 0, psb_period;
|
|
|
|
|
|
|
|
if (ptr->snapshot_size <= 64 * 1024)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
|
|
|
|
if (psb_period)
|
|
|
|
buf_size = psb_period * 2;
|
|
|
|
|
|
|
|
if (!buf_size || buf_size > max_size)
|
|
|
|
buf_size = max_size;
|
|
|
|
|
|
|
|
if (buf_size >= snapshot_buf_size)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (buf_size >= ptr->snapshot_size / 2)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return buf_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
|
|
|
|
size_t snapshot_buf_size)
|
|
|
|
{
|
|
|
|
if (ptr->snapshot_init_done)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ptr->snapshot_init_done = true;
|
|
|
|
|
|
|
|
ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
|
|
|
|
snapshot_buf_size);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous.  It is assumed that @compare_size <=
 * @buf2_size and @offs2 < @buf2_size.  This function returns %false if the
 * bytes are identical, %true otherwise.
 */
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
				     void *buf2, size_t offs2, size_t buf2_size)
{
	size_t part_size;

	/* Range does not wrap: a single comparison suffices */
	if (offs2 + compare_size <= buf2_size)
		return memcmp(buf1, buf2 + offs2, compare_size);

	/*
	 * Compare the contiguous bytes up to the end of @buf2, then the
	 * remainder which wraps around to the start of @buf2.  (Fix: the two
	 * chunk sizes were previously swapped, comparing the wrong bytes and
	 * able to read past the end of @buf2; the wrapped case was not hit
	 * in practice because current callers always pass @offs2 such that
	 * @offs2 + @compare_size == @buf2_size.)
	 */
	part_size = buf2_size - offs2;
	if (memcmp(buf1, buf2 + offs2, part_size))
		return true;

	return memcmp(buf1 + part_size, buf2, compare_size - part_size);
}
|
|
|
|
|
|
|
|
/*
 * Decide whether the AUX buffer has wrapped by inspecting the reference
 * region.  If the write head lies inside the reference region, it has
 * certainly been overwritten; otherwise a wrap happened iff the reference
 * bytes changed.
 */
static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
				 size_t ref_size, size_t buf_size,
				 void *data, size_t head)
{
	size_t ref_end = ref_offset + ref_size;
	bool overwritten;

	if (ref_end > buf_size)
		/* The reference region itself wraps around the buffer end */
		overwritten = head > ref_offset || head < ref_end - buf_size;
	else
		overwritten = head > ref_offset && head < ref_end;

	if (overwritten)
		return true;

	return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
					buf_size);
}
|
|
|
|
|
|
|
|
/*
 * Refresh the reference buffer with the most recent @ref_size bytes of the
 * circular buffer, relative to the current write @head.
 */
static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
			      void *data, size_t head)
{
	size_t tail;

	if (head >= ref_size) {
		/* The bytes are contiguous, ending at head */
		memcpy(ref_buf, data + head - ref_size, ref_size);
		return;
	}

	/*
	 * Wrapped: take the 'head' bytes from the start of the buffer, then
	 * the remainder from the end of the buffer.
	 */
	tail = ref_size - head;
	memcpy(ref_buf, data, head);
	memcpy(ref_buf + head, data + buf_size - tail, tail);
}
|
|
|
|
|
|
|
|
static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
|
|
|
|
struct auxtrace_mmap *mm, unsigned char *data,
|
|
|
|
u64 head)
|
|
|
|
{
|
|
|
|
struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
|
|
|
|
bool wrapped;
|
|
|
|
|
|
|
|
wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
|
|
|
|
ptr->snapshot_ref_buf_size, mm->len,
|
|
|
|
data, head);
|
|
|
|
|
|
|
|
intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
|
|
|
|
data, head);
|
|
|
|
|
|
|
|
return wrapped;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fallback wrap detection: a freshly mapped AUX buffer is zero-filled, so
 * any non-zero word near the end of the buffer means it has wrapped at
 * least once.  Only the last 512 64-bit words are examined.
 */
static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
	int end = buf_size >> 3;	/* number of u64 words */
	int start = end - 512;
	int i;

	if (start < 0)
		start = 0;

	for (i = start; i < end; i++) {
		if (data[i])
			return true;
	}

	return false;
}
|
|
|
|
|
|
|
|
/*
 * auxtrace_record 'find_snapshot' callback: determine whether the AUX
 * buffer for mmap index @idx has wrapped, and adjust *@old / *@head so
 * that they look like full-trace positions (where 'old' < 'head').
 */
static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
				  struct auxtrace_mmap *mm, unsigned char *data,
				  u64 *head, u64 *old)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	bool wrapped;
	int err;

	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head);

	err = intel_pt_snapshot_init(ptr, mm->len);
	if (err)
		goto out_err;

	/* Make sure there is a snapshot reference slot for this mmap index */
	if (idx >= ptr->snapshot_ref_cnt) {
		err = intel_pt_alloc_snapshot_refs(ptr, idx);
		if (err)
			goto out_err;
	}

	if (ptr->snapshot_ref_buf_size) {
		/* Wrap detection by comparing reference bytes */
		if (!ptr->snapshot_refs[idx].ref_buf) {
			err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
			if (err)
				goto out_err;
		}
		wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
	} else {
		/* Wrap detection by looking for non-zero trailing words */
		wrapped = ptr->snapshot_refs[idx].wrapped;
		if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
			ptr->snapshot_refs[idx].wrapped = true;
			wrapped = true;
		}
	}

	/*
	 * In full trace mode 'head' continually increases.  However in
	 * snapshot mode 'head' is an offset within the buffer.  Here 'old'
	 * and 'head' are adjusted to match the full trace case which expects
	 * that 'old' is always less than 'head'.
	 */
	if (wrapped) {
		*old = *head;
		*head += mm->len;
	} else {
		*old = mm->mask ? (*old & mm->mask) : (*old % mm->len);
		if (*old > *head)
			*head += mm->len;
	}

	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

	return 0;

out_err:
	pr_err("%s: failed, error %d\n", __func__, err);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * auxtrace_record 'reference' callback: use the CPU timestamp counter as a
 * reference value recorded alongside the trace.
 */
static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
	return rdtsc();
}
|
|
|
|
|
2025-05-12 12:39:31 +03:00
|
|
|
static int intel_pt_perf_config(const char *var, const char *value, void *data)
|
|
|
|
{
|
|
|
|
struct intel_pt_recording *ptr = data;
|
|
|
|
|
|
|
|
if (!strcmp(var, "intel-pt.all-switch-events"))
|
|
|
|
ptr->all_switch_events = perf_config_bool(var, value);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
struct auxtrace_record *intel_pt_recording_init(int *err)
|
|
|
|
{
|
2023-05-27 00:22:03 -07:00
|
|
|
struct perf_pmu *intel_pt_pmu = perf_pmus__find(INTEL_PT_PMU_NAME);
|
2015-07-17 19:33:41 +03:00
|
|
|
struct intel_pt_recording *ptr;
|
|
|
|
|
|
|
|
if (!intel_pt_pmu)
|
|
|
|
return NULL;
|
|
|
|
|
2016-03-08 10:38:53 +02:00
|
|
|
if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
|
|
|
|
*err = -errno;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
ptr = zalloc(sizeof(struct intel_pt_recording));
|
|
|
|
if (!ptr) {
|
|
|
|
*err = -ENOMEM;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2025-05-12 12:39:31 +03:00
|
|
|
perf_config(intel_pt_perf_config, ptr);
|
|
|
|
|
2015-07-17 19:33:41 +03:00
|
|
|
ptr->intel_pt_pmu = intel_pt_pmu;
|
|
|
|
ptr->itr.recording_options = intel_pt_recording_options;
|
|
|
|
ptr->itr.info_priv_size = intel_pt_info_priv_size;
|
|
|
|
ptr->itr.info_fill = intel_pt_info_fill;
|
|
|
|
ptr->itr.free = intel_pt_recording_free;
|
|
|
|
ptr->itr.snapshot_start = intel_pt_snapshot_start;
|
|
|
|
ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
|
|
|
|
ptr->itr.find_snapshot = intel_pt_find_snapshot;
|
|
|
|
ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
|
|
|
|
ptr->itr.reference = intel_pt_reference;
|
2020-02-17 10:23:00 +02:00
|
|
|
ptr->itr.read_finish = auxtrace_record__read_finish;
|
2019-11-15 14:42:23 +02:00
|
|
|
/*
|
|
|
|
* Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K
|
|
|
|
* should give at least 1 PSB per sample.
|
|
|
|
*/
|
|
|
|
ptr->itr.default_aux_sample_size = 4096;
|
2015-07-17 19:33:41 +03:00
|
|
|
return &ptr->itr;
|
|
|
|
}
|