linux/tools/perf/arch/x86/util/intel-pt.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <errno.h>
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <cpuid.h>
#include "../../../util/session.h"
#include "../../../util/event.h"
#include "../../../util/evlist.h"
#include "../../../util/evsel.h"
#include "../../../util/evsel_config.h"
#include "../../../util/config.h"
#include "../../../util/cpumap.h"
#include "../../../util/mmap.h"
#include <subcmd/parse-options.h>
#include "../../../util/parse-events.h"
#include "../../../util/pmus.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
#include "../../../util/perf_api_probe.h"
#include "../../../util/record.h"
#include "../../../util/target.h"
#include "../../../util/tsc.h"
#include <internal/lib.h> // page_size
#include "../../../util/intel-pt.h"
#include <api/fs/fs.h>
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)
#define INTEL_PT_PSB_PERIOD_NEAR 256
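
/*
 * Worked example (for illustration): KiB(64) is 65536 bytes and
 * KiB_MASK(64) is 0xffff, i.e. the mask for offsets within a 64 KiB
 * buffer; MiB() and MiB_MASK() behave the same way at megabyte scale.
 */
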
struct intel_pt_snapshot_ref {
        void *ref_buf;
        size_t ref_offset;
        bool wrapped;
};

struct intel_pt_recording {
        struct auxtrace_record itr;
        struct perf_pmu *intel_pt_pmu;
        int have_sched_switch;
        struct evlist *evlist;
        bool all_switch_events;
        bool snapshot_mode;
        bool snapshot_init_done;
        size_t snapshot_size;
        size_t snapshot_ref_buf_size;
        int snapshot_ref_cnt;
        struct intel_pt_snapshot_ref *snapshot_refs;
        size_t priv_size;
};

static int intel_pt_parse_terms_with_default(const struct perf_pmu *pmu,
                                             const char *str,
                                             u64 *config)
{
        struct parse_events_terms terms;
        struct perf_event_attr attr = { .size = 0, };
        int err;

        parse_events_terms__init(&terms);
        err = parse_events_terms(&terms, str, /*input=*/ NULL);
        if (err)
                goto out_free;

        attr.config = *config;
        err = perf_pmu__config_terms(pmu, &attr, &terms, /*zero=*/true, /*apply_hardcoded=*/false,
                                     /*err=*/NULL);
        if (err)
                goto out_free;

        *config = attr.config;
out_free:
        parse_events_terms__exit(&terms);
        return err;
}

static int intel_pt_parse_terms(const struct perf_pmu *pmu, const char *str, u64 *config)
{
        *config = 0;
        return intel_pt_parse_terms_with_default(pmu, str, config);
}

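/*
 * Usage sketch (for illustration, assuming an already looked-up intel_pt
 * PMU): parsing a term string against the PMU's sysfs format definitions
 * fills in the matching perf_event_attr.config bits, e.g.
 *
 *      u64 config;
 *
 *      intel_pt_parse_terms(intel_pt_pmu, "tsc,mtc,mtc_period=3", &config);
 */
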
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
        const u64 top_bit = 1ULL << 63;
        u64 res = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if (mask & top_bit) {
                        res <<= 1;
                        if (bits & top_bit)
                                res |= 1;
                }
                mask <<= 1;
                bits <<= 1;
        }

        return res;
}

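/*
 * Worked example (for illustration): with mask 0x0f00 and bits 0x0a55,
 * only bit positions 8-11 are selected and shifted down, so the result is
 * 0xa. In effect this extracts the field described by a format mask from
 * a raw config value.
 */
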
static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
                                struct evlist *evlist, u64 *res)
{
        struct evsel *evsel;
        u64 mask;

        *res = 0;

        mask = perf_pmu__format_bits(intel_pt_pmu, str);
        if (!mask)
                return -EINVAL;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.attr.type == intel_pt_pmu->type) {
                        *res = intel_pt_masked_bits(mask, evsel->core.attr.config);
                        return 0;
                }
        }

        return -EINVAL;
}

static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
                                  struct evlist *evlist)
{
        u64 val;
        int err, topa_multiple_entries;
        size_t psb_period;

        if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
                                "%d", &topa_multiple_entries) != 1)
                topa_multiple_entries = 0;

        /*
         * Use caps/topa_multiple_entries to indicate early hardware that had
         * extra frequent PSBs.
         */
        if (!topa_multiple_entries) {
                psb_period = 256;
                goto out;
        }

        err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
        if (err)
                val = 0;

        psb_period = 1 << (val + 11);
out:
        pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
        return psb_period;
}

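/*
 * Worked example (for illustration): a psb_period value of 3 gives
 * 1 << (3 + 11) = 16384, i.e. roughly 16 KiB of trace data between PSB
 * packets, while hardware without caps/topa_multiple_entries is assumed
 * to emit PSBs about every 256 bytes.
 */
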
static int intel_pt_pick_bit(int bits, int target)
{
        int pos, pick = -1;

        for (pos = 0; bits; bits >>= 1, pos++) {
                if (bits & 1) {
                        if (pos <= target || pick < 0)
                                pick = pos;
                        if (pos >= target)
                                break;
                }
        }

        return pick;
}

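/*
 * Worked example (for illustration): if the supported values are the
 * bitmask 0x249 (bits 0, 3, 6 and 9 set) and the target is 3, the loop
 * stops at bit 3 and returns 3; with bitmask 0x41 (bits 0 and 6 set) it
 * falls back to the nearest lower supported value and returns 0.
 */
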
static u64 intel_pt_default_config(const struct perf_pmu *intel_pt_pmu)
{
        char buf[256];
        int mtc, mtc_periods = 0, mtc_period;
        int psb_cyc, psb_periods, psb_period;
        int pos = 0;
        u64 config;
        char c;
        int dirfd;

        dirfd = perf_pmu__event_source_devices_fd();
        pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

        if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc", "%d",
                                   &mtc) != 1)
                mtc = 1;
        if (mtc) {
                if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc_periods", "%x",
                                           &mtc_periods) != 1)
                        mtc_periods = 0;
                if (mtc_periods) {
                        mtc_period = intel_pt_pick_bit(mtc_periods, 3);
                        pos += scnprintf(buf + pos, sizeof(buf) - pos,
                                         ",mtc,mtc_period=%d", mtc_period);
                }
        }

        if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_cyc", "%d",
                                   &psb_cyc) != 1)
                psb_cyc = 1;

        if (psb_cyc && mtc_periods) {
                if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_periods", "%x",
                                           &psb_periods) != 1)
                        psb_periods = 0;
                if (psb_periods) {
                        psb_period = intel_pt_pick_bit(psb_periods, 3);
                        pos += scnprintf(buf + pos, sizeof(buf) - pos,
                                         ",psb_period=%d", psb_period);
                }
        }

        if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
            perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/branch", "%c", &c) == 1)
                pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");
pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);
intel_pt_parse_terms(intel_pt_pmu, buf, &config);
close(dirfd);
return config;
}
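/*
 * Parse the argument to the -S/--snapshot option: an optional snapshot buffer
 * size accepted by strtoull() (decimal, octal or hex). Zero, or no argument,
 * means the default size is chosen later.
 */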
static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
struct record_opts *opts,
const char *str)
{
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
unsigned long long snapshot_size = 0;
char *endptr;
if (str) {
snapshot_size = strtoull(str, &endptr, 0);
if (*endptr || snapshot_size > SIZE_MAX)
return -1;
}
opts->auxtrace_snapshot_mode = true;
opts->auxtrace_snapshot_size = snapshot_size;
ptr->snapshot_size = snapshot_size;
return 0;
}
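/*
 * perf_event_attr_init_default callback for the intel_pt PMU: the default
 * config terms are derived by scanning sysfs, so compute them only once and
 * cache the result.
 */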
void intel_pt_pmu_default_config(const struct perf_pmu *intel_pt_pmu,
struct perf_event_attr *attr)
{
static u64 config;
static bool initialized;
if (!initialized) {
config = intel_pt_default_config(intel_pt_pmu);
initialized = true;
}
attr->config = config;
}
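/* Return the address filter string of the first Intel PT event, if any */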
static const char *intel_pt_find_filter(struct evlist *evlist,
struct perf_pmu *intel_pt_pmu)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == intel_pt_pmu->type)
return evsel->filter;
}
return NULL;
}
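/*
 * Bytes needed to store the filter string: NUL-terminated and rounded up to a
 * multiple of 8 so that it packs into u64 slots.
 */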
static size_t intel_pt_filter_bytes(const char *filter)
{
size_t len = filter ? strlen(filter) : 0;
return len ? roundup(len + 1, 8) : 0;
}
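/*
 * Size of the AUXTRACE_INFO private data: the fixed array of
 * INTEL_PT_AUXTRACE_PRIV_MAX u64 values, the filter string, and one u64 for
 * the Event Trace capability flag.
 */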
static size_t
intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
{
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
const char *filter = intel_pt_find_filter(evlist, ptr->intel_pt_pmu);
ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) +
intel_pt_filter_bytes(filter);
ptr->priv_size += sizeof(u64); /* Cap Event Trace */
return ptr->priv_size;
}
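/*
 * CPUID leaf 0x15 reports the TSC to core crystal clock (CTC) ratio as
 * EBX / EAX. The decoder uses it to relate CTC-based timing (MTC packets)
 * to TSC.
 */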
static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
__get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
*n = ebx;
*d = eax;
}
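/*
 * Fill the PERF_RECORD_AUXTRACE_INFO private data (PMU type, TSC conversion
 * parameters, selected config bits, filter string, etc.) that the Intel PT
 * decoder reads back at report time.
 */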
static int intel_pt_info_fill(struct auxtrace_record *itr,
struct perf_session *session,
struct perf_record_auxtrace_info *auxtrace_info,
size_t priv_size)
{
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
struct perf_event_mmap_page *pc;
struct perf_tsc_conversion tc = { .time_mult = 0, };
bool cap_user_time_zero = false, per_cpu_mmaps;
u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
unsigned long max_non_turbo_ratio;
size_t filter_str_len;
const char *filter;
int event_trace;
__u64 *info;
int err;
if (priv_size != ptr->priv_size)
return -EINVAL;
intel_pt_parse_terms(intel_pt_pmu, "tsc", &tsc_bit);
intel_pt_parse_terms(intel_pt_pmu, "noretcomp", &noretcomp_bit);
intel_pt_parse_terms(intel_pt_pmu, "mtc", &mtc_bit);
mtc_freq_bits = perf_pmu__format_bits(intel_pt_pmu, "mtc_period");
intel_pt_parse_terms(intel_pt_pmu, "cyc", &cyc_bit);
intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);
if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio",
"%lu", &max_non_turbo_ratio) != 1)
max_non_turbo_ratio = 0;
if (perf_pmu__scan_file(intel_pt_pmu, "caps/event_trace",
"%d", &event_trace) != 1)
event_trace = 0;
filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
filter_str_len = filter ? strlen(filter) : 0;
if (!session->evlist->core.nr_mmaps)
return -EINVAL;
pc = session->evlist->mmap[0].core.base;
if (pc) {
err = perf_read_tsc_conversion(pc, &tc);
if (err) {
if (err != -EOPNOTSUPP)
return err;
} else {
cap_user_time_zero = tc.time_mult != 0;
}
if (!cap_user_time_zero)
ui__warning("Intel Processor Trace: TSC not available\n");
}
per_cpu_mmaps = !perf_cpu_map__is_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);
auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO] = max_non_turbo_ratio;
auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] = filter_str_len;
info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
if (filter_str_len) {
size_t len = intel_pt_filter_bytes(filter);
strncpy((char *)info, filter, len);
info += len >> 3;
}
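/* Append the Event Trace flag accounted for in intel_pt_info_priv_size() */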
*info++ = event_trace;
return 0;
}
#ifdef HAVE_LIBTRACEEVENT
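/*
 * Add a "sched:sched_switch" tracepoint event so that context switches can be
 * followed, as a fallback when context switch events cannot be recorded.
 */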
static int intel_pt_track_switches(struct evlist *evlist)
{
const char *sched_switch = "sched:sched_switch";
struct evsel *evsel;
int err;
if (!evlist__can_select_event(evlist, sched_switch))
return -EPERM;
evsel = evlist__add_sched_switch(evlist, true);
if (IS_ERR(evsel)) {
err = PTR_ERR(evsel);
pr_debug2("%s: failed to create %s, error = %d\n",
__func__, sched_switch, err);
return err;
}
evsel->immediate = true;
return 0;
}
#endif
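/*
 * kvm_intel pt_mode == 1 (host/guest mode) means PT state is context-switched
 * for guests, so the host cannot trace the guest and it must be excluded.
 */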
static bool intel_pt_exclude_guest(void)
{
int pt_mode;
if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode))
pt_mode = 0;
return pt_mode == 1;
}
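/*
 * Render a bitmask of valid values as a human-readable string, using ranges
 * where possible, e.g. "0-5" or "0,3,6,9".
 */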
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
unsigned int val, last = 0, state = 1;
int p = 0;
str[0] = '\0';
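/*
 * States: 1 = nothing printed yet, 0 = between runs (leading comma needed),
 * 2 = first value of a run printed, 3 = run of two values, 4 = run of three
 * or more. A finished run is closed with ",last" (state 3) or "-last" (state 4).
 */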
for (val = 0; val <= 64; val++, valid >>= 1) {
if (valid & 1) {
last = val;
switch (state) {
case 0:
p += scnprintf(str + p, len - p, ",");
/* Fall through */
case 1:
p += scnprintf(str + p, len - p, "%u", val);
state = 2;
break;
case 2:
state = 3;
break;
case 3:
state = 4;
break;
default:
break;
}
} else {
switch (state) {
case 3:
p += scnprintf(str + p, len - p, ",%u", last);
state = 0;
break;
case 4:
p += scnprintf(str + p, len - p, "-%u", last);
state = 0;
break;
default:
break;
}
if (state != 1)
state = 0;
}
}
}
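/*
 * Validate a single config term (e.g. cyc_thresh, mtc_period, psb_period)
 * against the corresponding caps bitmask, printing the list of valid values
 * if the requested value is not supported.
 */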
static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu, int dirfd,
const char *caps, const char *name,
const char *supported, u64 config)
{
char valid_str[256];
unsigned int shift;
unsigned long long valid;
u64 bits;
int ok;
if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, caps, "%llx", &valid) != 1)
valid = 0;
if (supported &&
perf_pmu__scan_file_at(intel_pt_pmu, dirfd, supported, "%d", &ok) == 1 && !ok)
valid = 0;
valid |= 1;
bits = perf_pmu__format_bits(intel_pt_pmu, name);
config &= bits;
for (shift = 0; bits && !(bits & 1); shift++)
bits >>= 1;
config >>= shift;
if (config > 63)
goto out_err;
if (valid & ((u64)1 << config))
return 0;
out_err:
intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
pr_err("Invalid %s for %s. Valid values are: %s\n",
name, INTEL_PT_PMU_NAME, valid_str);
return -EINVAL;
}
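/* Validate the user-supplied Intel PT config terms against the PMU capabilities */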
static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
struct evsel *evsel)
{
int err, dirfd;
char c;
if (!evsel)
return 0;
dirfd = perf_pmu__event_source_devices_fd();
if (dirfd < 0)
return dirfd;
/*
* If supported, force pass-through config term (pt=1) even if user
* sets pt=0, which avoids senseless kernel errors.
*/
if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
!(evsel->core.attr.config & 1)) {
pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
evsel->core.attr.config |= 1;
}
err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/cycle_thresholds",
"cyc_thresh", "caps/psb_cyc",
evsel->core.attr.config);
if (err)
goto out;
err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/mtc_periods",
"mtc_period", "caps/mtc",
evsel->core.attr.config);
if (err)
goto out;
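
/*
 * Likewise validate psb_period against caps/psb_periods: each set bit N
 * means period N is valid, i.e. roughly 2^(N + 11) bytes of trace between
 * PSB (synchronization) packets.
 */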
err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/psb_periods",
"psb_period", "caps/psb_cyc",
evsel->core.attr.config);
out:
close(dirfd);
return err;
}
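
/*
 * Find the smallest and largest non-zero AUX area sample sizes requested
 * by the events in the evlist.
 */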
static void intel_pt_min_max_sample_sz(struct evlist *evlist,
size_t *min_sz, size_t *max_sz)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
size_t sz = evsel->core.attr.aux_sample_size;
if (!sz)
continue;
if (min_sz && (sz < *min_sz || !*min_sz))
*min_sz = sz;
if (max_sz && sz > *max_sz)
*max_sz = sz;
}
}
/*
* Currently, there is not enough information to disambiguate different PEBS
* events, so only allow one.
*/
static bool intel_pt_too_many_aux_output(struct evlist *evlist)
{
struct evsel *evsel;
int aux_output_cnt = 0;
evlist__for_each_entry(evlist, evsel)
aux_output_cnt += !!evsel->core.attr.aux_output;
if (aux_output_cnt > 1) {
pr_err(INTEL_PT_PMU_NAME " supports at most one event with aux-output\n");
return true;
}
return false;
}
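
/*
 * Set up recording options for Intel PT: validate the event list, choose
 * default AUX area and snapshot/sample buffer sizes, and add the sideband
 * events (context switches, tracking) needed for decoding.
 */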
static int intel_pt_recording_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
{
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
bool have_timing_info, need_immediate = false;
struct evsel *evsel, *intel_pt_evsel = NULL;
const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
u64 tsc_bit;
int err;
ptr->evlist = evlist;
ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
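/* Find the (at most one) Intel PT event and mark it for AUX area tracing */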
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == intel_pt_pmu->type) {
if (intel_pt_evsel) {
pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
return -EINVAL;
}
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
evsel->core.attr.exclude_guest = intel_pt_exclude_guest();
evsel->no_aux_samples = true;
evsel->needs_auxtrace_mmap = true;
intel_pt_evsel = evsel;
opts->full_auxtrace = true;
}
}
if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
return -EINVAL;
}
if (opts->auxtrace_snapshot_mode && opts->auxtrace_sample_mode) {
pr_err("Snapshot mode (" INTEL_PT_PMU_NAME " PMU) and sample trace cannot be used together\n");
return -EINVAL;
}
if (opts->use_clockid) {
pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
return -EINVAL;
}
if (intel_pt_too_many_aux_output(evlist))
return -EINVAL;
if (!opts->full_auxtrace)
return 0;
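/*
 * For AUX area sampling, default psb_period to 0 (the smallest period)
 * unless the user specified one.
 */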
if (opts->auxtrace_sample_mode)
evsel__set_config_if_unset(intel_pt_pmu, intel_pt_evsel,
"psb_period", 0);
err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
if (err)
return err;
/* Set default sizes for snapshot mode */
if (opts->auxtrace_snapshot_mode) {
size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
opts->auxtrace_mmap_pages = KiB(128) / page_size;
if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
} else if (!opts->auxtrace_mmap_pages && !privileged &&
opts->mmap_pages == UINT_MAX) {
opts->mmap_pages = KiB(256) / page_size;
}
if (!opts->auxtrace_snapshot_size)
opts->auxtrace_snapshot_size =
opts->auxtrace_mmap_pages * (size_t)page_size;
if (!opts->auxtrace_mmap_pages) {
size_t sz = opts->auxtrace_snapshot_size;
sz = round_up(sz, page_size) / page_size;
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
}
if (opts->auxtrace_snapshot_size >
opts->auxtrace_mmap_pages * (size_t)page_size) {
pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
opts->auxtrace_snapshot_size,
opts->auxtrace_mmap_pages * (size_t)page_size);
return -EINVAL;
}
if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
return -EINVAL;
}
pr_debug2("Intel PT snapshot size: %zu\n",
opts->auxtrace_snapshot_size);
if (psb_period &&
opts->auxtrace_snapshot_size <= psb_period +
INTEL_PT_PSB_PERIOD_NEAR)
ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
opts->auxtrace_snapshot_size, psb_period);
}
/* Set default sizes for sample mode */
if (opts->auxtrace_sample_mode) {
size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
size_t min_sz = 0, max_sz = 0;
intel_pt_min_max_sample_sz(evlist, &min_sz, &max_sz);
if (!opts->auxtrace_mmap_pages && !privileged &&
opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
if (!opts->auxtrace_mmap_pages) {
size_t sz = round_up(max_sz, page_size) / page_size;
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
}
if (max_sz > opts->auxtrace_mmap_pages * (size_t)page_size) {
pr_err("Sample size %zu must not be greater than AUX area tracing mmap size %zu\n",
max_sz,
opts->auxtrace_mmap_pages * (size_t)page_size);
return -EINVAL;
}
pr_debug2("Intel PT min. sample size: %zu max. sample size: %zu\n",
min_sz, max_sz);
if (psb_period &&
min_sz <= psb_period + INTEL_PT_PSB_PERIOD_NEAR)
ui__warning("Intel PT sample size (%zu) may be too small for PSB period (%zu)\n",
min_sz, psb_period);
}
/* Set default sizes for full trace mode */
if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
opts->auxtrace_mmap_pages = KiB(128) / page_size;
if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
}
/* Validate auxtrace_mmap_pages */
if (opts->auxtrace_mmap_pages) {
size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
size_t min_sz;
if (opts->auxtrace_snapshot_mode || opts->auxtrace_sample_mode)
min_sz = KiB(4);
else
min_sz = KiB(8);
if (sz < min_sz || !is_power_of_2(sz)) {
pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
min_sz / 1024);
return -EINVAL;
}
}
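/*
 * In full-trace mode, set the AUX watermark so a wakeup is generated when
 * the AUX buffer is about a quarter full.
 */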
if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4;
u32 aux_watermark = aw > UINT_MAX ? UINT_MAX : aw;
intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
}
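/* Timing information is available only if the tsc config bit is set */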
intel_pt_parse_terms(intel_pt_pmu, "tsc", &tsc_bit);
if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
have_timing_info = true;
else
have_timing_info = false;
/*
* Per-cpu recording needs sched_switch events to distinguish different
* threads.
*/
if (have_timing_info && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
!record_opts__no_switch_events(opts)) {
if (perf_can_record_switch_events()) {
bool cpu_wide = !target__none(&opts->target) &&
!target__has_task(&opts->target);
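/*
 * Prefer a dummy event on all CPUs with attr.context_switch set, which
 * yields PERF_RECORD_SWITCH_CPU_WIDE events (have_sched_switch == 3).
 * Otherwise request ordinary switch events, or fall back to the
 * sched:sched_switch tracepoint below.
 */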
if (ptr->all_switch_events && !cpu_wide && perf_can_record_cpu_wide()) {
struct evsel *switch_evsel;
switch_evsel = evlist__add_dummy_on_all_cpus(evlist);
if (!switch_evsel)
return -ENOMEM;
switch_evsel->core.attr.context_switch = 1;
switch_evsel->immediate = true;
evsel__set_sample_bit(switch_evsel, TID);
evsel__set_sample_bit(switch_evsel, TIME);
evsel__set_sample_bit(switch_evsel, CPU);
evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);
opts->record_switch_events = false;
ptr->have_sched_switch = 3;
} else {
opts->record_switch_events = true;
need_immediate = true;
if (cpu_wide)
ptr->have_sched_switch = 3;
else
ptr->have_sched_switch = 2;
}
} else {
#ifdef HAVE_LIBTRACEEVENT
err = intel_pt_track_switches(evlist);
if (err == -EPERM)
pr_debug2("Unable to select sched:sched_switch\n");
else if (err)
return err;
else
ptr->have_sched_switch = 1;
#endif
}
}
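/*
 * When the kernel is being traced with timestamps, also record text poke
 * events so the decoder can follow kernel self-modifying code.
 */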
if (have_timing_info && !intel_pt_evsel->core.attr.exclude_kernel &&
perf_can_record_text_poke_events() && perf_can_record_cpu_wide())
opts->text_poke = true;
if (intel_pt_evsel) {
/*
* To obtain the auxtrace buffer file descriptor, the auxtrace
* event must come first.
*/
evlist__to_front(evlist, intel_pt_evsel);
/*
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
*/
if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
evsel__set_sample_bit(intel_pt_evsel, CPU);
}
/* Add dummy event to keep tracking */
if (opts->full_auxtrace) {
bool need_system_wide_tracking;
struct evsel *tracking_evsel;
/*
* User space tasks can migrate between CPUs, so when tracing
* selected CPUs, sideband for all CPUs is still needed.
*/
need_system_wide_tracking = opts->target.cpu_list &&
!intel_pt_evsel->core.attr.exclude_user;
tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking);
if (!tracking_evsel)
return -ENOMEM;
evlist__set_tracking_event(evlist, tracking_evsel);
if (need_immediate)
tracking_evsel->immediate = true;
/* In per-cpu case, always need the time of mmap events etc */
if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
evsel__set_sample_bit(tracking_evsel, TIME);
/* And the CPU for switch events */
evsel__set_sample_bit(tracking_evsel, CPU);
}
evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
}
/*
* Warn the user when we do not have enough information to decode i.e.
* per-cpu with no sched_switch (except workload-only).
*/
if (!ptr->have_sched_switch && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
!target__none(&opts->target) &&
!intel_pt_evsel->core.attr.exclude_user)
ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
return 0;
}
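
/*
 * Snapshot mode: stop tracing by disabling the Intel PT event while the AUX
 * buffer contents are read out.
 */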
static int intel_pt_snapshot_start(struct auxtrace_record *itr)
{
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
struct evsel *evsel;
evlist__for_each_entry(ptr->evlist, evsel) {
if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
return evsel__disable(evsel);
}
return -EINVAL;
}
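
/* Snapshot mode: re-enable the Intel PT event once the snapshot has been taken */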
static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
{
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
struct evsel *evsel;
evlist__for_each_entry(ptr->evlist, evsel) {
if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
return evsel__enable(evsel);
}
return -EINVAL;
}
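
/*
 * Grow the array of per-mmap snapshot references so that index 'idx' is
 * valid.  The allocation starts at 16 entries and doubles as needed, and
 * existing entries are preserved.
 */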
static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
{
const size_t sz = sizeof(struct intel_pt_snapshot_ref);
int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
struct intel_pt_snapshot_ref *refs;
if (!new_cnt)
new_cnt = 16;
while (new_cnt <= idx)
new_cnt *= 2;
refs = calloc(new_cnt, sz);
if (!refs)
return -ENOMEM;
memcpy(refs, ptr->snapshot_refs, cnt * sz);
ptr->snapshot_refs = refs;
ptr->snapshot_ref_cnt = new_cnt;
return 0;
}
static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
{
int i;
for (i = 0; i < ptr->snapshot_ref_cnt; i++)
zfree(&ptr->snapshot_refs[i].ref_buf);
zfree(&ptr->snapshot_refs);
}
static void intel_pt_recording_free(struct auxtrace_record *itr)
{
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
intel_pt_free_snapshot_refs(ptr);
free(ptr);
}
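
/*
 * Allocate a zeroed reference buffer for mmap 'idx'.  The reference region
 * corresponds to the last 'ref_buf_size' bytes of the snapshot buffer.
 */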
static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
size_t snapshot_buf_size)
{
size_t ref_buf_size = ptr->snapshot_ref_buf_size;
void *ref_buf;
ref_buf = zalloc(ref_buf_size);
if (!ref_buf)
return -ENOMEM;
ptr->snapshot_refs[idx].ref_buf = ref_buf;
ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;
return 0;
}
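
/*
 * Size of the reference buffer used for wrap-around detection: roughly two
 * PSB periods, capped at 256KiB.  Returns zero (no reference buffer) when the
 * snapshot is small or the reference would not be usefully smaller than the
 * buffer itself.
 */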
static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
size_t snapshot_buf_size)
{
const size_t max_size = 256 * 1024;
size_t buf_size = 0, psb_period;
if (ptr->snapshot_size <= 64 * 1024)
return 0;
psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
if (psb_period)
buf_size = psb_period * 2;
if (!buf_size || buf_size > max_size)
buf_size = max_size;
if (buf_size >= snapshot_buf_size)
return 0;
if (buf_size >= ptr->snapshot_size / 2)
return 0;
return buf_size;
}
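
/* Determine the reference buffer size once, when the first snapshot is taken */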
static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
size_t snapshot_buf_size)
{
if (ptr->snapshot_init_done)
return 0;
ptr->snapshot_init_done = true;
ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
snapshot_buf_size);
return 0;
}
/**
* intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
* @buf1: first buffer
* @compare_size: number of bytes to compare
* @buf2: second buffer (a circular buffer)
* @offs2: offset in second buffer
* @buf2_size: size of second buffer
*
* The comparison allows for the possibility that the bytes to compare in the
* circular buffer are not contiguous. It is assumed that @compare_size <=
* @buf2_size. This function returns %false if the bytes are identical, %true
* otherwise.
*/
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
void *buf2, size_t offs2, size_t buf2_size)
{
size_t end2 = offs2 + compare_size, part_size;
if (end2 <= buf2_size)
return memcmp(buf1, buf2 + offs2, compare_size);
part_size = end2 - buf2_size;
if (memcmp(buf1, buf2 + offs2, part_size))
return true;
compare_size -= part_size;
return memcmp(buf1 + part_size, buf2, compare_size);
}
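
/*
 * Return true if the trace appears to have wrapped: either 'head' now lies
 * within the reference region, or the data there no longer matches the saved
 * reference copy.
 */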
static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
size_t ref_size, size_t buf_size,
void *data, size_t head)
{
size_t ref_end = ref_offset + ref_size;
if (ref_end > buf_size) {
if (head > ref_offset || head < ref_end - buf_size)
return true;
} else if (head > ref_offset && head < ref_end) {
return true;
}
return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
buf_size);
}
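
/*
 * Copy 'ref_size' bytes of trace data ending at 'head' into the reference
 * buffer, allowing for wrap-around at the start of the circular buffer.
 */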
static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
void *data, size_t head)
{
if (head >= ref_size) {
memcpy(ref_buf, data + head - ref_size, ref_size);
} else {
memcpy(ref_buf, data, head);
ref_size -= head;
memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
}
}
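
/*
 * Detect whether the trace data for mmap 'idx' has wrapped since the previous
 * snapshot, then refresh the reference copy for next time.
 */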
static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
struct auxtrace_mmap *mm, unsigned char *data,
u64 head)
{
struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
bool wrapped;
wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
ptr->snapshot_ref_buf_size, mm->len,
data, head);
intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
data, head);
return wrapped;
}
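
/*
 * The AUX buffer starts zero-filled, so non-zero data in the last 4KiB means
 * the trace has reached the end of the buffer i.e. it has wrapped.
 */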
static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
int i, a, b;
b = buf_size >> 3;
a = b - 512;
if (a < 0)
a = 0;
for (i = a; i < b; i++) {
if (data[i])
return true;
}
return false;
}
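
/*
 * ->find_snapshot() callback: determine whether the AUX buffer has wrapped
 * and adjust 'old' and 'head' so the snapshot can be processed like full
 * trace data.
 */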
static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
struct auxtrace_mmap *mm, unsigned char *data,
u64 *head, u64 *old)
{
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
bool wrapped;
int err;
pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
__func__, idx, (size_t)*old, (size_t)*head);
err = intel_pt_snapshot_init(ptr, mm->len);
if (err)
goto out_err;
if (idx >= ptr->snapshot_ref_cnt) {
err = intel_pt_alloc_snapshot_refs(ptr, idx);
if (err)
goto out_err;
}
if (ptr->snapshot_ref_buf_size) {
if (!ptr->snapshot_refs[idx].ref_buf) {
err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
if (err)
goto out_err;
}
wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
} else {
wrapped = ptr->snapshot_refs[idx].wrapped;
if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
ptr->snapshot_refs[idx].wrapped = true;
wrapped = true;
}
}
/*
* In full trace mode 'head' continually increases. However in snapshot
* mode 'head' is an offset within the buffer. Here 'old' and 'head'
* are adjusted to match the full trace case which expects that 'old' is
* always less than 'head'.
*/
if (wrapped) {
*old = *head;
*head += mm->len;
} else {
if (mm->mask)
*old &= mm->mask;
else
*old %= mm->len;
if (*old > *head)
*head += mm->len;
}
pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
__func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);
return 0;
out_err:
pr_err("%s: failed, error %d\n", __func__, err);
return err;
}
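
/* ->reference() callback: use the CPU timestamp counter as the trace reference */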
static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
return rdtsc();
}
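
/* perf_config() callback: apply the intel-pt.all-switch-events setting */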
static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
struct intel_pt_recording *ptr = data;
if (!strcmp(var, "intel-pt.all-switch-events"))
ptr->all_switch_events = perf_config_bool(var, value);
return 0;
}
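
/*
 * Set up Intel PT recording: locate the intel_pt PMU, apply perf config, and
 * fill in the auxtrace_record callbacks.
 */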
struct auxtrace_record *intel_pt_recording_init(int *err)
{
struct perf_pmu *intel_pt_pmu = perf_pmus__find(INTEL_PT_PMU_NAME);
struct intel_pt_recording *ptr;
if (!intel_pt_pmu)
return NULL;
if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
*err = -errno;
return NULL;
}
ptr = zalloc(sizeof(struct intel_pt_recording));
if (!ptr) {
*err = -ENOMEM;
return NULL;
}
perf_config(intel_pt_perf_config, ptr);
ptr->intel_pt_pmu = intel_pt_pmu;
ptr->itr.recording_options = intel_pt_recording_options;
ptr->itr.info_priv_size = intel_pt_info_priv_size;
ptr->itr.info_fill = intel_pt_info_fill;
ptr->itr.free = intel_pt_recording_free;
ptr->itr.snapshot_start = intel_pt_snapshot_start;
ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
ptr->itr.find_snapshot = intel_pt_find_snapshot;
ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
ptr->itr.reference = intel_pt_reference;
ptr->itr.read_finish = auxtrace_record__read_finish;
/*
* Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K
* should give at least 1 PSB per sample.
*/
ptr->itr.default_aux_sample_size = 4096;
return &ptr->itr;
}