linux/tools/perf/util/affinity.c
Ian Rogers 588d22b404 perf test: Expand user space event reading (rdpmc) tests
Test that disabling rdpmc support via /sys/bus/event_source/cpu*/rdpmc
disables reading in the mmap (libperf's read support will fall back to
using a system call).
Test that all hybrid PMUs support rdpmc.
Ensure hybrid PMUs use the correct CPU to rdpmc the correct
event. Previously the test would open cycles or instructions with no
extended type and then rdpmc it on whatever CPU it happened to run on,
which could cause the test to fail or skip depending on where it was
scheduled.

Signed-off-by: Ian Rogers <irogers@google.com>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lore.kernel.org/r/20250614004528.1652860-1-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
2025-06-20 13:14:07 -07:00
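
The user-space read path under test looks roughly like the sketch below:
consult the event's mmapped perf_event_mmap_page, use rdpmc when
cap_user_rdpmc and index say it is usable, and otherwise fall back to the
read() system call. This is a minimal, x86-only illustration, not the
test's actual code; read_event() and the rdpmc() helper are made-up
names. When the sysfs rdpmc knob is cleared, cap_user_rdpmc reads as
zero and the fallback path is taken.

/* Illustrative sketch only: user-space counter read with syscall fallback. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <unistd.h>

static uint64_t rdpmc(uint32_t counter)
{
	uint32_t low, high;

	__asm__ __volatile__("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
	return low | ((uint64_t)high) << 32;
}

/* fd is the perf event; pc is the first mmapped page of that event. */
static uint64_t read_event(int fd, volatile struct perf_event_mmap_page *pc)
{
	uint64_t count;
	int64_t offset;
	uint32_t seq, idx;

	do {
		seq = pc->lock;
		__sync_synchronize();
		idx = pc->index;
		offset = pc->offset;
		if (!pc->cap_user_rdpmc || !idx) {
			/* rdpmc disabled (or event not scheduled): syscall. */
			if (read(fd, &count, sizeof(count)) != sizeof(count))
				count = 0;
			return count;
		}
		/* index is the hardware counter number plus one. */
		count = offset + rdpmc(idx - 1);
		__sync_synchronize();
	} while (pc->lock != seq);

	return count;
}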

// SPDX-License-Identifier: GPL-2.0
/* Manage affinity to optimize IPIs inside the kernel perf API. */
#define _GNU_SOURCE 1
#include <sched.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include "perf.h"
#include "cpumap.h"
#include "affinity.h"
static int get_cpu_set_size(void)
{
	int sz = cpu__max_cpu().cpu + 8 - 1;
	/*
	 * sched_getaffinity doesn't like masks smaller than the kernel's
	 * own cpumask. Hopefully 4096 bits is big enough.
	 */
	if (sz < 4096)
		sz = 4096;
	return sz / 8;
}
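
/* Save the current affinity mask so affinity__cleanup() can restore it. */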
int affinity__setup(struct affinity *a)
{
	int cpu_set_size = get_cpu_set_size();

	a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
	if (!a->orig_cpus)
		return -1;
	sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
	a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
	if (!a->sched_cpus) {
		zfree(&a->orig_cpus);
		return -1;
	}
	bitmap_zero((unsigned long *)a->sched_cpus, cpu_set_size);
	a->changed = false;
	return 0;
}

/*
 * perf_event_open does an IPI internally to the target CPU.
 * It is more efficient to change perf's affinity to the target
 * CPU and then set up all events on that CPU, so we amortize
 * CPU communication.
 */
void affinity__set(struct affinity *a, int cpu)
{
	int cpu_set_size = get_cpu_set_size();

	/*
	 * Return early if cpu is -1, and guard against out-of-bounds
	 * access to sched_cpus.
	 */
	if (cpu == -1 || cpu >= cpu_set_size * 8)
		return;

	a->changed = true;
	__set_bit(cpu, a->sched_cpus);

	/*
	 * We ignore errors because affinity is just an optimization.
	 * Failures can happen for example with isolated CPUs or cpusets.
	 * In this case the IPIs inside the kernel's perf API still work.
	 */
	sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
	__clear_bit(cpu, a->sched_cpus);
}
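
/* Put the thread back on its original affinity mask, if it was changed. */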
static void __affinity__cleanup(struct affinity *a)
{
	int cpu_set_size = get_cpu_set_size();

	if (a->changed)
		sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
	zfree(&a->sched_cpus);
	zfree(&a->orig_cpus);
}

void affinity__cleanup(struct affinity *a)
{
	if (a != NULL)
		__affinity__cleanup(a);
}
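
/* Pin the calling thread to the CPUs in @cpumap; errors are ignored. */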
void cpu_map__set_affinity(const struct perf_cpu_map *cpumap)
{
	int cpu_set_size = get_cpu_set_size();
	unsigned long *cpuset = bitmap_zalloc(cpu_set_size * 8);
	struct perf_cpu cpu;
	int idx;

	if (!cpuset)
		return;

	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpumap)
		__set_bit(cpu.cpu, cpuset);

	sched_setaffinity(0, cpu_set_size, (cpu_set_t *)cpuset);
	zfree(&cpuset);
}
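
For context, a sketch of how a caller might drive this API when opening
one event per CPU; the loop shape follows perf's evlist open paths, but
open_event_on_cpu() is a hypothetical helper, not a real function:

/*
 * Hedged usage sketch: pin to each CPU before opening events on it so
 * perf_event_open()'s IPIs stay local, then restore the original mask.
 */
#include <perf/cpumap.h>
#include "affinity.h"

static int open_event_on_cpu(struct perf_cpu cpu); /* hypothetical */

static int open_all(struct perf_cpu_map *cpus)
{
	struct affinity affinity;
	struct perf_cpu cpu;
	int idx, err = 0;

	if (affinity__setup(&affinity) < 0)
		return -1;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		affinity__set(&affinity, cpu.cpu);
		err = open_event_on_cpu(cpu);
		if (err)
			break;
	}

	affinity__cleanup(&affinity);
	return err;
}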