mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

This is to slow down lock acquisition (on contended locks) deliberately. A
possible use case is to estimate the impact of an optimization of kernel
locking behavior on application performance. By delaying the lock it can
simulate the worse condition as a control group, and then compare it with the
current behavior as the optimized condition.

The syntax is 'time@function' and the time can have a unit suffix like "us"
and "ms". For example, I ran a simple test like below.

  $ sudo perf lock con -abl -L tasklist_lock -- \
    sh -c 'for i in $(seq 1000); do sleep 1 & done; wait'

   contended   total wait     max wait     avg wait          address   symbol
          92      1.18 ms    199.54 us     12.79 us  ffffffff8a806080   tasklist_lock (rwlock)

The contention count was 92 and the average wait time was around 10 us. But
if I add 100 usec of delay to the tasklist_lock:

  $ sudo perf lock con -abl -L tasklist_lock -J 100us@tasklist_lock -- \
    sh -c 'for i in $(seq 1000); do sleep 1 & done; wait'

   contended   total wait     max wait     avg wait          address   symbol
         190     15.67 ms    230.10 us     82.46 us  ffffffff8a806080   tasklist_lock (rwlock)

The contention count increased and the average wait time went up close to
100 usec. If I increase the delay even more:

  $ sudo perf lock con -abl -L tasklist_lock -J 1ms@tasklist_lock -- \
    sh -c 'for i in $(seq 1000); do sleep 1 & done; wait'

   contended   total wait     max wait     avg wait          address   symbol
        1002       2.80 s      3.01 ms      2.80 ms  ffffffff8a806080   tasklist_lock (rwlock)

Now every sleep process had contention and the wait time was more than 1
msec. This is on my 4 CPU laptop, so I guess one CPU holds the lock while
the other 3 are mostly waiting for it.

For simplicity, it only supports global locks for now.

Committer testing:

  root@number:~# grep -m1 'model name' /proc/cpuinfo
  model name      : AMD Ryzen 9 9950X3D 16-Core Processor
  root@number:~# perf lock con -abl -L tasklist_lock -- sh -c 'for i in $(seq 1000); do sleep 1 & done; wait'
   contended   total wait     max wait     avg wait          address   symbol
         142    453.85 us     25.39 us      3.20 us  ffffffffae808080   tasklist_lock (rwlock)
  root@number:~# perf lock con -abl -L tasklist_lock -J 100us@tasklist_lock -- sh -c 'for i in $(seq 1000); do sleep 1 & done; wait'
   contended   total wait     max wait     avg wait          address   symbol
        1040       2.39 s      3.11 ms      2.30 ms  ffffffffae808080   tasklist_lock (rwlock)
  root@number:~# perf lock con -abl -L tasklist_lock -J 1ms@tasklist_lock -- sh -c 'for i in $(seq 1000); do sleep 1 & done; wait'
   contended   total wait     max wait     avg wait          address   symbol
        1025      24.72 s     31.01 ms     24.12 ms  ffffffffae808080   tasklist_lock (rwlock)
  root@number:~#

Suggested-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20250509171950.183591-1-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
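As a rough illustration of how a 'time@function' spec might be split into a
delay and a symbol name (a hypothetical parse_delay_spec() helper, assuming
the time is converted to nanoseconds internally; the actual option parsing
lives in the perf tool sources):

  #include <stdlib.h>
  #include <string.h>

  /* Hypothetical sketch: split "100us@tasklist_lock" into a delay in nsec
   * and a symbol name.  Bare numbers are assumed to be nsec here. */
  static int parse_delay_spec(const char *spec, unsigned long *time_ns, char **sym)
  {
          char *end;
          const char *at = strchr(spec, '@');
          unsigned long val = strtoul(spec, &end, 10);

          if (at == NULL || at[1] == '\0' || end == spec)
                  return -1;

          if (!strncmp(end, "ms", 2))             /* e.g. "1ms@..."   */
                  val *= 1000000UL;
          else if (!strncmp(end, "us", 2))        /* e.g. "100us@..." */
                  val *= 1000UL;

          *time_ns = val;
          *sym = strdup(at + 1);                  /* e.g. "tasklist_lock" */
          return *sym ? 0 : -1;
  }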
208 lines
4.6 KiB
C
// SPDX-License-Identifier: GPL-2.0
#ifndef PERF_LOCK_CONTENTION_H
#define PERF_LOCK_CONTENTION_H

#include <linux/list.h>
#include <linux/rbtree.h>

struct lock_filter {
        int             nr_types;
        int             nr_addrs;
        int             nr_syms;
        int             nr_cgrps;
        int             nr_slabs;
        unsigned int    *types;
        unsigned long   *addrs;
        char            **syms;
        u64             *cgrps;
        char            **slabs;
};

struct lock_delay {
        char            *sym;
        unsigned long   addr;
        unsigned long   time;
};

struct lock_stat {
        struct hlist_node       hash_entry;
        struct rb_node          rb;             /* used for sorting */

        u64             addr;           /* address of lockdep_map, used as ID */
        char            *name;          /* for strcpy(), we cannot use const */
        u64             *callstack;

        unsigned int    nr_acquire;
        unsigned int    nr_acquired;
        unsigned int    nr_contended;
        unsigned int    nr_release;

        union {
                unsigned int    nr_readlock;
                unsigned int    flags;
        };
        unsigned int    nr_trylock;

        /* these times are in nano sec. */
        u64             avg_wait_time;
        u64             wait_time_total;
        u64             wait_time_min;
        u64             wait_time_max;

        int             broken; /* flag of blacklist */
        int             combined;
};

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first acquire event.
 * Given the nature of lock events, there is no guarantee
 * that the first event for a lock is acquire;
 * it can be acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED        0       /* initial state */
#define SEQ_STATE_RELEASED             1
#define SEQ_STATE_ACQUIRING            2
#define SEQ_STATE_ACQUIRED             3
#define SEQ_STATE_READ_ACQUIRED        4
#define SEQ_STATE_CONTENDED            5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS   12
#define LOCKHASH_SIZE   (1UL << LOCKHASH_BITS)

extern struct hlist_head *lockhash_table;

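/*
 * Illustrative note (not part of this header): in the perf sources the
 * table is indexed by hashing the lock address, roughly as
 *
 *      #define __lockhashfn(key)   hash_long((unsigned long)key, LOCKHASH_BITS)
 *      #define lockhashentry(key)  (lockhash_table + __lockhashfn(key))
 */
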
/*
 * struct lock_seq_stat:
 * Holds the state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
        struct list_head        list;
        int                     state;
        u64                     prev_event_time;
        u64                     addr;

        int                     read_count;
};

struct thread_stat {
        struct rb_node          rb;

        u32                     tid;
        struct list_head        seq_list;
};

/*
 * CONTENTION_STACK_DEPTH
 * Number of stack trace entries to find callers
 */
#define CONTENTION_STACK_DEPTH  8

/*
 * CONTENTION_STACK_SKIP
 * Number of stack trace entries to skip when finding callers.
 * The first few entries belong to the locking implementation itself.
 */
#define CONTENTION_STACK_SKIP  4

/*
 * flags for lock:contention_begin
 * Imported from include/trace/events/lock.h.
 */
#define LCB_F_SPIN      (1U << 0)
#define LCB_F_READ      (1U << 1)
#define LCB_F_WRITE     (1U << 2)
#define LCB_F_RT        (1U << 3)
#define LCB_F_PERCPU    (1U << 4)
#define LCB_F_MUTEX     (1U << 5)

struct evlist;
struct machine;
struct target;

struct lock_contention_fails {
        int             task;
        int             stack;
        int             time;
        int             data;
};

struct lock_contention {
        struct evlist           *evlist;
        struct target           *target;
        struct machine          *machine;
        struct hlist_head       *result;
        struct lock_filter      *filters;
        struct lock_delay       *delays;
        struct lock_contention_fails fails;
        struct rb_root          cgroups;
        void                    *btf;
        unsigned long           map_nr_entries;
        int                     max_stack;
        int                     stack_skip;
        int                     aggr_mode;
        int                     owner;
        int                     nr_filtered;
        int                     nr_delays;
        bool                    save_callstack;
};

struct option;
int parse_call_stack(const struct option *opt, const char *str, int unset);
bool needs_callstack(void);

struct lock_stat *lock_stat_find(u64 addr);
struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags);

bool match_callstack_filter(struct machine *machine, u64 *callstack, int max_stack_depth);

#ifdef HAVE_BPF_SKEL
int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
int lock_contention_read(struct lock_contention *con);
int lock_contention_finish(struct lock_contention *con);

struct lock_stat *pop_owner_stack_trace(struct lock_contention *con);

#else  /* !HAVE_BPF_SKEL */

static inline int lock_contention_prepare(struct lock_contention *con __maybe_unused)
{
        return 0;
}

static inline int lock_contention_start(void) { return 0; }
static inline int lock_contention_stop(void) { return 0; }
static inline int lock_contention_finish(struct lock_contention *con __maybe_unused)
{
        return 0;
}

static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
{
        return 0;
}

static inline struct lock_stat *pop_owner_stack_trace(struct lock_contention *con __maybe_unused)
{
        return NULL;
}

#endif  /* HAVE_BPF_SKEL */

#endif  /* PERF_LOCK_CONTENTION_H */
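For reference, a rough sketch of how a caller might drive this API when perf
is built with BPF skeletons (hypothetical setup code under assumed names;
the real driver logic lives in the perf tool's builtin-lock.c):

  /* assumes evlist, &target and machine were set up by the caller */
  struct lock_contention con = {
          .evlist     = evlist,
          .target     = &target,
          .machine    = machine,
          .result     = lockhash_table,
          .max_stack  = CONTENTION_STACK_DEPTH,
          .stack_skip = CONTENTION_STACK_SKIP,
  };

  if (lock_contention_prepare(&con) < 0)
          return -1;

  lock_contention_start();
  /* ... run the monitored workload here ... */
  lock_contention_stop();

  /* collect per-lock stats into con.result, then tear down */
  lock_contention_read(&con);
  lock_contention_finish(&con);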