Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
locking/seqlock: Split out seqlock_types.h
Trimming down sched.h dependencies: we don't want to include more than the base types.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 53d31ba842
commit f038cc1379

3 changed files with 96 additions and 78 deletions
include/linux/sched.h
@@ -33,7 +33,7 @@
 #include <linux/task_io_accounting.h>
 #include <linux/posix-timers_types.h>
 #include <linux/rseq.h>
-#include <linux/seqlock.h>
+#include <linux/seqlock_types.h>
 #include <linux/kcsan.h>
 #include <linux/rv.h>
 #include <linux/livepatch_sched.h>
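The point of the hunk above is that sched.h only embeds seqlock types by value and never calls the accessor API, so the type-only header is enough. As a rough illustration (hypothetical struct and function names, not part of this patch), a widely-included header can now pull in just the type layout while the .c file that actually touches the counter includes the full <linux/seqlock.h>:

/* foo.h - hypothetical header included almost everywhere, like sched.h */
#include <linux/seqlock_types.h>	/* only the type layout is needed here */
#include <linux/spinlock_types.h>

struct foo {
	spinlock_t		lock;
	seqcount_spinlock_t	seq;	/* embedded by value: size must be known */
	unsigned long		a, b;
};

/* foo.c - only this file needs the read/write helpers */
#include <linux/seqlock.h>

static void foo_update(struct foo *f, unsigned long a, unsigned long b)
{
	spin_lock(&f->lock);
	write_seqcount_begin(&f->seq);
	f->a = a;
	f->b = b;
	write_seqcount_end(&f->seq);
	spin_unlock(&f->lock);
}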
include/linux/seqlock.h
@@ -18,6 +18,7 @@
 #include <linux/lockdep.h>
 #include <linux/mutex.h>
 #include <linux/preempt.h>
+#include <linux/seqlock_types.h>
 #include <linux/spinlock.h>
 
 #include <asm/processor.h>
@@ -37,37 +38,6 @@
  */
 #define KCSAN_SEQLOCK_REGION_MAX 1000
 
-/*
- * Sequence counters (seqcount_t)
- *
- * This is the raw counting mechanism, without any writer protection.
- *
- * Write side critical sections must be serialized and non-preemptible.
- *
- * If readers can be invoked from hardirq or softirq contexts,
- * interrupts or bottom halves must also be respectively disabled before
- * entering the write section.
- *
- * This mechanism can't be used if the protected data contains pointers,
- * as the writer can invalidate a pointer that a reader is following.
- *
- * If the write serialization mechanism is one of the common kernel
- * locking primitives, use a sequence counter with associated lock
- * (seqcount_LOCKNAME_t) instead.
- *
- * If it's desired to automatically handle the sequence counter writer
- * serialization and non-preemptibility requirements, use a sequential
- * lock (seqlock_t) instead.
- *
- * See Documentation/locking/seqlock.rst
- */
-typedef struct seqcount {
-	unsigned sequence;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-} seqcount_t;
-
 static inline void __seqcount_init(seqcount_t *s, const char *name,
 				   struct lock_class_key *key)
 {
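The comment block being moved describes the bare counter. As a minimal sketch of that usage model (hypothetical foo_* data, not from this patch), the writer supplies its own serialization and non-preemptibility while readers spin on the sequence number:

#include <linux/seqlock.h>
#include <linux/types.h>

static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
static u64 foo_a, foo_b;

/* Writer: callers must already be serialized against each other. */
static void foo_write(u64 a, u64 b)
{
	preempt_disable();			/* write side must be non-preemptible */
	write_seqcount_begin(&foo_seq);
	foo_a = a;
	foo_b = b;
	write_seqcount_end(&foo_seq);
	preempt_enable();
}

/* Reader: retry if a writer ran concurrently. */
static void foo_read(u64 *a, u64 *b)
{
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&foo_seq);
		*a = foo_a;
		*b = foo_b;
	} while (read_seqcount_retry(&foo_seq, seq));
}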
@@ -131,28 +101,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
  * See Documentation/locking/seqlock.rst
  */
 
-/*
- * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
- * disable preemption. It can lead to higher latencies, and the write side
- * sections will not be able to acquire locks which become sleeping locks
- * (e.g. spinlock_t).
- *
- * To remain preemptible while avoiding a possible livelock caused by the
- * reader preempting the writer, use a different technique: let the reader
- * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
- * case, acquire then release the associated LOCKNAME writer serialization
- * lock. This will allow any possibly-preempted writer to make progress
- * until the end of its writer serialization lock critical section.
- *
- * This lock-unlock technique must be implemented for all of PREEMPT_RT
- * sleeping locks. See Documentation/locking/locktypes.rst
- */
-#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
-#define __SEQ_LOCK(expr) expr
-#else
-#define __SEQ_LOCK(expr)
-#endif
-
 /*
  * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
  * @seqcount: The real sequence counter
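The PREEMPT_RT scheme described in the moved comment is what the seqcount_LOCKNAME_t variants implement. A sketch of a counter with an associated spinlock (hypothetical struct foo, not from this patch); on RT kernels the generated read-side helpers may briefly acquire and release the associated lock so a preempted writer can finish:

#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo {
	spinlock_t		lock;
	seqcount_spinlock_t	seq;
	u64			val;
};

static void foo_init(struct foo *f)
{
	spin_lock_init(&f->lock);
	seqcount_spinlock_init(&f->seq, &f->lock);	/* associate counter and lock */
}

static void foo_set(struct foo *f, u64 v)
{
	spin_lock(&f->lock);		/* writer serialization via the associated lock */
	write_seqcount_begin(&f->seq);
	f->val = v;
	write_seqcount_end(&f->seq);
	spin_unlock(&f->lock);
}

static u64 foo_get(struct foo *f)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqcount_begin(&f->seq);	/* may lock/unlock f->lock on RT */
		v = f->val;
	} while (read_seqcount_retry(&f->seq, seq));

	return v;
}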
@@ -194,11 +142,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
  * @lockbase: prefix for associated lock/unlock
  */
 #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
-typedef struct seqcount_##lockname { \
-	seqcount_t seqcount; \
-	__SEQ_LOCK(locktype *lock); \
-} seqcount_##lockname##_t; \
-	\
 static __always_inline seqcount_t * \
 __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
 { \
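After the split, the SEQCOUNT_LOCKNAME() macro left in seqlock.h only generates accessor helpers for types that now come from seqlock_types.h. Roughly, for lockname=spinlock it produces something like the following (a hand expansion for illustration; the helper body is truncated in this hunk and is assumed here to return the embedded counter):

/* Type generated by the macro in <linux/seqlock_types.h>: */
typedef struct seqcount_spinlock {
	seqcount_t seqcount;
	__SEQ_LOCK(spinlock_t *lock);
} seqcount_spinlock_t;

/* seqlock.h now only generates helpers such as: */
static __always_inline seqcount_t *
__seqprop_spinlock_ptr(seqcount_spinlock_t *s)
{
	return &s->seqcount;
}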
@@ -284,6 +227,7 @@ SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
 SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
 SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
 SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
+#undef SEQCOUNT_LOCKNAME
 
 /*
  * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@@ -794,25 +738,6 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
 	smp_wmb(); /* increment "sequence" before following stores */
 }
 
-/*
- * Sequential locks (seqlock_t)
- *
- * Sequence counters with an embedded spinlock for writer serialization
- * and non-preemptibility.
- *
- * For more info, see:
- * - Comments on top of seqcount_t
- * - Documentation/locking/seqlock.rst
- */
-typedef struct {
-	/*
-	 * Make sure that readers don't starve writers on PREEMPT_RT: use
-	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
-	 */
-	seqcount_spinlock_t seqcount;
-	spinlock_t lock;
-} seqlock_t;
-
 #define __SEQLOCK_UNLOCKED(lockname) \
 { \
 	.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
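The seqlock_t being moved here bundles the sequence counter with its spinlock, so writers need no separate serialization. A minimal usage sketch (hypothetical foo_* data, not from this patch):

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(foo_seqlock);
static u64 foo_a, foo_b;

static void foo_write(u64 a, u64 b)
{
	write_seqlock(&foo_seqlock);	/* takes the embedded spinlock */
	foo_a = a;
	foo_b = b;
	write_sequnlock(&foo_seqlock);
}

static void foo_read(u64 *a, u64 *b)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&foo_seqlock);
		*a = foo_a;
		*b = foo_b;
	} while (read_seqretry(&foo_seqlock, seq));
}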
include/linux/seqlock_types.h (new file, 93 lines)
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SEQLOCK_TYPES_H
+#define __LINUX_SEQLOCK_TYPES_H
+
+#include <linux/lockdep_types.h>
+#include <linux/mutex_types.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * Sequence counters (seqcount_t)
+ *
+ * This is the raw counting mechanism, without any writer protection.
+ *
+ * Write side critical sections must be serialized and non-preemptible.
+ *
+ * If readers can be invoked from hardirq or softirq contexts,
+ * interrupts or bottom halves must also be respectively disabled before
+ * entering the write section.
+ *
+ * This mechanism can't be used if the protected data contains pointers,
+ * as the writer can invalidate a pointer that a reader is following.
+ *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKNAME_t) instead.
+ *
+ * If it's desired to automatically handle the sequence counter writer
+ * serialization and non-preemptibility requirements, use a sequential
+ * lock (seqlock_t) instead.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+typedef struct seqcount {
+	unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+} seqcount_t;
+
+/*
+ * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
+ * disable preemption. It can lead to higher latencies, and the write side
+ * sections will not be able to acquire locks which become sleeping locks
+ * (e.g. spinlock_t).
+ *
+ * To remain preemptible while avoiding a possible livelock caused by the
+ * reader preempting the writer, use a different technique: let the reader
+ * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
+ * case, acquire then release the associated LOCKNAME writer serialization
+ * lock. This will allow any possibly-preempted writer to make progress
+ * until the end of its writer serialization lock critical section.
+ *
+ * This lock-unlock technique must be implemented for all of PREEMPT_RT
+ * sleeping locks. See Documentation/locking/locktypes.rst
+ */
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
+#define __SEQ_LOCK(expr) expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
+typedef struct seqcount_##lockname { \
+	seqcount_t seqcount; \
+	__SEQ_LOCK(locktype *lock); \
+} seqcount_##lockname##_t;
+
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
+#undef SEQCOUNT_LOCKNAME
+
+/*
+ * Sequential locks (seqlock_t)
+ *
+ * Sequence counters with an embedded spinlock for writer serialization
+ * and non-preemptibility.
+ *
+ * For more info, see:
+ * - Comments on top of seqcount_t
+ * - Documentation/locking/seqlock.rst
+ */
+typedef struct {
+	/*
+	 * Make sure that readers don't starve writers on PREEMPT_RT: use
+	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
+	 */
+	seqcount_spinlock_t seqcount;
+	spinlock_t lock;
+} seqlock_t;
+
+#endif /* __LINUX_SEQLOCK_TYPES_H */
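One detail worth noting in the new header: __SEQ_LOCK() only keeps the associated-lock pointer in the generated type when something will actually consult it (lockdep, or the PREEMPT_RT read-side fallback). Roughly, the macro above gives seqcount_spinlock_t one of two layouts (an illustrative expansion, not literal kernel source):

#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
/* __SEQ_LOCK(expr) keeps expr: the type carries a pointer back to its lock. */
typedef struct seqcount_spinlock {
	seqcount_t seqcount;
	spinlock_t *lock;
} seqcount_spinlock_t;
#else
/* __SEQ_LOCK(expr) expands to nothing: the type is just the bare counter. */
typedef struct seqcount_spinlock {
	seqcount_t seqcount;
} seqcount_spinlock_t;
#endif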