timekeeping: Move timekeeper_lock into tk_core

timekeeper_lock protects updates to struct tk_core but is not part of
struct tk_core. As long as there is only a single timekeeper, this is not a
problem. But once the timekeeper infrastructure is reused for per PTP clock
timekeepers, timekeeper_lock needs to be part of tk_core.

Move the lock into tk_core, move initialisation of the lock and sequence
counter into timekeeping_init() and update all users of timekeeper_lock.
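
With that change, the layout and initialisation end up roughly as follows
(condensed from the diff below, not a complete listing):

  static struct {
  	seqcount_raw_spinlock_t	seq;
  	struct timekeeper	timekeeper;
  	struct timekeeper	shadow_timekeeper;
  	raw_spinlock_t		lock;
  } tk_core ____cacheline_aligned;

  void __init timekeeping_init(void)
  {
  	...
  	raw_spin_lock_init(&tk_core.lock);
  	seqcount_raw_spinlock_init(&tk_core.seq, &tk_core.lock);
  	...
  }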

As this is touching all lock sites, convert them to use:

  guard(raw_spinlock_irqsave)(&tk_core.lock);

instead of lock/unlock functions whenever possible.
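
The guard is the scope-based lock guard from <linux/cleanup.h>: it acquires
the raw spinlock with interrupts disabled and releases it automatically when
the enclosing scope is left, so the on-stack 'flags' variable and the
explicit unlock on every exit path go away. As a rough sketch of the pattern
(illustration only; do_update() is a made-up placeholder, not code from this
patch):

  /* Before: explicit lock/unlock pair with an on-stack flags variable */
  unsigned long flags;

  raw_spin_lock_irqsave(&tk_core.lock, flags);
  do_update();
  raw_spin_unlock_irqrestore(&tk_core.lock, flags);

  /* After: the guard drops the lock automatically at scope exit */
  guard(raw_spinlock_irqsave)(&tk_core.lock);
  do_update();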

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: John Stultz <jstultz@google.com>
Link: https://lore.kernel.org/all/20241009-devel-anna-maria-b4-timers-ptp-timekeeping-v2-9-554456a44a15@linutronix.de

--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -41,8 +41,6 @@ enum timekeeping_adv_mode {
 	TK_ADV_FREQ
 };
 
-static DEFINE_RAW_SPINLOCK(timekeeper_lock);
-
 /*
  * The most important data for readout fits into a single 64 byte
  * cache line.
@@ -51,10 +49,8 @@ static struct {
 	seqcount_raw_spinlock_t	seq;
 	struct timekeeper	timekeeper;
 	struct timekeeper	shadow_timekeeper;
-} tk_core ____cacheline_aligned = {
-	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
-};
+	raw_spinlock_t		lock;
+} tk_core ____cacheline_aligned;
 
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
@@ -118,13 +114,13 @@ unsigned long timekeeper_lock_irqsave(void)
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	raw_spin_lock_irqsave(&tk_core.lock, flags);
 	return flags;
 }
 
 void timekeeper_unlock_irqrestore(unsigned long flags)
 {
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
 }
 
 /*
@@ -216,7 +212,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
  * the tkr's clocksource may change between the read reference, and the
  * clock reference passed to the read function. This can cause crashes if
  * the wrong clocksource is passed to the wrong read function.
- * This isn't necessary to use when holding the timekeeper_lock or doing
+ * This isn't necessary to use when holding the tk_core.lock or doing
  * a read of the fast-timekeeper tkrs (which is protected by its own locking
  * and update logic).
  */
@@ -708,13 +704,11 @@ static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
 int pvclock_gtod_register_notifier(struct notifier_block *nb)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
-	unsigned long flags;
 	int ret;
 
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	guard(raw_spinlock_irqsave)(&tk_core.lock);
 	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
 	update_pvclock_gtod(tk, true);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
 	return ret;
 }
@@ -727,14 +721,8 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
  */
 int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
 {
-	unsigned long flags;
-	int ret;
-
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
-	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
-
-	return ret;
+	guard(raw_spinlock_irqsave)(&tk_core.lock);
+	return raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
 }
 EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
@@ -782,7 +770,7 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
 	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
 }
 
-/* must hold timekeeper_lock */
+/* must hold tk_core.lock */
 static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 {
 	if (action & TK_CLEAR_NTP) {
@@ -1491,7 +1479,7 @@ int do_settimeofday64(const struct timespec64 *ts)
 	if (!timespec64_valid_settod(ts))
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	raw_spin_lock_irqsave(&tk_core.lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 
 	timekeeping_forward_now(tk);
@@ -1511,7 +1499,7 @@ out:
 	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
 	write_seqcount_end(&tk_core.seq);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
 
 	/* Signal hrtimers about time change */
 	clock_was_set(CLOCK_SET_WALL);
@@ -1541,7 +1529,7 @@ static int timekeeping_inject_offset(const struct timespec64 *ts)
 	if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	raw_spin_lock_irqsave(&tk_core.lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 
 	timekeeping_forward_now(tk);
@@ -1561,7 +1549,7 @@ error: /* even if we error out, we forwarded the time, so call update */
 	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
 	write_seqcount_end(&tk_core.seq);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
 
 	/* Signal hrtimers about time change */
 	clock_was_set(CLOCK_SET_WALL);
@@ -1637,7 +1625,7 @@ static int change_clocksource(void *data)
 		return 0;
 	}
 
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	raw_spin_lock_irqsave(&tk_core.lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 
 	timekeeping_forward_now(tk);
@@ -1646,7 +1634,7 @@ static int change_clocksource(void *data)
 	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
 	write_seqcount_end(&tk_core.seq);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
 
 	if (old) {
 		if (old->disable)
@@ -1801,7 +1789,9 @@ void __init timekeeping_init(void)
 	struct timespec64 wall_time, boot_offset, wall_to_mono;
 	struct timekeeper *tk = &tk_core.timekeeper;
 	struct clocksource *clock;
-	unsigned long flags;
+
+	raw_spin_lock_init(&tk_core.lock);
+	seqcount_raw_spinlock_init(&tk_core.seq, &tk_core.lock);
 
 	read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
 	if (timespec64_valid_settod(&wall_time) &&
@@ -1821,7 +1811,7 @@ void __init timekeeping_init(void)
 	 */
 	wall_to_mono = timespec64_sub(boot_offset, wall_time);
 
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	guard(raw_spinlock_irqsave)(&tk_core.lock);
 	write_seqcount_begin(&tk_core.seq);
 
 	ntp_init();
@@ -1838,7 +1828,6 @@ void __init timekeeping_init(void)
 	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 
 	write_seqcount_end(&tk_core.seq);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 }
 
 /* time in seconds when suspend began for persistent clock */
@@ -1919,7 +1908,7 @@ void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
 	struct timekeeper *tk = &tk_core.timekeeper;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	raw_spin_lock_irqsave(&tk_core.lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 
 	suspend_timing_needed = false;
@@ -1931,7 +1920,7 @@ void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
 	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
 	write_seqcount_end(&tk_core.seq);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
 
 	/* Signal hrtimers about time change */
 	clock_was_set(CLOCK_SET_WALL | CLOCK_SET_BOOT);
@@ -1955,7 +1944,7 @@ void timekeeping_resume(void)
 	clockevents_resume();
 	clocksource_resume();
 
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	raw_spin_lock_irqsave(&tk_core.lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 
 	/*
@@ -1993,7 +1982,7 @@ void timekeeping_resume(void)
 	timekeeping_suspended = 0;
 	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 
 	write_seqcount_end(&tk_core.seq);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
 
 	touch_softlockup_watchdog();
@@ -2024,7 +2013,7 @@ int timekeeping_suspend(void)
 	suspend_timing_needed = true;
 
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	raw_spin_lock_irqsave(&tk_core.lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 	timekeeping_forward_now(tk);
 	timekeeping_suspended = 1;
@@ -2063,7 +2052,7 @@ int timekeeping_suspend(void)
 	timekeeping_update(tk, TK_MIRROR);
 	halt_fast_timekeeper(tk);
 	write_seqcount_end(&tk_core.seq);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
 
 	tick_suspend();
 	clocksource_suspend();
@@ -2323,7 +2312,7 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
 	int shift = 0, maxshift;
 	u64 offset;
 
-	guard(raw_spinlock_irqsave)(&timekeeper_lock);
+	guard(raw_spinlock_irqsave)(&tk_core.lock);
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
@@ -2708,7 +2697,7 @@ int do_adjtimex(struct __kernel_timex *txc)
 	ktime_get_real_ts64(&ts);
 	add_device_randomness(&ts, sizeof(ts));
 
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	raw_spin_lock_irqsave(&tk_core.lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 
 	orig_tai = tai = tk->tai_offset;
@@ -2723,7 +2712,7 @@ int do_adjtimex(struct __kernel_timex *txc)
 	}
 
 	write_seqcount_end(&tk_core.seq);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
 
 	audit_ntp_log(&ad);
@@ -2747,11 +2736,8 @@ int do_adjtimex(struct __kernel_timex *txc)
  */
 void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	guard(raw_spinlock_irqsave)(&tk_core.lock);
 	__hardpps(phase_ts, raw_ts);
-	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 }
 EXPORT_SYMBOL(hardpps);
 #endif /* CONFIG_NTP_PPS */