timekeeping: Remove unused ktime_get_fast_timestamps()

ktime_get_fast_timestamps() was added in 2020 by commit e2d977c9f1
("timekeeping: Provide multi-timestamp accessor to NMI safe timekeeper")
but has remained unused.

Remove it.

[ tglx: Fold the inline as David suggested in the submission ]

Signed-off-by: Dr. David Alan Gilbert <linux@treblig.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250112160132.450209-1-linux@treblig.org
Author:    Dr. David Alan Gilbert <linux@treblig.org>, 2025-01-12 16:01:32 +00:00
Committer: Thomas Gleixner <tglx@linutronix.de>
Parent:    4477b06014
Commit:    2d2a46cf23

2 changed files with 13 additions and 89 deletions
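
For reference, this is roughly what a caller of the interface being removed would have looked like. The struct and function signature are taken from the hunks below; the wrapper function and the pr_info() call are purely illustrative, since no in-tree user ever existed (which is why the interface goes away):

#include <linux/printk.h>
#include <linux/timekeeping.h>

/* Hypothetical caller, for illustration only. */
static void example_log_timestamps(void)
{
	struct ktime_timestamps snap;

	ktime_get_fast_timestamps(&snap);
	pr_info("mono=%llu boot=%llu real=%llu\n",
		snap.mono, snap.boot, snap.real);
}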

--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -263,18 +263,6 @@ extern bool timekeeping_rtc_skipresume(void);
 
 extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
 
-/**
- * struct ktime_timestamps - Simultaneous mono/boot/real timestamps
- * @mono:	Monotonic timestamp
- * @boot:	Boottime timestamp
- * @real:	Realtime timestamp
- */
-struct ktime_timestamps {
-	u64		mono;
-	u64		boot;
-	u64		real;
-};
-
 /**
  * struct system_time_snapshot - simultaneous raw/real time capture with
  *				 counter value
@@ -345,9 +333,6 @@ extern int get_device_system_crosststamp(
  */
 extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
 
-/* NMI safe mono/boot/realtime timestamps */
-extern void ktime_get_fast_timestamps(struct ktime_timestamps *snap);
-
 /*
  * Persistent clock related interfaces
  */

--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -485,25 +485,6 @@ u64 notrace ktime_get_tai_fast_ns(void)
 }
 EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns);
 
-static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
-{
-	struct tk_read_base *tkr;
-	u64 basem, baser, delta;
-	unsigned int seq;
-
-	do {
-		seq = raw_read_seqcount_latch(&tkf->seq);
-		tkr = tkf->base + (seq & 0x01);
-		basem = ktime_to_ns(tkr->base);
-		baser = ktime_to_ns(tkr->base_real);
-		delta = timekeeping_get_ns(tkr);
-	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
-
-	if (mono)
-		*mono = basem + delta;
-	return baser + delta;
-}
-
 /**
  * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
  *
@@ -511,64 +492,22 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono
  */
 u64 ktime_get_real_fast_ns(void)
 {
-	return __ktime_get_real_fast(&tk_fast_mono, NULL);
+	struct tk_fast *tkf = &tk_fast_mono;
+	struct tk_read_base *tkr;
+	u64 baser, delta;
+	unsigned int seq;
+
+	do {
+		seq = raw_read_seqcount_latch(&tkf->seq);
+		tkr = tkf->base + (seq & 0x01);
+		baser = ktime_to_ns(tkr->base_real);
+		delta = timekeeping_get_ns(tkr);
+	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
+
+	return baser + delta;
 }
 EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
 
-/**
- * ktime_get_fast_timestamps: - NMI safe timestamps
- * @snapshot:	Pointer to timestamp storage
- *
- * Stores clock monotonic, boottime and realtime timestamps.
- *
- * Boot time is a racy access on 32bit systems if the sleep time injection
- * happens late during resume and not in timekeeping_resume(). That could
- * be avoided by expanding struct tk_read_base with boot offset for 32bit
- * and adding more overhead to the update. As this is a hard to observe
- * once per resume event which can be filtered with reasonable effort using
- * the accurate mono/real timestamps, it's probably not worth the trouble.
- *
- * Aside of that it might be possible on 32 and 64 bit to observe the
- * following when the sleep time injection happens late:
- *
- * CPU 0				CPU 1
- * timekeeping_resume()
- * ktime_get_fast_timestamps()
- *	mono, real = __ktime_get_real_fast()
- *					inject_sleep_time()
- *					   update boot offset
- *	boot = mono + bootoffset;
- *
- * That means that boot time already has the sleep time adjustment, but
- * real time does not. On the next readout both are in sync again.
- *
- * Preventing this for 64bit is not really feasible without destroying the
- * careful cache layout of the timekeeper because the sequence count and
- * struct tk_read_base would then need two cache lines instead of one.
- *
- * Access to the time keeper clock source is disabled across the innermost
- * steps of suspend/resume. The accessors still work, but the timestamps
- * are frozen until time keeping is resumed which happens very early.
- *
- * For regular suspend/resume there is no observable difference vs. sched
- * clock, but it might affect some of the nasty low level debug printks.
- *
- * OTOH, access to sched clock is not guaranteed across suspend/resume on
- * all systems either so it depends on the hardware in use.
- *
- * If that turns out to be a real problem then this could be mitigated by
- * using sched clock in a similar way as during early boot. But it's not as
- * trivial as on early boot because it needs some careful protection
- * against the clock monotonic timestamp jumping backwards on resume.
- */
-void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot)
-{
-	struct timekeeper *tk = &tk_core.timekeeper;
-
-	snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono);
-	snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
-}
-
 /**
  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
  * @tk:	Timekeeper to snapshot.
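
Should a user for such timestamps appear later, a rough substitute can be pieced together from the fast accessors that remain after this commit. This is only a sketch: the struct and function names are made up for illustration, and unlike the removed helper the three reads are not taken under a single latch sequence, so mono/boot/real are not guaranteed to be mutually consistent:

#include <linux/timekeeping.h>

/* Sketch only: approximate the removed helper with the surviving
 * NMI-safe accessors. Each call reads the latch independently, so
 * the three values may come from different timekeeper updates. */
struct example_timestamps {
	u64 mono;
	u64 boot;
	u64 real;
};

static void example_get_fast_timestamps(struct example_timestamps *snap)
{
	snap->mono = ktime_get_mono_fast_ns();
	snap->boot = ktime_get_boot_fast_ns();
	snap->real = ktime_get_real_fast_ns();
}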