vdso: Rework struct vdso_time_data and introduce struct vdso_clock

To support multiple PTP clocks, the VDSO data structure needs to be
reworked. All clock-specific data will end up in struct vdso_clock, and
struct vdso_time_data will contain an array of VDSO clocks.

Now that all preparatory changes are in place:

Split the clock-related struct members into a separate struct
vdso_clock. Make sure all users are aware that vdso_time_data is no longer
initialized as an array and that vdso_clock is now the array inside
vdso_time_data. Remove the vdso_clock define, which mapped it to
vdso_time_data for the transition.
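
In short: previously vdso_u_time_data was an array with one struct
vdso_time_data instance per clocksource base; now there is a single instance
and the per-clocksource state is an array embedded in it. A rough sketch of
the new layout (only a representative subset of the per-clock members is
shown; the struct definition hunks below are authoritative):

	struct vdso_clock {
		u32			seq;
		s32			clock_mode;
		u64			cycle_last;
		/* mask, mult, shift, basetime[VDSO_BASES], offset[VDSO_BASES], ... */
	};

	struct vdso_time_data {
		struct arch_vdso_time_data	arch_data;

		struct vdso_clock		clock_data[CS_BASES];

		s32				tz_minuteswest;
		s32				tz_dsttime;
		u32				hrtimer_res;
		u32				__unused;
	} ____cacheline_aligned;

	/* Writers/readers now index the embedded array, e.g.:
	 *	struct vdso_clock *vc = vdata->clock_data;
	 *	vc[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last;
	 */
	extern struct vdso_time_data vdso_u_time_data;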

Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250303-vdso-clock-v1-19-c1b5c69a166f@linutronix.de
Anna-Maria Behnsen 2025-03-03 12:11:21 +01:00 committed by Thomas Gleixner
parent 97a5a90ca2
commit 886653e366
10 changed files with 57 additions and 54 deletions


@@ -149,7 +149,7 @@ static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(
 	 * where __aarch64_get_vdso_u_time_data() is called, and then keep the
 	 * result in a register.
 	 */
-	asm volatile("mov %0, %1" : "=r"(ret) : "r"(vdso_u_time_data));
+	asm volatile("mov %0, %1" : "=r"(ret) : "r"(&vdso_u_time_data));

 	return ret;
 }


@@ -15,8 +15,8 @@
 static __always_inline
 void __arm64_update_vsyscall(struct vdso_time_data *vdata)
 {
-	vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
-	vdata[CS_RAW].mask = VDSO_PRECISION_MASK;
+	vdata->clock_data[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
+	vdata->clock_data[CS_RAW].mask = VDSO_PRECISION_MASK;
 }

 #define __arch_update_vsyscall __arm64_update_vsyscall


@@ -79,12 +79,10 @@ void __init time_early_init(void)
 {
 	struct ptff_qto qto;
 	struct ptff_qui qui;
-	int cs;

 	/* Initialize TOD steering parameters */
 	tod_steering_end = tod_clock_base.tod;
-	for (cs = 0; cs < CS_BASES; cs++)
-		vdso_k_time_data[cs].arch_data.tod_steering_end = tod_steering_end;
+	vdso_k_time_data->arch_data.tod_steering_end = tod_steering_end;

 	if (!test_facility(28))
 		return;
@@ -373,7 +371,6 @@ static void clock_sync_global(long delta)
 {
 	unsigned long now, adj;
 	struct ptff_qto qto;
-	int cs;

 	/* Fixup the monotonic sched clock. */
 	tod_clock_base.eitod += delta;
@@ -389,10 +386,8 @@ static void clock_sync_global(long delta)
 		panic("TOD clock sync offset %li is too large to drift\n",
 		      tod_steering_delta);
 	tod_steering_end = now + (abs(tod_steering_delta) << 15);
-	for (cs = 0; cs < CS_BASES; cs++) {
-		vdso_k_time_data[cs].arch_data.tod_steering_end = tod_steering_end;
-		vdso_k_time_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
-	}
+	vdso_k_time_data->arch_data.tod_steering_end = tod_steering_end;
+	vdso_k_time_data->arch_data.tod_steering_delta = tod_steering_delta;

 	/* Update LPAR offset. */
 	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)


@@ -9,7 +9,7 @@
 #ifndef __arch_get_vdso_u_time_data
 static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
 {
-	return vdso_u_time_data;
+	return &vdso_u_time_data;
 }
 #endif


@@ -69,9 +69,7 @@ struct vdso_timestamp {
 };

 /**
- * struct vdso_time_data - vdso datapage representation
- * @arch_data:		architecture specific data (optional, defaults
- *			to an empty struct)
+ * struct vdso_clock - vdso per clocksource datapage representation
  * @seq:		timebase sequence counter
  * @clock_mode:		clock mode
  * @cycle_last:		timebase at clocksource init
@@ -81,17 +79,9 @@ struct vdso_timestamp {
  * @shift:		clocksource shift
  * @basetime[clock_id]:	basetime per clock_id
  * @offset[clock_id]:	time namespace offset per clock_id
- * @tz_minuteswest:	minutes west of Greenwich
- * @tz_dsttime:		type of DST correction
- * @hrtimer_res:	hrtimer resolution
- * @__unused:		unused
  *
- * vdso_time_data will be accessed by 64 bit and compat code at the same time
- * so we should be careful before modifying this structure.
- *
- * The ordering of the struct members is optimized to have fast access to the
- * often required struct members which are related to CLOCK_REALTIME and
- * CLOCK_MONOTONIC. This information is stored in the first cache lines.
+ * See also struct vdso_time_data for basic access and ordering information as
+ * struct vdso_clock is used there.
  *
  * @basetime is used to store the base time for the system wide time getter
  * VVAR page.
@@ -104,9 +94,7 @@ struct vdso_timestamp {
  * For clocks which are not affected by time namespace adjustment the
  * offset must be zero.
  */
-struct vdso_time_data {
-	struct arch_vdso_time_data	arch_data;
-
+struct vdso_clock {
 	u32				seq;

 	s32				clock_mode;
@@ -122,15 +110,36 @@ struct vdso_time_data {
 		struct vdso_timestamp	basetime[VDSO_BASES];
 		struct timens_offset	offset[VDSO_BASES];
 	};
+};

-	s32			tz_minuteswest;
-	s32			tz_dsttime;
-	u32			hrtimer_res;
-	u32			__unused;
+/**
+ * struct vdso_time_data - vdso datapage representation
+ * @arch_data:		architecture specific data (optional, defaults
+ *			to an empty struct)
+ * @clock_data:		clocksource related data (array)
+ * @tz_minuteswest:	minutes west of Greenwich
+ * @tz_dsttime:		type of DST correction
+ * @hrtimer_res:	hrtimer resolution
+ * @__unused:		unused
+ *
+ * vdso_time_data will be accessed by 64 bit and compat code at the same time
+ * so we should be careful before modifying this structure.
+ *
+ * The ordering of the struct members is optimized to have fast access to the
+ * often required struct members which are related to CLOCK_REALTIME and
+ * CLOCK_MONOTONIC. This information is stored in the first cache lines.
+ */
+struct vdso_time_data {
+	struct arch_vdso_time_data	arch_data;
+
+	struct vdso_clock		clock_data[CS_BASES];
+
+	s32				tz_minuteswest;
+	s32				tz_dsttime;
+	u32				hrtimer_res;
+	u32				__unused;
 } ____cacheline_aligned;

-#define vdso_clock vdso_time_data
-
 /**
  * struct vdso_rng_data - vdso RNG state information
  * @generation:	counter representing the number of RNG reseeds
@@ -151,7 +160,7 @@ struct vdso_rng_data {
  * relocation, and this is what we need.
  */
 #ifdef CONFIG_GENERIC_VDSO_DATA_STORE
-extern struct vdso_time_data vdso_u_time_data[CS_BASES] __attribute__((visibility("hidden")));
+extern struct vdso_time_data vdso_u_time_data __attribute__((visibility("hidden")));
 extern struct vdso_rng_data vdso_u_rng_data __attribute__((visibility("hidden")));
 extern struct vdso_arch_data vdso_u_arch_data __attribute__((visibility("hidden")));


@@ -30,7 +30,7 @@ static __always_inline u32 vdso_read_retry(const struct vdso_clock *vc,

 static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
 {
-	struct vdso_clock *vc = vd;
+	struct vdso_clock *vc = vd->clock_data;

 	/*
 	 * WRITE_ONCE() is required otherwise the compiler can validly tear
@@ -44,7 +44,7 @@ static __always_inline void vdso_write_begin(struct vdso_time_data *vd)

 static __always_inline void vdso_write_end(struct vdso_time_data *vd)
 {
-	struct vdso_clock *vc = vd;
+	struct vdso_clock *vc = vd->clock_data;

 	smp_wmb();
 	/*


@@ -237,7 +237,7 @@ static void timens_set_vvar_page(struct task_struct *task,
 	ns->frozen_offsets = true;

 	vdata = page_address(ns->vvar_page);
-	vc = vdata;
+	vc = vdata->clock_data;

 	for (i = 0; i < CS_BASES; i++)
 		timens_setup_vdso_clock_data(&vc[i], ns);


@@ -17,8 +17,8 @@

 static inline void update_vdso_time_data(struct vdso_time_data *vdata, struct timekeeper *tk)
 {
+	struct vdso_clock *vc = vdata->clock_data;
 	struct vdso_timestamp *vdso_ts;
-	struct vdso_clock *vc = vdata;
 	u64 nsec, sec;

 	vc[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last;
@@ -78,8 +78,8 @@ static inline void update_vdso_time_data(struct vdso_time_data *vdata, struct ti
 void update_vsyscall(struct timekeeper *tk)
 {
 	struct vdso_time_data *vdata = vdso_k_time_data;
+	struct vdso_clock *vc = vdata->clock_data;
 	struct vdso_timestamp *vdso_ts;
-	struct vdso_clock *vc = vdata;
 	s32 clock_mode;
 	u64 nsec;

@@ -109,9 +109,8 @@ void update_vsyscall(struct timekeeper *tk)

 	/*
 	 * Read without the seqlock held by clock_getres().
-	 * Note: No need to have a second copy.
 	 */
-	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
+	WRITE_ONCE(vdata->hrtimer_res, hrtimer_resolution);

 	/*
 	 * If the current clocksource is not VDSO capable, then spare the
@@ -131,8 +130,8 @@ void update_vsyscall_tz(void)
 {
 	struct vdso_time_data *vdata = vdso_k_time_data;

-	vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
-	vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
+	vdata->tz_minuteswest = sys_tz.tz_minuteswest;
+	vdata->tz_dsttime = sys_tz.tz_dsttime;

 	__arch_sync_vdso_time_data(vdata);
 }


@@ -13,10 +13,10 @@
  */
 #ifdef CONFIG_HAVE_GENERIC_VDSO
 static union {
-	struct vdso_time_data	data[CS_BASES];
+	struct vdso_time_data	data;
 	u8			page[PAGE_SIZE];
 } vdso_time_data_store __page_aligned_data;
-struct vdso_time_data *vdso_k_time_data = vdso_time_data_store.data;
+struct vdso_time_data *vdso_k_time_data = &vdso_time_data_store.data;
 static_assert(sizeof(vdso_time_data_store) == PAGE_SIZE);
 #endif /* CONFIG_HAVE_GENERIC_VDSO */


@@ -87,8 +87,8 @@ int do_hres_timens(const struct vdso_time_data *vdns, const struct vdso_clock *v
 {
 	const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
 	const struct timens_offset *offs = &vcns->offset[clk];
+	const struct vdso_clock *vc = vd->clock_data;
 	const struct vdso_timestamp *vdso_ts;
-	const struct vdso_clock *vc = vd;
 	u64 cycles, ns;
 	u32 seq;
 	s64 sec;
@@ -199,8 +199,8 @@ int do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock
 {
 	const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
 	const struct timens_offset *offs = &vcns->offset[clk];
+	const struct vdso_clock *vc = vd->clock_data;
 	const struct vdso_timestamp *vdso_ts;
-	const struct vdso_clock *vc = vd;
 	u64 nsec;
 	s64 sec;
 	s32 seq;
@@ -265,7 +265,7 @@ static __always_inline int
 __cvdso_clock_gettime_common(const struct vdso_time_data *vd, clockid_t clock,
 			     struct __kernel_timespec *ts)
 {
-	const struct vdso_clock *vc = vd;
+	const struct vdso_clock *vc = vd->clock_data;
 	u32 msk;

 	/* Check for negative values or invalid clocks */
@@ -337,7 +337,7 @@ static __maybe_unused int
 __cvdso_gettimeofday_data(const struct vdso_time_data *vd,
 			  struct __kernel_old_timeval *tv, struct timezone *tz)
 {
-	const struct vdso_clock *vc = vd;
+	const struct vdso_clock *vc = vd->clock_data;

 	if (likely(tv != NULL)) {
 		struct __kernel_timespec ts;
@@ -371,13 +371,13 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
 static __maybe_unused __kernel_old_time_t
 __cvdso_time_data(const struct vdso_time_data *vd, __kernel_old_time_t *time)
 {
-	const struct vdso_clock *vc = vd;
+	const struct vdso_clock *vc = vd->clock_data;
 	__kernel_old_time_t t;

 	if (IS_ENABLED(CONFIG_TIME_NS) &&
 	    vc->clock_mode == VDSO_CLOCKMODE_TIMENS) {
 		vd = __arch_get_vdso_u_timens_data(vd);
-		vc = vd;
+		vc = vd->clock_data;
 	}

 	t = READ_ONCE(vc[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
@@ -399,7 +399,7 @@ static __maybe_unused
 int __cvdso_clock_getres_common(const struct vdso_time_data *vd, clockid_t clock,
 				struct __kernel_timespec *res)
 {
-	const struct vdso_clock *vc = vd;
+	const struct vdso_clock *vc = vd->clock_data;
 	u32 msk;
 	u64 ns;

@@ -420,7 +420,7 @@ int __cvdso_clock_getres_common(const struct vdso_time_data *vd, clockid_t clock
 		/*
 		 * Preserves the behaviour of posix_get_hrtimer_res().
 		 */
-		ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+		ns = READ_ONCE(vd->hrtimer_res);
 	} else if (msk & VDSO_COARSE) {
 		/*
 		 * Preserves the behaviour of posix_get_coarse_res().