/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/arch_timer.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
#include <asm/sysreg.h>

#define VDSO_HAS_CLOCK_GETRES		1
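
/*
 * VDSO_HAS_CLOCK_GETRES tells the generic vDSO library
 * (lib/vdso/gettimeofday.c) to also build a userspace clock_getres()
 * implementation, backed by clock_getres_fallback() below when needed.
 */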

static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	register struct timezone *tz asm("x1") = _tz;
	register struct __kernel_old_timeval *tv asm("x0") = _tv;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_gettimeofday;

	/*
	 * Trap into the kernel: per the AArch64 syscall convention, x8
	 * carries the syscall number, x0/x1 the arguments, and the
	 * result comes back in x0.
	 */
	asm volatile(
	"	svc #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_gettime;

	asm volatile(
	"	svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_getres;

	asm volatile(
	"	svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_time_data *vd)
{
	/*
	 * Core checks for mode already, so this raced against a concurrent
	 * update. Return something. Core will do another round and then
	 * see the mode change and fall back to the syscall.
	 */
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return 0;

	return __arch_counter_get_cntvct();
}
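
/*
 * Usage sketch, illustrative only: the caller of these helpers is the
 * generic vDSO code in lib/vdso/gettimeofday.c. Simplified (vd, clock and
 * ts stand in for that code's own variables), the flow is roughly:
 *
 *	if (vd->clock_mode == VDSO_CLOCKMODE_NONE)
 *		return clock_gettime_fallback(clock, ts);
 *	cycles = __arch_get_hw_counter(vd->clock_mode, vd);
 *
 * i.e. the syscall stubs above are only reached when the hardware counter
 * cannot be read directly from userspace.
 */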

#if IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB)
static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
	const struct vdso_time_data *ret = &vdso_u_time_data;

	/* Work around invalid absolute relocations */
	OPTIMIZER_HIDE_VAR(ret);

	return ret;
}
/* Defining the macro tells the generic vDSO code to use the helper above. */
#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
#endif /* IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) */

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */