/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Common time prototypes and such for all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 */

#ifndef __POWERPC_TIME_H
#define __POWERPC_TIME_H

#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/percpu.h>

#include <asm/processor.h>
#include <asm/cpu_has_feature.h>
#include <asm/vdso/timebase.h>

/* time.c */
extern u64 decrementer_max;

extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
extern struct clock_event_device decrementer_clockevent;

extern void generic_calibrate_decr(void);

/* Some sane defaults: 125 MHz timebase, 1GHz processor */
extern unsigned long ppc_proc_freq;
#define DEFAULT_PROC_FREQ	(DEFAULT_TB_FREQ * 8)
extern unsigned long ppc_tb_freq;
#define DEFAULT_TB_FREQ		125000000UL

extern bool tb_invalid;

struct div_result {
	u64 result_high;
	u64 result_low;
};
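/*
 * The Virtual Time Base (SPRN_VTB) is available on ISA v2.07 (POWER8)
 * and later CPUs; get_vtb() returns 0 where the register does not exist.
 */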
static inline u64 get_vtb(void)
{
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return mfspr(SPRN_VTB);

	return 0;
}

/* Accessor functions for the decrementer register.
 * The 4xx doesn't even have a decrementer. I tried to use the
 * generic timer interrupt code, which seems OK, with the 4xx PIT
 * in auto-reload mode. The problem is the PIT stops counting when it
 * hits zero. If it would wrap, we could use it just like a decrementer.
 */
static inline u64 get_dec(void)
{
	if (IS_ENABLED(CONFIG_40x))
		return mfspr(SPRN_PIT);

	return mfspr(SPRN_DEC);
}

/*
 * Note: Book E and 4xx processors differ from other PowerPC processors
 * in when the decrementer generates its interrupt: on the 1 to 0
 * transition for Book E/4xx, but on the 0 to -1 transition for others.
 */
static inline void set_dec(u64 val)
{
	if (IS_ENABLED(CONFIG_40x))
		mtspr(SPRN_PIT, (u32)val);
	else if (IS_ENABLED(CONFIG_BOOKE))
		mtspr(SPRN_DEC, val);
	else
		mtspr(SPRN_DEC, val - 1);
}
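/* Number of timebase ticks that have elapsed since @tstamp. */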
static inline unsigned long tb_ticks_since(unsigned long tstamp)
{
	return mftb() - tstamp;
}
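/*
 * mulhwu() returns the high 32 bits of an unsigned 32 x 32-bit multiply.
 * mulhdu() returns the high 64 bits of an unsigned 64 x 64-bit multiply;
 * on 32-bit kernels, where there is no mulhdu instruction, it is an
 * out-of-line helper.
 */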
#define mulhwu(x,y) \
({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})

#ifdef CONFIG_PPC64
#define mulhdu(x,y) \
({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
#else
extern u64 mulhdu(u64, u64);
#endif
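/*
 * Divide the 128-bit value dividend_high:dividend_low by a 32-bit
 * divisor; the 128-bit quotient is returned in *dr.
 */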
extern void div128_by_32(u64 dividend_high, u64 dividend_low,
			 unsigned divisor, struct div_result *dr);
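/*
 * time_init() performs boot-time timekeeping setup;
 * secondary_cpu_time_init() is called as each secondary CPU is brought up.
 */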
extern void secondary_cpu_time_init(void);
extern void __init time_init(void);
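/*
 * Per-CPU timebase value at which the next decrementer (timer) event is
 * due; timer_get_next_tb() below returns it for the current CPU.
 */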
DECLARE_PER_CPU(u64, decrementers_next_tb);

static inline u64 timer_get_next_tb(void)
{
	return __this_cpu_read(decrementers_next_tb);
}
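/*
 * Re-arm the host decrementer for this CPU's next timer event after it
 * has been used while running a KVM HV guest.
 */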
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void timer_rearm_host_dec(u64 now);
#endif
/* Convert timebase ticks to nanoseconds */
unsigned long long tb_to_ns(unsigned long long tb_ticks);
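/*
 * Illustrative use of the helpers above (not part of the kernel API
 * documentation): measure an interval in nanoseconds, e.g.
 *
 *	unsigned long start = mftb();
 *	...
 *	unsigned long long ns = tb_to_ns(tb_ticks_since(start));
 */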
void timer_broadcast_interrupt(void);

/* SPLPAR and VIRT_CPU_ACCOUNTING_NATIVE */
void pseries_accumulate_stolen_time(void);
u64 pseries_calculate_stolen_time(u64 stop_tb);

#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */