/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H

#include <linux/smp.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() makes no promise of monotonicity or of bounded drift
 * between CPUs, and using it directly (which you should not) requires
 * IRQs to be disabled.
 *
 * Please use one of the three interfaces below.
 */
extern u64 sched_clock(void);

#if defined(CONFIG_ARCH_WANTS_NO_INSTR) || defined(CONFIG_GENERIC_SCHED_CLOCK)
extern u64 sched_clock_noinstr(void);
#else
static __always_inline u64 sched_clock_noinstr(void)
{
	return sched_clock();
}
#endif
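
/*
 * Illustrative sketch (an addition, not part of the upstream header):
 * rather than calling sched_clock() directly, callers should use the
 * wrappers declared in this file. For example, timing a section of code
 * on the local CPU (do_something() is a hypothetical placeholder):
 *
 *	u64 t0, t1;
 *
 *	t0 = local_clock();
 *	do_something();
 *	t1 = local_clock();
 *
 * t1 - t0 is then a nanosecond-scale delta taken through one of the
 * sanctioned interfaces, without the caller having to disable IRQs
 * itself.
 */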

/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(void)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static __always_inline u64 local_clock_noinstr(void)
{
	return sched_clock_noinstr();
}

static __always_inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

/*
 * When sched_clock_stable(), __sched_clock_offset provides the offset
 * between local_clock() and sched_clock().
 */
extern u64 __sched_clock_offset;
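
/*
 * Illustrative note (an addition, not part of the upstream header): the
 * comment above describes, roughly, the relationship
 *
 *	local_clock() ~= sched_clock() + __sched_clock_offset
 *
 * while sched_clock_stable(). Treat this as an approximation for
 * understanding the offset; the two clock reads cannot be taken
 * atomically, and the exact sign/ordering is an assumption here.
 */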

extern void sched_clock_tick(void);
extern void sched_clock_tick_stable(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(void);

/*
 * As outlined in clock.c, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}
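
/*
 * Illustrative sketch (an addition, not part of the upstream header):
 * given the warning above, only compare cpu_clock() values taken with
 * the same cpu argument. For a hypothetical event record:
 *
 *	struct my_event {
 *		int cpu;
 *		u64 stamp;
 *	};
 *
 *	e->cpu = raw_smp_processor_id();
 *	e->stamp = cpu_clock(e->cpu);
 *
 * Later, cpu_clock(e->cpu) - e->stamp is a meaningful delta (the clock
 * is monotonic per cpu argument), whereas cpu_clock(j) - e->stamp for
 * j != e->cpu may be negative, exactly as the warning says.
 */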

extern u64 local_clock_noinstr(void);
extern u64 local_clock(void);

#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface for runtime opt-in to IRQ time accounting based on
 * sched_clock. The opt-in is explicit so that systems with slow
 * sched_clocks do not pay a performance penalty.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif
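
/*
 * Illustrative sketch (an addition, not part of the upstream header): a
 * platform that knows its sched_clock() is fast could opt in from its
 * own setup code, e.g. (all names below are hypothetical):
 *
 *	static int __init my_platform_clock_init(void)
 *	{
 *		if (my_platform_sched_clock_is_fast())
 *			enable_sched_clock_irqtime();
 *		return 0;
 *	}
 *
 * The empty stubs above make such a call safe even when
 * CONFIG_IRQ_TIME_ACCOUNTING is not enabled.
 */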

#endif /* _LINUX_SCHED_CLOCK_H */