xref: /linux-6.15/include/linux/vtime.h (revision e00a844a)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif


struct task_struct;

/*
 * vtime_accounting_cpu_enabled() definitions/declarations
 */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
static inline bool vtime_accounting_cpu_enabled(void) { return true; }
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
/*
 * Checks if vtime is enabled on some CPU. Cputime readers need to be
 * careful in that case and compute the tickless cputime themselves (see
 * the illustrative sketch after this block).
 * For now vtime state is tied to context tracking. We might want to
 * decouple those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_is_enabled();
}

static inline bool vtime_accounting_cpu_enabled(void)
{
	if (vtime_accounting_enabled()) {
		if (context_tracking_cpu_is_enabled())
			return true;
	}

	return false;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
static inline bool vtime_accounting_cpu_enabled(void) { return false; }
#endif
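
/*
 * Illustrative sketch (assumed shape, not taken from the kernel): under
 * CONFIG_VIRT_CPU_ACCOUNTING_GEN a cputime reader checks whether vtime is
 * enabled anywhere and, if so, adds the delta accrued since the last
 * accounting hook instead of trusting the tick-based sums alone.  The real
 * readers live in kernel/sched/cputime.c; the example_* names below are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static u64 example_task_cputime(struct task_struct *t)
{
	u64 cputime = example_tick_based_sum(t);	/* hypothetical */

	/* Tickless case: account for time not yet folded into the sums. */
	if (vtime_accounting_enabled())
		cputime += example_unaccounted_delta(t);	/* hypothetical */

	return cputime;
}
#endif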


/*
 * Common vtime APIs
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
extern void vtime_task_switch(struct task_struct *prev);
#else
extern void vtime_common_task_switch(struct task_struct *prev);
static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_cpu_enabled())
		vtime_common_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */

extern void vtime_account_system(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
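
/*
 * Illustrative sketch (assumed caller, simplified): the scheduler invokes
 * vtime_task_switch(prev) from its context-switch tail so the cputime
 * consumed by the outgoing task is accounted before the incoming task is
 * charged.  example_finish_task_switch() is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_finish_task_switch(struct task_struct *prev)
{
	/* Account prev's accumulated system/idle time to the right bucket. */
	vtime_task_switch(prev);

	/* ... the rest of the context-switch bookkeeping ... */
}
#endif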

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif
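
/*
 * Illustrative sketch (assumed callers, simplified): with
 * CONFIG_VIRT_CPU_ACCOUNTING_GEN these hooks bracket the kernel/user and
 * host/guest transitions from the context tracking and virtualization
 * entry code, so the time in between lands in the right bucket.  A guest
 * run loop is framed roughly like this; example_enter_guest_mode() is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_run_vcpu(struct task_struct *tsk)
{
	vtime_guest_enter(tsk);		/* flush system time, start charging guest time */
	example_enter_guest_mode();	/* hypothetical: actually run the vCPU */
	vtime_guest_exit(tsk);		/* account guest time, back to system time */
}
#endif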

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq_enter(struct task_struct *tsk);
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/* On hard|softirq exit we always account the elapsed time to hard|softirq cputime */
	vtime_account_system(tsk);
}
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif

static inline void account_irq_enter_time(struct task_struct *tsk)
{
	vtime_account_irq_enter(tsk);
	irqtime_account_irq(tsk);
}

static inline void account_irq_exit_time(struct task_struct *tsk)
{
	vtime_account_irq_exit(tsk);
	irqtime_account_irq(tsk);
}
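
/*
 * Illustrative sketch (assumed caller, simplified): the generic hardirq
 * entry/exit paths wrap interrupt handling with these helpers so that both
 * vtime and IRQ-time accounting see the boundary.  example_do_hardirq() and
 * example_invoke_handler() are hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_do_hardirq(void)
{
	/* Account the interrupted context's time up to this point. */
	account_irq_enter_time(current);

	example_invoke_handler();	/* hypothetical: run the handler */

	/* Account the handler's time as hardirq time. */
	account_irq_exit_time(current);
}
#endif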

#endif /* _LINUX_KERNEL_VTIME_H */