xref: /linux-6.15/include/linux/vtime.h (revision e44fcb4b)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif


struct task_struct;

/*
 * vtime_accounting_enabled_this_cpu() definitions/declarations
 */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)

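/*
 * With native accounting the architecture itself accounts cputime at every
 * kernel<->user transition, so per-CPU vtime accounting is always enabled.
 */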
static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);

#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)

/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking. We might want to decouple
 * those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_enabled();
}

static inline bool vtime_accounting_enabled_this_cpu(void)
{
	if (vtime_accounting_enabled()) {
		if (context_tracking_enabled_this_cpu())
			return true;
	}

	return false;
}

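/*
 * vtime_task_switch_generic() flushes the outgoing task's pending vtime at
 * context switch; the wrapper below only calls it when this CPU is actually
 * doing tickless (generic) accounting.
 */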
extern void vtime_task_switch_generic(struct task_struct *prev);

static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_task_switch_generic(prev);
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }

#endif

/*
 * Common vtime APIs
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_account_kernel(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
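/*
 * No stub is needed for vtime_account_idle(): its callers are themselves
 * built only when CONFIG_VIRT_CPU_ACCOUNTING is set.
 */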
static inline void vtime_account_kernel(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */

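/*
 * Transition hooks used by the generic, context-tracking based accounting:
 * user/guest entry and exit, plus per-CPU idle task initialization.
 */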
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
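/*
 * On IRQ entry, the time accumulated so far is flushed to the interrupted
 * context before the handler runs.
 */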
extern void vtime_account_irq_enter(struct task_struct *tsk);
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/* On hard|softirq exit we always account to hard|softirq cputime */
	vtime_account_kernel(tsk);
}
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif


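/*
 * IRQ time accounting: measures hard/soft IRQ time with sched_clock(), so it
 * is available independently of the virtual CPU accounting options above.
 */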
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif

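/*
 * Combined entry/exit helpers called around hard and soft IRQ handling:
 * both the vtime and the irqtime paths are notified, and each call is a
 * no-op when its config option is disabled.
 */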
static inline void account_irq_enter_time(struct task_struct *tsk)
{
	vtime_account_irq_enter(tsk);
	irqtime_account_irq(tsk);
}

static inline void account_irq_exit_time(struct task_struct *tsk)
{
	vtime_account_irq_exit(tsk);
	irqtime_account_irq(tsk);
}

#endif /* _LINUX_KERNEL_VTIME_H */