/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tick related global functions
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU)
extern int tick_cpu_dying(unsigned int cpu);
extern void tick_assert_timekeeping_handover(void);
#else
#define tick_cpu_dying	NULL
static inline void tick_assert_timekeeping_handover(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
extern void tick_freeze(void);
extern void tick_unfreeze(void);
#else
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
#endif

#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
#  ifndef arch_needs_cpu
#   define arch_needs_cpu() (0)
#  endif
#else
static inline void tick_irq_enter(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
#else
static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
#endif

enum tick_broadcast_mode {
	TICK_BROADCAST_OFF,
	TICK_BROADCAST_ON,
	TICK_BROADCAST_FORCE,
};

enum tick_broadcast_state {
	TICK_BROADCAST_EXIT,
	TICK_BROADCAST_ENTER,
};

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern void tick_broadcast_control(enum tick_broadcast_mode mode);
#else
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return 0;
}
#endif

static inline void tick_broadcast_enable(void)
{
	tick_broadcast_control(TICK_BROADCAST_ON);
}
static inline void tick_broadcast_disable(void)
{
	tick_broadcast_control(TICK_BROADCAST_OFF);
}
static inline void tick_broadcast_force(void)
{
	tick_broadcast_control(TICK_BROADCAST_FORCE);
}
static inline int tick_broadcast_enter(void)
{
	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}
static inline void tick_broadcast_exit(void)
{
	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}

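/*
 * Illustrative sketch, not part of this header: a cpuidle-style caller
 * entering an idle state that stops the CPU-local tick device would
 * typically bracket the state with the tick_broadcast_enter()/
 * tick_broadcast_exit() helpers above. The enter_deep_idle_state()
 * helper is hypothetical.
 *
 *	ret = tick_broadcast_enter();
 *	if (ret)
 *		return ret;	... broadcast not possible, pick a shallower state
 *	enter_deep_idle_state();
 *	tick_broadcast_exit();
 */
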
enum tick_dep_bits {
	TICK_DEP_BIT_POSIX_TIMER	= 0,
	TICK_DEP_BIT_PERF_EVENTS	= 1,
	TICK_DEP_BIT_SCHED		= 2,
	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3,
	TICK_DEP_BIT_RCU		= 4,
	TICK_DEP_BIT_RCU_EXP		= 5
};
#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP

#define TICK_DEP_MASK_NONE		0
#define TICK_DEP_MASK_POSIX_TIMER	(1 << TICK_DEP_BIT_POSIX_TIMER)
#define TICK_DEP_MASK_PERF_EVENTS	(1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED		(1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE	(1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
#define TICK_DEP_MASK_RCU		(1 << TICK_DEP_BIT_RCU)
#define TICK_DEP_MASK_RCU_EXP		(1 << TICK_DEP_BIT_RCU_EXP)

#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
extern bool tick_nohz_tick_stopped(void);
extern bool tick_nohz_tick_stopped_cpu(int cpu);
extern void tick_nohz_idle_stop_tick(void);
extern void tick_nohz_idle_retain_tick(void);
extern void tick_nohz_idle_restart_tick(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern bool tick_nohz_idle_got_tick(void);
extern ktime_t tick_nohz_get_next_hrtimer(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline bool tick_nohz_tick_stopped(void) { return false; }
static inline bool tick_nohz_tick_stopped_cpu(int cpu) { return false; }
static inline void tick_nohz_idle_stop_tick(void) { }
static inline void tick_nohz_idle_retain_tick(void) { }
static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
static inline bool tick_nohz_idle_got_tick(void) { return false; }
static inline ktime_t tick_nohz_get_next_hrtimer(void)
{
	/* The next wake up is one tick period away; assume it starts now */
	return ktime_add(ktime_get(), TICK_NSEC);
}
static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	*delta_next = TICK_NSEC;
	return *delta_next;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
#endif /* !CONFIG_NO_HZ_COMMON */
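
/*
 * Illustrative sketch, not part of this header: a cpuidle-governor-style
 * caller would typically compare the expected sleep length against an idle
 * state's target residency. The deep_state/shallow_state indices and
 * target_residency_ns value are hypothetical.
 *
 *	ktime_t delta_tick;
 *	s64 sleep_ns = ktime_to_ns(tick_nohz_get_sleep_length(&delta_tick));
 *
 *	if (sleep_ns >= target_residency_ns)
 *		return deep_state;
 *	return shallow_state;
 */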

/*
 * Mask of CPUs that are nohz_full.
 *
 * Any user of this mask must be guarded by CONFIG_NO_HZ_FULL or a
 * tick_nohz_full_cpu() check.
 */
extern cpumask_var_t tick_nohz_full_mask;

#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;

static inline bool tick_nohz_full_enabled(void)
{
	if (!context_tracking_enabled())
		return false;

	return tick_nohz_full_running;
}

/*
 * Check if a CPU is part of the nohz_full subset. The macro arranges for
 * the cpu expression (typically smp_processor_id()) to be evaluated only
 * _after_ the static key check in tick_nohz_full_enabled().
 */
#define tick_nohz_full_cpu(_cpu) ({					\
	bool __ret = false;						\
	if (tick_nohz_full_enabled())					\
		__ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask);	\
	__ret;								\
})
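
/*
 * Illustrative sketch, not part of this header: callers typically use
 * tick_nohz_full_cpu() as a cheap guard so that nohz_full-only work is
 * skipped entirely when nohz_full is not enabled. The do_nohz_full_work()
 * helper is hypothetical and assumed to run with preemption disabled.
 *
 *	if (tick_nohz_full_cpu(smp_processor_id()))
 *		do_nohz_full_work();
 */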

static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
{
	if (tick_nohz_full_enabled())
		cpumask_or(mask, mask, tick_nohz_full_mask);
}

extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_set_task(struct task_struct *tsk,
				   enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit);
extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);

/*
 * The below are wrappers around tick_nohz_dep_[set,clear]*() that rely on
 * static keys to optimize away the calls when nohz_full is disabled.
 */
static inline void tick_dep_set(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set(bit);
}

static inline void tick_dep_clear(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear(bit);
}

static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_set_cpu(cpu, bit);
}

static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_clear_cpu(cpu, bit);
}

static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_task(tsk, bit);
}
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_task(tsk, bit);
}
static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_signal(tsk, bit);
}
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_signal(signal, bit);
}
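
/*
 * Illustrative sketch, not part of this header: a subsystem that needs the
 * tick to keep running on a nohz_full CPU sets a dependency bit for the
 * duration of that need and clears it when done. The start_cpu_sampling()
 * and stop_cpu_sampling() helpers are hypothetical.
 *
 *	tick_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *	start_cpu_sampling(cpu);
 *	...
 *	stop_cpu_sampling(cpu);
 *	tick_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 */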

extern void tick_nohz_full_kick_cpu(int cpu);
extern void __tick_nohz_task_switch(void);
extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }

static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }

static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit) { }

static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void __tick_nohz_task_switch(void) { }
static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { }
#endif

static inline void tick_nohz_task_switch(void)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_task_switch();
}

static inline void tick_nohz_user_enter_prepare(void)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		rcu_nocb_flush_deferred_wakeup();
}

#endif