/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tick related global functions
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/static_key.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU)
extern int tick_cpu_dying(unsigned int cpu);
extern void tick_assert_timekeeping_handover(void);
#else
#define tick_cpu_dying	NULL
static inline void tick_assert_timekeeping_handover(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
extern void tick_freeze(void);
extern void tick_unfreeze(void);
#else
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
#endif

#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
# ifndef arch_needs_cpu
# define arch_needs_cpu() (0)
# endif
# else
static inline void tick_irq_enter(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
#else
static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
#endif

enum tick_broadcast_mode {
	TICK_BROADCAST_OFF,
	TICK_BROADCAST_ON,
	TICK_BROADCAST_FORCE,
};

enum tick_broadcast_state {
	TICK_BROADCAST_EXIT,
	TICK_BROADCAST_ENTER,
};

extern struct static_key_false arch_needs_tick_broadcast;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern void tick_broadcast_control(enum tick_broadcast_mode mode);
#else
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return 0;
}
#endif

static inline void tick_broadcast_enable(void)
{
	tick_broadcast_control(TICK_BROADCAST_ON);
}
static inline void tick_broadcast_disable(void)
{
	tick_broadcast_control(TICK_BROADCAST_OFF);
}
static inline void tick_broadcast_force(void)
{
	tick_broadcast_control(TICK_BROADCAST_FORCE);
}
static inline int tick_broadcast_enter(void)
{
	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}
static inline void tick_broadcast_exit(void)
{
	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}

enum tick_dep_bits {
	TICK_DEP_BIT_POSIX_TIMER	= 0,
	TICK_DEP_BIT_PERF_EVENTS	= 1,
	TICK_DEP_BIT_SCHED		= 2,
	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3,
	TICK_DEP_BIT_RCU		= 4,
	TICK_DEP_BIT_RCU_EXP		= 5
};
#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP
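
/*
 * A set dependency bit tells the NOHZ code that the named subsystem still
 * needs the periodic tick, so the tick is not stopped on full dynticks
 * (nohz_full) CPUs while the bit is set. Dependencies can be tracked
 * globally, per CPU, per task (tsk->tick_dep_mask) or per signal struct
 * via the tick_dep_set*()/tick_dep_clear*() wrappers further down; the
 * TICK_DEP_MASK_* values below are the corresponding bit masks.
 */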
#define TICK_DEP_MASK_NONE		0
#define TICK_DEP_MASK_POSIX_TIMER	(1 << TICK_DEP_BIT_POSIX_TIMER)
#define TICK_DEP_MASK_PERF_EVENTS	(1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED		(1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE	(1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
#define TICK_DEP_MASK_RCU		(1 << TICK_DEP_BIT_RCU)
#define TICK_DEP_MASK_RCU_EXP		(1 << TICK_DEP_BIT_RCU_EXP)

#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
extern bool tick_nohz_tick_stopped(void);
extern bool tick_nohz_tick_stopped_cpu(int cpu);
extern void tick_nohz_idle_stop_tick(void);
extern void tick_nohz_idle_retain_tick(void);
extern void tick_nohz_idle_restart_tick(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern bool tick_nohz_idle_got_tick(void);
extern ktime_t tick_nohz_get_next_hrtimer(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
static inline void tick_nohz_idle_stop_tick(void) { }
static inline void tick_nohz_idle_retain_tick(void) { }
static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
static inline bool tick_nohz_idle_got_tick(void) { return false; }
static inline ktime_t tick_nohz_get_next_hrtimer(void)
{
	/* Next wake up is the tick period, assume it starts now */
	return ktime_add(ktime_get(), TICK_NSEC);
}
static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	*delta_next = TICK_NSEC;
	return *delta_next;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
#endif /* !CONFIG_NO_HZ_COMMON */
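
/*
 * Illustrative sketch (not the authoritative code path) of how the idle
 * loop is expected to drive the tick_nohz_idle_*() interfaces above;
 * governor_wants_tick_stopped() is a placeholder for the cpuidle
 * governor's decision, which is informed by tick_nohz_get_sleep_length():
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched()) {
 *		if (governor_wants_tick_stopped())
 *			tick_nohz_idle_stop_tick();
 *		else
 *			tick_nohz_idle_retain_tick();
 *		arch_cpu_idle();
 *	}
 *	tick_nohz_idle_exit();
 *
 * tick_nohz_idle_restart_tick() brings the tick back early when the CPU
 * ends up polling instead of sleeping.
 */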

/*
 * Mask of CPUs that are nohz_full.
 *
 * Users should be guarded by CONFIG_NO_HZ_FULL or a tick_nohz_full_cpu()
 * check.
 */
extern cpumask_var_t tick_nohz_full_mask;

#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;

static inline bool tick_nohz_full_enabled(void)
{
	if (!context_tracking_enabled())
		return false;

	return tick_nohz_full_running;
}

/*
 * Check if a CPU is part of the nohz_full subset. Arrange for evaluating
 * the cpu expression (typically smp_processor_id()) _after_ the static
 * key.
 */
#define tick_nohz_full_cpu(_cpu) ({					\
	bool __ret = false;						\
	if (tick_nohz_full_enabled())					\
		__ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask);	\
	__ret;								\
})

static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
{
	if (tick_nohz_full_enabled())
		cpumask_or(mask, mask, tick_nohz_full_mask);
}

extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_set_task(struct task_struct *tsk,
				   enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit);
extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);

/*
 * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
 * on top of static keys.
 */
static inline void tick_dep_set(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set(bit);
}

static inline void tick_dep_clear(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear(bit);
}

static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_set_cpu(cpu, bit);
}

static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_clear_cpu(cpu, bit);
}

static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_task(tsk, bit);
}

static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_task(tsk, bit);
}

static inline void tick_dep_init_task(struct task_struct *tsk)
{
	atomic_set(&tsk->tick_dep_mask, 0);
}

static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_signal(tsk, bit);
}
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_signal(signal, bit);
}

extern void tick_nohz_full_kick_cpu(int cpu);
extern void __tick_nohz_task_switch(void);
extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }

static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }

static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_init_task(struct task_struct *tsk) { }
static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit) { }

static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void __tick_nohz_task_switch(void) { }
static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { }
#endif

static inline void tick_nohz_task_switch(void)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_task_switch();
}

static inline void tick_nohz_user_enter_prepare(void)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		rcu_nocb_flush_deferred_wakeup();
}

#endif