/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tick related global functions
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/static_key.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU)
extern int tick_cpu_dying(unsigned int cpu);
extern void tick_assert_timekeeping_handover(void);
#else
#define tick_cpu_dying	NULL
static inline void tick_assert_timekeeping_handover(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
extern void tick_freeze(void);
extern void tick_unfreeze(void);
#else
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
#endif

#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
# ifndef arch_needs_cpu
#  define arch_needs_cpu() (0)
# endif
#else
static inline void tick_irq_enter(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
#else
static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
#endif

enum tick_broadcast_mode {
	TICK_BROADCAST_OFF,
	TICK_BROADCAST_ON,
	TICK_BROADCAST_FORCE,
};

enum tick_broadcast_state {
	TICK_BROADCAST_EXIT,
	TICK_BROADCAST_ENTER,
};

extern struct static_key_false arch_needs_tick_broadcast;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern void tick_broadcast_control(enum tick_broadcast_mode mode);
#else
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return 0;
}
#endif

static inline void tick_broadcast_enable(void)
{
	tick_broadcast_control(TICK_BROADCAST_ON);
}
static inline void tick_broadcast_disable(void)
{
	tick_broadcast_control(TICK_BROADCAST_OFF);
}
static inline void tick_broadcast_force(void)
{
	tick_broadcast_control(TICK_BROADCAST_FORCE);
}
static inline int tick_broadcast_enter(void)
{
	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}
static inline void tick_broadcast_exit(void)
{
	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}
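/*
 * Illustrative sketch, not part of this header's API: how an idle-entry
 * path might bracket a power state that stops the CPU-local timer with
 * tick_broadcast_enter()/tick_broadcast_exit().  The function below is
 * hypothetical and the actual low-power entry is elided; the cpuidle core
 * does the equivalent for states flagged as stopping the timer.
 */
static inline int example_enter_tick_stopping_idle(void)
{
	/*
	 * Hand the tick over to the broadcast device.  A non-zero return
	 * means this CPU may not stop its local tick right now (e.g. it
	 * currently drives the broadcast), so the caller should fall back
	 * to a shallower idle state.
	 */
	if (tick_broadcast_enter())
		return -EBUSY;

	/* ... architecture specific entry into the deep idle state ... */

	/* Wakeup: switch back to the CPU-local tick device. */
	tick_broadcast_exit();

	return 0;
}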
enum tick_dep_bits {
	TICK_DEP_BIT_POSIX_TIMER	= 0,
	TICK_DEP_BIT_PERF_EVENTS	= 1,
	TICK_DEP_BIT_SCHED		= 2,
	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3,
	TICK_DEP_BIT_RCU		= 4,
	TICK_DEP_BIT_RCU_EXP		= 5
};
#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP

#define TICK_DEP_MASK_NONE		0
#define TICK_DEP_MASK_POSIX_TIMER	(1 << TICK_DEP_BIT_POSIX_TIMER)
#define TICK_DEP_MASK_PERF_EVENTS	(1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED		(1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE	(1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
#define TICK_DEP_MASK_RCU		(1 << TICK_DEP_BIT_RCU)
#define TICK_DEP_MASK_RCU_EXP		(1 << TICK_DEP_BIT_RCU_EXP)

#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
extern bool tick_nohz_tick_stopped(void);
extern bool tick_nohz_tick_stopped_cpu(int cpu);
extern void tick_nohz_idle_stop_tick(void);
extern void tick_nohz_idle_retain_tick(void);
extern void tick_nohz_idle_restart_tick(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern bool tick_nohz_idle_got_tick(void);
extern ktime_t tick_nohz_get_next_hrtimer(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
static inline void tick_nohz_idle_stop_tick(void) { }
static inline void tick_nohz_idle_retain_tick(void) { }
static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
static inline bool tick_nohz_idle_got_tick(void) { return false; }
static inline ktime_t tick_nohz_get_next_hrtimer(void)
{
	/* Next wake up is the tick period, assume it starts now */
	return ktime_add(ktime_get(), TICK_NSEC);
}
static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	*delta_next = TICK_NSEC;
	return *delta_next;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
#endif /* !CONFIG_NO_HZ_COMMON */
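/*
 * Illustrative sketch, not part of this header's API: sampling per-CPU idle
 * time the way a governor-style consumer might.  The helper below is
 * hypothetical.  A return value of (u64)-1 from get_cpu_idle_time_us()
 * (including the !CONFIG_NO_HZ_COMMON stub above) signals that idle time
 * accounting is not available, in which case no delta is reported.
 */
static inline u64 example_cpu_idle_delta_us(int cpu, u64 *prev_idle_us)
{
	u64 now_idle = get_cpu_idle_time_us(cpu, NULL);
	u64 delta;

	if (now_idle == (u64)-1)
		return 0;

	delta = now_idle - *prev_idle_us;
	*prev_idle_us = now_idle;

	return delta;
}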
/*
 * Mask of CPUs that are nohz_full.
 *
 * Users should be guarded by CONFIG_NO_HZ_FULL or a tick_nohz_full_cpu()
 * check.
 */
extern cpumask_var_t tick_nohz_full_mask;

#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;

static inline bool tick_nohz_full_enabled(void)
{
	if (!context_tracking_enabled())
		return false;

	return tick_nohz_full_running;
}

/*
 * Check if a CPU is part of the nohz_full subset. Arrange for evaluating
 * the cpu expression (typically smp_processor_id()) _after_ the static
 * key.
 */
#define tick_nohz_full_cpu(_cpu) ({					\
	bool __ret = false;						\
	if (tick_nohz_full_enabled())					\
		__ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask);	\
	__ret;								\
})

static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
{
	if (tick_nohz_full_enabled())
		cpumask_or(mask, mask, tick_nohz_full_mask);
}

extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_set_task(struct task_struct *tsk,
				   enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit);
extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);

/*
 * The below are tick_nohz_dep_{set,clear}*() wrappers that optimize the
 * off-cases on top of static keys.
 */
static inline void tick_dep_set(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set(bit);
}

static inline void tick_dep_clear(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear(bit);
}

static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_set_cpu(cpu, bit);
}

static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_clear_cpu(cpu, bit);
}

static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_task(tsk, bit);
}
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_task(tsk, bit);
}
static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_signal(tsk, bit);
}
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_signal(signal, bit);
}

extern void tick_nohz_full_kick_cpu(int cpu);
extern void __tick_nohz_task_switch(void);
extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }

static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }

static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit) { }

static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void __tick_nohz_task_switch(void) { }
static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { }
#endif
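/*
 * Illustrative sketch, not part of this header's API: a subsystem whose
 * pending work must be serviced from the timer interrupt could keep the
 * tick alive on a given CPU via a per-CPU dependency and release it once
 * the work drains.  TICK_DEP_BIT_PERF_EVENTS is borrowed here purely as an
 * example bit; a real user would add its own enum tick_dep_bits entry.
 * Note that tick_dep_set_cpu()/tick_dep_clear_cpu() already degrade to
 * no-ops when @cpu is not a nohz_full CPU.
 */
static inline void example_track_tick_dependency(int cpu, bool work_pending)
{
	if (work_pending)
		tick_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
	else
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
}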
static inline void tick_nohz_task_switch(void)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_task_switch();
}

static inline void tick_nohz_user_enter_prepare(void)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		rcu_nocb_flush_deferred_wakeup();
}

#endif