// SPDX-License-Identifier: GPL-2.0
/*
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/random.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ lvl
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther the expiry time is away, the higher the array level and
 * therefore the coarser the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
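 *
 * As an illustrative example (numbers taken from the HZ 1000 table below):
 * a timer armed 100 jiffies (100ms) ahead does not fit into level 0, which
 * only covers deltas up to 63ms, so it is queued into level 1 and rounded
 * up to the next 8ms granule. The worst case extra delay is therefore one
 * level 1 granule, i.e. 8ms.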
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum timeout
 * value of the last wheel level. From data sampling we know that the maximum
 * value observed is 5 days (network connection tracking), so this should not
 * be an issue.
 *
 * The currently chosen array constants are a good compromise between
 * array size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0      0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0      0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0      0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time. We start from the last possible delta of the previous level
 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
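 *
 * As a worked example (assuming the LVL_BITS and LVL_CLK_SHIFT values
 * defined below): LVL_START(1) = 63 and LVL_START(2) = 63 << 3 = 504, so
 * calc_wheel_index() puts deltas below 63 jiffies into level 0 and deltas
 * from 63 up to (but not including) 504 jiffies into level 1.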
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
# else
# define LVL_DEPTH	8
#endif

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))

/*
 * The resulting wheel size. If NOHZ is configured, we allocate additional
 * wheels (see NR_BASES below) so that deferrable timers get separate storage.
 */
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)

#ifdef CONFIG_NO_HZ_COMMON
/*
 * If multiple bases need to be locked, use the base ordering for lock
 * nesting, i.e. lowest number first.
 */
# define NR_BASES	3
# define BASE_LOCAL	0
# define BASE_GLOBAL	1
# define BASE_DEF	2
#else
# define NR_BASES	1
# define BASE_LOCAL	0
# define BASE_GLOBAL	0
# define BASE_DEF	0
#endif

/**
 * struct timer_base - Per CPU timer base (number of bases depends on config)
 * @lock:		Lock protecting the timer_base
 * @running_timer:	When expiring timers, the lock is dropped. To make
 *			sure not to race against deleting/modifying a
 *			currently running timer, the pointer is set to the
 *			timer, which expires at the moment. If no timer is
 *			running, the pointer is NULL.
 * @expiry_lock:	PREEMPT_RT only: Lock is taken in softirq around
 *			timer expiry callback execution and when trying to
 *			delete a running timer and it wasn't successful at
 *			the first attempt. It prevents priority inversion
 *			when the callback was preempted on a remote CPU and a
 *			caller tries to delete the running timer. It also
 *			prevents a livelock, when the task which tries to
 *			delete a timer preempted the softirq thread which
 *			is running the timer callback function.
 * @timer_waiters:	PREEMPT_RT only: Tells whether there is a waiter
 *			waiting for the end of the timer callback function
 *			execution.
 * @clk:		clock of the timer base; is updated before enqueue
 *			of a timer; during expiry, it is 1 offset ahead of
 *			jiffies to avoid endless requeuing to current
 *			jiffies
 * @next_expiry:	expiry value of the first timer; it is updated when
 *			finding the next timer and during enqueue; the
 *			value is not valid when next_expiry_recalc is set
 * @cpu:		Number of the CPU the timer base belongs to
 * @next_expiry_recalc: States whether a recalculation of next_expiry is
 *			required. It is set to true when a timer was
 *			deleted.
 * @is_idle:		Is set when the timer_base is idle. It is triggered by
 *			the NOHZ code. This state is only used in the standard
 *			base. Deferrable timers, which are enqueued remotely,
 *			never wake up an idle CPU, so there is no need to
 *			support it for this base.
 * @timers_pending:	Is set when a timer is pending in the base. It is only
 *			reliable when next_expiry_recalc is not set.
 * @pending_map:	bitmap of the timer wheel; each bit reflects a
 *			bucket of the wheel. When a bit is set, at least a
 *			single timer is enqueued in the related bucket.
 * @vectors:		Array of lists; each array member reflects a bucket
 *			of the timer wheel. The list contains all timers
 *			which are enqueued into a specific bucket.
 */
struct timer_base {
	raw_spinlock_t		lock;
	struct timer_list	*running_timer;
#ifdef CONFIG_PREEMPT_RT
	spinlock_t		expiry_lock;
	atomic_t		timer_waiters;
#endif
	unsigned long		clk;
	unsigned long		next_expiry;
	unsigned int		cpu;
	bool			next_expiry_recalc;
	bool			is_idle;
	bool			timers_pending;
	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
	struct hlist_head	vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);

#ifdef CONFIG_NO_HZ_COMMON

static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
static DEFINE_MUTEX(timer_keys_mutex);

static void timer_update_keys(struct work_struct *work);
static DECLARE_WORK(timer_update_work, timer_update_keys);

#ifdef CONFIG_SMP
static unsigned int sysctl_timer_migration = 1;

DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);

static void timers_update_migration(void)
{
	if (sysctl_timer_migration && tick_nohz_active)
		static_branch_enable(&timers_migration_enabled);
	else
		static_branch_disable(&timers_migration_enabled);
}

#ifdef CONFIG_SYSCTL
static int timer_migration_handler(struct ctl_table *table, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&timer_keys_mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration();
	mutex_unlock(&timer_keys_mutex);
	return ret;
}

static struct ctl_table timer_sysctl[] = {
	{
		.procname	= "timer_migration",
		.data		= &sysctl_timer_migration,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= timer_migration_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static int __init timer_sysctl_init(void)
{
	register_sysctl("kernel", timer_sysctl);
	return 0;
}
device_initcall(timer_sysctl_init);
#endif /* CONFIG_SYSCTL */
#else /* CONFIG_SMP */
static inline void timers_update_migration(void) { }
#endif /* !CONFIG_SMP */

static void timer_update_keys(struct work_struct *work)
{
	mutex_lock(&timer_keys_mutex);
	timers_update_migration();
	static_branch_enable(&timers_nohz_active);
	mutex_unlock(&timer_keys_mutex);
}

void timers_update_nohz(void)
{
	schedule_work(&timer_update_work);
}

static inline bool is_timers_nohz_active(void)
{
	return static_branch_unlikely(&timers_nohz_active);
}
#else
static inline bool is_timers_nohz_active(void) { return false; }
#endif /* NO_HZ_COMMON */

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
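 *
 * An illustrative use (the timer name and interval here are made up) is to
 * batch a periodic housekeeping timer onto full seconds::
 *
 *	mod_timer(&mytimer, round_jiffies(jiffies + 5 * HZ));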
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);


static inline unsigned int timer_get_idx(struct timer_list *timer)
{
	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
			idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned long expires, unsigned lvl,
				  unsigned long *bucket_expiry)
{

	/*
	 * The timer wheel has to guarantee that a timer does not fire
	 * early. Early expiry can happen due to:
	 * - Timer is armed at the edge of a tick
	 * - Truncation of the expiry time in the outer wheel levels
	 *
	 * Round up with level granularity to prevent this.
	 */
	expires = (expires >> LVL_SHIFT(lvl)) + 1;
	*bucket_expiry = expires << LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

static int calc_wheel_index(unsigned long expires, unsigned long clk,
			    unsigned long *bucket_expiry)
{
	unsigned long delta = expires - clk;
	unsigned int idx;

	if (delta < LVL_START(1)) {
		idx = calc_index(expires, 0, bucket_expiry);
	} else if (delta < LVL_START(2)) {
		idx = calc_index(expires, 1, bucket_expiry);
	} else if (delta < LVL_START(3)) {
		idx = calc_index(expires, 2, bucket_expiry);
	} else if (delta < LVL_START(4)) {
		idx = calc_index(expires, 3, bucket_expiry);
	} else if (delta < LVL_START(5)) {
		idx = calc_index(expires, 4, bucket_expiry);
	} else if (delta < LVL_START(6)) {
		idx = calc_index(expires, 5, bucket_expiry);
	} else if (delta < LVL_START(7)) {
		idx = calc_index(expires, 6, bucket_expiry);
	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
		idx = calc_index(expires, 7, bucket_expiry);
	} else if ((long) delta < 0) {
		idx = clk & LVL_MASK;
		*bucket_expiry = clk;
	} else {
		/*
		 * Force expire obscenely large timeouts to expire at the
		 * capacity limit of the wheel.
		 */
		if (delta >= WHEEL_TIMEOUT_CUTOFF)
			expires = clk + WHEEL_TIMEOUT_MAX;

		idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
	}
	return idx;
}

static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
	/*
	 * Deferrable timers do not prevent the CPU from entering dynticks and
	 * are not taken into account on the idle/nohz_full path. An IPI when a
	 * new deferrable timer is enqueued will wake up the remote CPU but
	 * nothing will be done with the deferrable timer base. Therefore skip
	 * the remote IPI for deferrable timers completely.
	 */
	if (!is_timers_nohz_active() || timer->flags & TIMER_DEFERRABLE)
		return;

	/*
	 * We might have to IPI the remote CPU if the base is idle and the
	 * timer is not deferrable. If the other CPU is on the way to idle
	 * then it can't set base->is_idle as we hold the base lock:
	 */
	if (base->is_idle)
		wake_up_nohz_cpu(base->cpu);
}

/*
 * Enqueue the timer into the hash bucket, mark it pending in
 * the bitmap, store the index in the timer flags then wake up
 * the target CPU if needed.
 */
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
			  unsigned int idx, unsigned long bucket_expiry)
{

	hlist_add_head(&timer->entry, base->vectors + idx);
	__set_bit(idx, base->pending_map);
	timer_set_idx(timer, idx);

	trace_timer_start(timer, bucket_expiry);

	/*
	 * Check whether this is the new first expiring timer. The
	 * effective expiry time of the timer is required here
	 * (bucket_expiry) instead of timer->expires.
	 */
	if (time_before(bucket_expiry, base->next_expiry)) {
		/*
		 * Set the next expiry time and kick the CPU so it
		 * can reevaluate the wheel:
		 */
		base->next_expiry = bucket_expiry;
		base->timers_pending = true;
		base->next_expiry_recalc = false;
		trigger_dyntick_cpu(base, timer);
	}
}

static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned long bucket_expiry;
	unsigned int idx;

	idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
	enqueue_timer(base, timer, idx, bucket_expiry);
}

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static const struct debug_obj_descr timer_debug_descr;

struct timer_hint {
	void	(*function)(struct timer_list *t);
	long	offset;
};

#define TIMER_HINT(fn, container, timr, hintfn)			\
	{							\
		.function = fn,					\
		.offset	  = offsetof(container, hintfn) -	\
			    offsetof(container, timr)		\
	}

static const struct timer_hint timer_hints[] = {
	TIMER_HINT(delayed_work_timer_fn,
		   struct delayed_work, timer, work.func),
	TIMER_HINT(kthread_delayed_work_timer_fn,
		   struct kthread_delayed_work, timer, work.func),
};

static void *timer_debug_hint(void *addr)
{
	struct timer_list *timer = addr;
	int i;

	for (i = 0; i < ARRAY_SIZE(timer_hints); i++) {
		if (timer_hints[i].function == timer->function) {
			void (**fn)(void) = addr + timer_hints[i].offset;

			return *fn;
		}
	}

	return timer->function;
}

static bool timer_is_static_object(void *addr)
{
	struct timer_list *timer = addr;

	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * Stub timer callback for improperly used timers.
 */
static void stub_timer(struct timer_list *unused)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		fallthrough;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.is_static_object	= timer_is_static_object,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     void (*func)(struct timer_list *),
			     unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->function = func;
	if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
		flags &= TIMER_INIT_FLAGS;
	timer->flags = flags | raw_smp_processor_id();
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @func: timer callback function
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    void (*func)(struct timer_list *), unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
		__clear_bit(idx, base->pending_map);
		base->next_expiry_recalc = true;
	}

	detach_timer(timer, clear_pending);
	return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
	int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
	struct timer_base *base;

	base = per_cpu_ptr(&timer_bases[index], cpu);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
	return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
	struct timer_base *base;

	base = this_cpu_ptr(&timer_bases[index]);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
	return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) &&
	    !(tflags & TIMER_PINNED))
		return get_timer_cpu_base(tflags, get_nohz_timer_target());
#endif
	return get_timer_this_cpu_base(tflags);
}

static inline void __forward_timer_base(struct timer_base *base,
					unsigned long basej)
{
	/*
	 * Check whether we can forward the base. We can only do that when
	 * @basej is past base->clk, otherwise we might rewind base->clk.
	 */
	if (time_before_eq(basej, base->clk))
		return;

	/*
	 * If the next expiry value is > jiffies, then we fast forward to
	 * jiffies, otherwise we forward to the next expiry value.
	 */
	if (time_after(base->next_expiry, basej)) {
		base->clk = basej;
	} else {
		if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
			return;
		base->clk = base->next_expiry;
	}

}

static inline void forward_timer_base(struct timer_base *base)
{
	__forward_timer_base(base, READ_ONCE(jiffies));
}

/*
 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 * that all timers which are tied to this base are locked, and the base itself
 * is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->vectors array.
 *
 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
 * to wait until the migration is done.
 */
static struct timer_base *lock_timer_base(struct timer_list *timer,
					  unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct timer_base *base;
		u32 tf;

		/*
		 * We need to use READ_ONCE() here, otherwise the compiler
		 * might re-read @tf between the check for TIMER_MIGRATING
		 * and spin_lock().
		 */
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			raw_spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			raw_spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

#define MOD_TIMER_PENDING_ONLY		0x01
#define MOD_TIMER_REDUCE		0x02
#define MOD_TIMER_NOTPENDING		0x04

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
{
	unsigned long clk = 0, flags, bucket_expiry;
	struct timer_base *base, *new_base;
	unsigned int idx = UINT_MAX;
	int ret = 0;

	debug_assert_init(timer);

	/*
	 * This is a common optimization triggered by the networking code - if
	 * the timer is re-modified to have the same timeout or ends up in the
	 * same array bucket then just return:
	 */
	if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
		/*
		 * The downside of this optimization is that it can result in
		 * larger granularity than you would get from adding a new
		 * timer with this expiry.
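		 * (That is, the already pending timer is left in the bucket
		 *  it was queued into earlier, which may sit in a coarser
		 *  level than a fresh enqueue of this expiry would select
		 *  now that the remaining delta is smaller.)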
		 */
		long diff = timer->expires - expires;

		if (!diff)
			return 1;
		if (options & MOD_TIMER_REDUCE && diff <= 0)
			return 1;

		/*
		 * We lock timer base and calculate the bucket index right
		 * here. If the timer ends up in the same bucket, then we
		 * just update the expiry time and avoid the whole
		 * dequeue/enqueue dance.
		 */
		base = lock_timer_base(timer, &flags);
		/*
		 * Has @timer been shutdown? This needs to be evaluated
		 * while holding base lock to prevent a race against the
		 * shutdown code.
		 */
		if (!timer->function)
			goto out_unlock;

		forward_timer_base(base);

		if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
		    time_before_eq(timer->expires, expires)) {
			ret = 1;
			goto out_unlock;
		}

		clk = base->clk;
		idx = calc_wheel_index(expires, clk, &bucket_expiry);

		/*
		 * Retrieve and compare the array index of the pending
		 * timer. If it matches, set the expiry to the new value so a
		 * subsequent call will exit in the expires check above.
		 */
		if (idx == timer_get_idx(timer)) {
			if (!(options & MOD_TIMER_REDUCE))
				timer->expires = expires;
			else if (time_after(timer->expires, expires))
				timer->expires = expires;
			ret = 1;
			goto out_unlock;
		}
	} else {
		base = lock_timer_base(timer, &flags);
		/*
		 * Has @timer been shutdown? This needs to be evaluated
		 * while holding base lock to prevent a race against the
		 * shutdown code.
		 */
		if (!timer->function)
			goto out_unlock;

		forward_timer_base(base);
	}

	ret = detach_if_pending(timer, base, false);
	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
		goto out_unlock;

	new_base = get_target_base(base, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the new base.
		 * However we can't change timer's base while it is running,
		 * otherwise timer_delete_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that the
		 * timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			raw_spin_unlock(&base->lock);
			base = new_base;
			raw_spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
			forward_timer_base(base);
		}
	}

	debug_timer_activate(timer);

	timer->expires = expires;
	/*
	 * If 'idx' was calculated above and the base time did not advance
	 * between calculating 'idx' and possibly switching the base, only
	 * enqueue_timer() is required. Otherwise we need to (re)calculate
	 * the wheel index via internal_add_timer().
	 */
	if (idx != UINT_MAX && clk == base->clk)
		enqueue_timer(base, timer, idx, bucket_expiry);
	else
		internal_add_timer(base, timer);

out_unlock:
	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - Modify a pending timer's timeout
 * @timer: The pending timer to be modified
 * @expires: New absolute timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(), but
 * will not activate inactive timers.
 *
 * If @timer->function == NULL then the start operation is silently
 * discarded.
 *
 * Return:
 * * %0 - The timer was inactive and not modified or was in
 *	  shutdown state and the operation was discarded
 * * %1 - The timer was active and requeued to expire at @expires
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - Modify a timer's timeout
 * @timer: The timer to be modified
 * @expires: New absolute timeout in jiffies
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * mod_timer() is more efficient than the above open coded sequence. In
 * case that the timer is inactive, the del_timer() part is a NOP. The
 * timer is in any case activated with the new expiry time @expires.
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * If @timer->function == NULL then the start operation is silently
 * discarded. In this case the return value is 0 and meaningless.
 *
 * Return:
 * * %0 - The timer was inactive and started or was in shutdown
 *	  state and the operation was discarded
 * * %1 - The timer was active and requeued to expire at @expires or
 *	  the timer was active and not modified because @expires did
 *	  not change the effective expiry time
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_timer);

/**
 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
 * @timer: The timer to be modified
 * @expires: New absolute timeout in jiffies
 *
 * timer_reduce() is very similar to mod_timer(), except that it will only
 * modify an enqueued timer if that would reduce the expiration time. If
 * @timer is not enqueued it starts the timer.
 *
 * If @timer->function == NULL then the start operation is silently
 * discarded.
 *
 * Return:
 * * %0 - The timer was inactive and started or was in shutdown
 *	  state and the operation was discarded
 * * %1 - The timer was active and requeued to expire at @expires or
 *	  the timer was active and not modified because @expires
 *	  did not change the effective expiry time such that the
 *	  timer would expire earlier than already scheduled
 */
int timer_reduce(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
}
EXPORT_SYMBOL(timer_reduce);

/**
 * add_timer - Start a timer
 * @timer: The timer to be started
 *
 * Start @timer to expire at @timer->expires in the future. @timer->expires
 * is the absolute expiry time measured in 'jiffies'. When the timer expires
 * timer->function(timer) will be invoked from soft interrupt context.
 *
 * The @timer->expires and @timer->function fields must be set prior
 * to calling this function.
 *
 * If @timer->function == NULL then the start operation is silently
 * discarded.
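 *
 * A minimal usage sketch (the 'foo' names are illustrative only)::
 *
 *	timer_setup(&foo->timer, foo_timer_fn, 0);
 *	foo->timer.expires = jiffies + HZ;
 *	add_timer(&foo->timer);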
 *
 * If @timer->expires is already in the past, @timer will be queued to
 * expire at the next timer tick.
 *
 * This can only operate on an inactive timer. Attempts to invoke this on
 * an active timer are rejected with a warning.
 */
void add_timer(struct timer_list *timer)
{
	if (WARN_ON_ONCE(timer_pending(timer)))
		return;
	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_local() - Start a timer on the local CPU
 * @timer: The timer to be started
 *
 * Same as add_timer() except that the timer flag TIMER_PINNED is set.
 *
 * See add_timer() for further details.
 */
void add_timer_local(struct timer_list *timer)
{
	if (WARN_ON_ONCE(timer_pending(timer)))
		return;
	timer->flags |= TIMER_PINNED;
	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer_local);

/**
 * add_timer_global() - Start a timer without TIMER_PINNED flag set
 * @timer: The timer to be started
 *
 * Same as add_timer() except that the timer flag TIMER_PINNED is unset.
 *
 * See add_timer() for further details.
 */
void add_timer_global(struct timer_list *timer)
{
	if (WARN_ON_ONCE(timer_pending(timer)))
		return;
	timer->flags &= ~TIMER_PINNED;
	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer_global);

/**
 * add_timer_on - Start a timer on a particular CPU
 * @timer: The timer to be started
 * @cpu: The CPU to start it on
 *
 * Same as add_timer() except that it starts the timer on the given CPU and
 * the TIMER_PINNED flag is set. When the timer shouldn't be a pinned timer
 * in the next round, add_timer_global() should be used instead as it unsets
 * the TIMER_PINNED flag.
 *
 * See add_timer() for further details.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	debug_assert_init(timer);

	if (WARN_ON_ONCE(timer_pending(timer)))
		return;

	/* Make sure timer flags have TIMER_PINNED flag set */
	timer->flags |= TIMER_PINNED;

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	/*
	 * Has @timer been shutdown? This needs to be evaluated while
	 * holding base lock to prevent a race against the shutdown code.
	 */
	if (!timer->function)
		goto out_unlock;

	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		raw_spin_unlock(&base->lock);
		base = new_base;
		raw_spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}
	forward_timer_base(base);

	debug_timer_activate(timer);
	internal_add_timer(base, timer);
out_unlock:
	raw_spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * __timer_delete - Internal function: Deactivate a timer
 * @timer: The timer to be deactivated
 * @shutdown: If true, this indicates that the timer is about to be
 *	      shutdown permanently.
 *
 * If @shutdown is true then @timer->function is set to NULL under the
 * timer base lock which prevents further rearming of the timer. In that
 * case any attempt to rearm @timer after this function returns will be
 * silently ignored.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 */
static int __timer_delete(struct timer_list *timer, bool shutdown)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	/*
	 * If @shutdown is set then the lock has to be taken whether the
	 * timer is pending or not to protect against a concurrent rearm
	 * which might hit between the lockless pending check and the lock
	 * acquisition. By taking the lock it is ensured that such a newly
	 * enqueued timer is dequeued and cannot end up with
	 * timer->function == NULL in the expiry code.
	 *
	 * If timer->function is currently executed, then this makes sure
	 * that the callback cannot requeue the timer.
	 */
	if (timer_pending(timer) || shutdown) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		if (shutdown)
			timer->function = NULL;
		raw_spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

/**
 * timer_delete - Deactivate a timer
 * @timer: The timer to be deactivated
 *
 * The function only deactivates a pending timer, but contrary to
 * timer_delete_sync() it does not take into account whether the timer's
 * callback function is concurrently executed on a different CPU or not.
 * Neither does it prevent rearming of the timer. If @timer can be rearmed
 * concurrently then the return value of this function is meaningless.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 */
int timer_delete(struct timer_list *timer)
{
	return __timer_delete(timer, false);
}
EXPORT_SYMBOL(timer_delete);

/**
 * timer_shutdown - Deactivate a timer and prevent rearming
 * @timer: The timer to be deactivated
 *
 * The function does not wait for a possibly running timer callback on a
 * different CPU, but it prevents rearming of the timer. Any attempt to arm
 * @timer after this function returns will be silently ignored.
 *
 * This function is useful for teardown code and should only be used when
 * timer_shutdown_sync() cannot be invoked due to locking or context constraints.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending
 */
int timer_shutdown(struct timer_list *timer)
{
	return __timer_delete(timer, true);
}
EXPORT_SYMBOL_GPL(timer_shutdown);

/**
 * __try_to_del_timer_sync - Internal function: Try to deactivate a timer
 * @timer: Timer to deactivate
 * @shutdown: If true, this indicates that the timer is about to be
 *	      shutdown permanently.
 *
 * If @shutdown is true then @timer->function is set to NULL under the
 * timer base lock which prevents further rearming of the timer. Any
 * attempt to rearm @timer after this function returns will be silently
 * ignored.
 *
 * This function cannot guarantee that the timer cannot be rearmed
 * right after dropping the base lock if @shutdown is false. That
 * needs to be prevented by the calling code if necessary.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 * * %-1 - The timer callback function is running on a different CPU
 */
static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer)
		ret = detach_if_pending(timer, base, true);
	if (shutdown)
		timer->function = NULL;

	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: Timer to deactivate
 *
 * This function tries to deactivate a timer. On success the timer is not
 * queued and the timer callback function is not running on any CPU.
 *
 * This function does not guarantee that the timer cannot be rearmed right
 * after dropping the base lock. That needs to be prevented by the calling
 * code if necessary.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 * * %-1 - The timer callback function is running on a different CPU
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	return __try_to_del_timer_sync(timer, false);
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_PREEMPT_RT
static __init void timer_base_init_expiry_lock(struct timer_base *base)
{
	spin_lock_init(&base->expiry_lock);
}

static inline void timer_base_lock_expiry(struct timer_base *base)
{
	spin_lock(&base->expiry_lock);
}

static inline void timer_base_unlock_expiry(struct timer_base *base)
{
	spin_unlock(&base->expiry_lock);
}

/*
 * The counterpart to del_timer_wait_running().
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for the
 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
 * the waiter to acquire the lock and make progress.
 */
static void timer_sync_wait_running(struct timer_base *base)
{
	if (atomic_read(&base->timer_waiters)) {
		raw_spin_unlock_irq(&base->lock);
		spin_unlock(&base->expiry_lock);
		spin_lock(&base->expiry_lock);
		raw_spin_lock_irq(&base->lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion if the softirq thread on a remote CPU
 * got preempted, and it prevents a livelock when the task which tries to
 * delete a timer preempted the softirq thread running the timer callback
 * function.
 */
static void del_timer_wait_running(struct timer_list *timer)
{
	u32 tf;

	tf = READ_ONCE(timer->flags);
	if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
		struct timer_base *base = get_timer_base(tf);

		/*
		 * Mark the base as contended and grab the expiry lock,
		 * which is held by the softirq across the timer
		 * callback. Drop the lock immediately so the softirq can
		 * expire the next timer.
		 * In theory the timer could already be running again, but
		 * that's more than unlikely and just causes another wait
		 * loop.
		 */
		atomic_inc(&base->timer_waiters);
		spin_lock_bh(&base->expiry_lock);
		atomic_dec(&base->timer_waiters);
		spin_unlock_bh(&base->expiry_lock);
	}
}
#else
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
static inline void timer_base_lock_expiry(struct timer_base *base) { }
static inline void timer_base_unlock_expiry(struct timer_base *base) { }
static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif

/**
 * __timer_delete_sync - Internal function: Deactivate a timer and wait
 *			 for the handler to finish.
 * @timer: The timer to be deactivated
 * @shutdown: If true, @timer->function will be set to NULL under the
 *	      timer base lock which prevents rearming of @timer
 *
 * If @shutdown is not set the timer can be rearmed later. If the timer can
 * be rearmed concurrently, i.e. after dropping the base lock, then the
 * return value is meaningless.
 *
 * If @shutdown is set then @timer->function is set to NULL under timer
 * base lock which prevents rearming of the timer. Any attempt to rearm
 * a shutdown timer is silently ignored.
 *
 * If the timer should be reused after shutdown it has to be initialized
 * again.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 */
static int __timer_delete_sync(struct timer_list *timer, bool shutdown)
{
	int ret;

#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * Don't use this in hardirq context, because it
	 * could lead to a deadlock.
	 */
	WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE));

	/*
	 * Must be able to sleep on PREEMPT_RT because of the slowpath in
	 * del_timer_wait_running().
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
		lockdep_assert_preemption_enabled();

	do {
		ret = __try_to_del_timer_sync(timer, shutdown);

		if (unlikely(ret < 0)) {
			del_timer_wait_running(timer);
			cpu_relax();
		}
	} while (ret < 0);

	return ret;
}

/**
 * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
 * @timer: The timer to be deactivated
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's callback
 * function. The timer's handler must not call add_timer_on(). Upon exit
 * the timer is not queued and the handler is not running on any CPU.
 *
 * For !irqsafe timers, the caller must not hold locks that are held in
 * interrupt context. Even if the lock has nothing to do with the timer in
 * question.
Here's why:: 1695 * 1696 * CPU0 CPU1 1697 * ---- ---- 1698 * <SOFTIRQ> 1699 * call_timer_fn(); 1700 * base->running_timer = mytimer; 1701 * spin_lock_irq(somelock); 1702 * <IRQ> 1703 * spin_lock(somelock); 1704 * timer_delete_sync(mytimer); 1705 * while (base->running_timer == mytimer); 1706 * 1707 * Now timer_delete_sync() will never return and never release somelock. 1708 * The interrupt on the other CPU is waiting to grab somelock but it has 1709 * interrupted the softirq that CPU0 is waiting to finish. 1710 * 1711 * This function cannot guarantee that the timer is not rearmed again by 1712 * some concurrent or preempting code, right after it dropped the base 1713 * lock. If there is the possibility of a concurrent rearm then the return 1714 * value of the function is meaningless. 1715 * 1716 * If such a guarantee is needed, e.g. for teardown situations then use 1717 * timer_shutdown_sync() instead. 1718 * 1719 * Return: 1720 * * %0 - The timer was not pending 1721 * * %1 - The timer was pending and deactivated 1722 */ 1723 int timer_delete_sync(struct timer_list *timer) 1724 { 1725 return __timer_delete_sync(timer, false); 1726 } 1727 EXPORT_SYMBOL(timer_delete_sync); 1728 1729 /** 1730 * timer_shutdown_sync - Shutdown a timer and prevent rearming 1731 * @timer: The timer to be shutdown 1732 * 1733 * When the function returns it is guaranteed that: 1734 * - @timer is not queued 1735 * - The callback function of @timer is not running 1736 * - @timer cannot be enqueued again. Any attempt to rearm 1737 * @timer is silently ignored. 1738 * 1739 * See timer_delete_sync() for synchronization rules. 1740 * 1741 * This function is useful for final teardown of an infrastructure where 1742 * the timer is subject to a circular dependency problem. 1743 * 1744 * A common pattern for this is a timer and a workqueue where the timer can 1745 * schedule work and work can arm the timer. On shutdown the workqueue must 1746 * be destroyed and the timer must be prevented from rearming. Unless the 1747 * code has conditionals like 'if (mything->in_shutdown)' to prevent that 1748 * there is no way to get this correct with timer_delete_sync(). 1749 * 1750 * timer_shutdown_sync() is solving the problem. The correct ordering of 1751 * calls in this case is: 1752 * 1753 * timer_shutdown_sync(&mything->timer); 1754 * workqueue_destroy(&mything->workqueue); 1755 * 1756 * After this 'mything' can be safely freed. 1757 * 1758 * This obviously implies that the timer is not required to be functional 1759 * for the rest of the shutdown operation. 1760 * 1761 * Return: 1762 * * %0 - The timer was not pending 1763 * * %1 - The timer was pending 1764 */ 1765 int timer_shutdown_sync(struct timer_list *timer) 1766 { 1767 return __timer_delete_sync(timer, true); 1768 } 1769 EXPORT_SYMBOL_GPL(timer_shutdown_sync); 1770 1771 static void call_timer_fn(struct timer_list *timer, 1772 void (*fn)(struct timer_list *), 1773 unsigned long baseclk) 1774 { 1775 int count = preempt_count(); 1776 1777 #ifdef CONFIG_LOCKDEP 1778 /* 1779 * It is permissible to free the timer from inside the 1780 * function that is called from it, this we need to take into 1781 * account for lockdep too. To avoid bogus "held lock freed" 1782 * warnings as well as problems when looking into 1783 * timer->lockdep_map, make a copy and use that here. 
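 *
 * For illustration only, a (hypothetical) callback of that kind, i.e. one
 * which frees the structure containing its own timer (the names and
 * fields below are made up, not taken from this file):
 *
 *	static void foo_timeout(struct timer_list *t)
 *	{
 *		struct foo *foo = from_timer(foo, t, timer);
 *
 *		kfree(foo);	// the embedded timer_list and its
 *				// lockdep_map are gone after this
 *	}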
1784 */ 1785 struct lockdep_map lockdep_map; 1786 1787 lockdep_copy_map(&lockdep_map, &timer->lockdep_map); 1788 #endif 1789 /* 1790 * Couple the lock chain with the lock chain at 1791 * timer_delete_sync() by acquiring the lock_map around the fn() 1792 * call here and in timer_delete_sync(). 1793 */ 1794 lock_map_acquire(&lockdep_map); 1795 1796 trace_timer_expire_entry(timer, baseclk); 1797 fn(timer); 1798 trace_timer_expire_exit(timer); 1799 1800 lock_map_release(&lockdep_map); 1801 1802 if (count != preempt_count()) { 1803 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", 1804 fn, count, preempt_count()); 1805 /* 1806 * Restore the preempt count. That gives us a decent 1807 * chance to survive and extract information. If the 1808 * callback kept a lock held, bad luck, but not worse 1809 * than the BUG() we had. 1810 */ 1811 preempt_count_set(count); 1812 } 1813 } 1814 1815 static void expire_timers(struct timer_base *base, struct hlist_head *head) 1816 { 1817 /* 1818 * This value is required only for tracing. base->clk was 1819 * incremented directly before expire_timers was called. But expiry 1820 * is related to the old base->clk value. 1821 */ 1822 unsigned long baseclk = base->clk - 1; 1823 1824 while (!hlist_empty(head)) { 1825 struct timer_list *timer; 1826 void (*fn)(struct timer_list *); 1827 1828 timer = hlist_entry(head->first, struct timer_list, entry); 1829 1830 base->running_timer = timer; 1831 detach_timer(timer, true); 1832 1833 fn = timer->function; 1834 1835 if (WARN_ON_ONCE(!fn)) { 1836 /* Should never happen. Emphasis on should! */ 1837 base->running_timer = NULL; 1838 continue; 1839 } 1840 1841 if (timer->flags & TIMER_IRQSAFE) { 1842 raw_spin_unlock(&base->lock); 1843 call_timer_fn(timer, fn, baseclk); 1844 raw_spin_lock(&base->lock); 1845 base->running_timer = NULL; 1846 } else { 1847 raw_spin_unlock_irq(&base->lock); 1848 call_timer_fn(timer, fn, baseclk); 1849 raw_spin_lock_irq(&base->lock); 1850 base->running_timer = NULL; 1851 timer_sync_wait_running(base); 1852 } 1853 } 1854 } 1855 1856 static int collect_expired_timers(struct timer_base *base, 1857 struct hlist_head *heads) 1858 { 1859 unsigned long clk = base->clk = base->next_expiry; 1860 struct hlist_head *vec; 1861 int i, levels = 0; 1862 unsigned int idx; 1863 1864 for (i = 0; i < LVL_DEPTH; i++) { 1865 idx = (clk & LVL_MASK) + i * LVL_SIZE; 1866 1867 if (__test_and_clear_bit(idx, base->pending_map)) { 1868 vec = base->vectors + idx; 1869 hlist_move_list(vec, heads++); 1870 levels++; 1871 } 1872 /* Is it time to look at the next level? */ 1873 if (clk & LVL_CLK_MASK) 1874 break; 1875 /* Shift clock for the next level granularity */ 1876 clk >>= LVL_CLK_SHIFT; 1877 } 1878 return levels; 1879 } 1880 1881 /* 1882 * Find the next pending bucket of a level. Search from level start (@offset) 1883 * + @clk upwards and if nothing there, search from start of the level 1884 * (@offset) up to @offset + clk. 1885 */ 1886 static int next_pending_bucket(struct timer_base *base, unsigned offset, 1887 unsigned clk) 1888 { 1889 unsigned pos, start = offset + clk; 1890 unsigned end = offset + LVL_SIZE; 1891 1892 pos = find_next_bit(base->pending_map, end, start); 1893 if (pos < end) 1894 return pos - start; 1895 1896 pos = find_next_bit(base->pending_map, start, offset); 1897 return pos < start ? pos + LVL_SIZE - start : -1; 1898 } 1899 1900 /* 1901 * Search the first expiring timer in the various clock levels. Caller must 1902 * hold base->lock. 1903 * 1904 * Store next expiry time in base->next_expiry. 
1905 */ 1906 static void next_expiry_recalc(struct timer_base *base) 1907 { 1908 unsigned long clk, next, adj; 1909 unsigned lvl, offset = 0; 1910 1911 next = base->clk + NEXT_TIMER_MAX_DELTA; 1912 clk = base->clk; 1913 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { 1914 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); 1915 unsigned long lvl_clk = clk & LVL_CLK_MASK; 1916 1917 if (pos >= 0) { 1918 unsigned long tmp = clk + (unsigned long) pos; 1919 1920 tmp <<= LVL_SHIFT(lvl); 1921 if (time_before(tmp, next)) 1922 next = tmp; 1923 1924 /* 1925 * If the next expiration happens before we reach 1926 * the next level, no need to check further. 1927 */ 1928 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK)) 1929 break; 1930 } 1931 /* 1932 * Clock for the next level. If the current level clock lower 1933 * bits are zero, we look at the next level as is. If not we 1934 * need to advance it by one because that's going to be the 1935 * next expiring bucket in that level. base->clk is the next 1936 * expiring jiffie. So in case of: 1937 * 1938 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1939 * 0 0 0 0 0 0 1940 * 1941 * we have to look at all levels @index 0. With 1942 * 1943 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1944 * 0 0 0 0 0 2 1945 * 1946 * LVL0 has the next expiring bucket @index 2. The upper 1947 * levels have the next expiring bucket @index 1. 1948 * 1949 * In case that the propagation wraps the next level the same 1950 * rules apply: 1951 * 1952 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1953 * 0 0 0 0 F 2 1954 * 1955 * So after looking at LVL0 we get: 1956 * 1957 * LVL5 LVL4 LVL3 LVL2 LVL1 1958 * 0 0 0 1 0 1959 * 1960 * So no propagation from LVL1 to LVL2 because that happened 1961 * with the add already, but then we need to propagate further 1962 * from LVL2 to LVL3. 1963 * 1964 * So the simple check whether the lower bits of the current 1965 * level are 0 or not is sufficient for all cases. 1966 */ 1967 adj = lvl_clk ? 1 : 0; 1968 clk >>= LVL_CLK_SHIFT; 1969 clk += adj; 1970 } 1971 1972 base->next_expiry = next; 1973 base->next_expiry_recalc = false; 1974 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); 1975 } 1976 1977 #ifdef CONFIG_NO_HZ_COMMON 1978 /* 1979 * Check, if the next hrtimer event is before the next timer wheel 1980 * event: 1981 */ 1982 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) 1983 { 1984 u64 nextevt = hrtimer_get_next_event(); 1985 1986 /* 1987 * If high resolution timers are enabled 1988 * hrtimer_get_next_event() returns KTIME_MAX. 1989 */ 1990 if (expires <= nextevt) 1991 return expires; 1992 1993 /* 1994 * If the next timer is already expired, return the tick base 1995 * time so the tick is fired immediately. 1996 */ 1997 if (nextevt <= basem) 1998 return basem; 1999 2000 /* 2001 * Round up to the next jiffie. High resolution timers are 2002 * off, so the hrtimers are expired in the tick and we need to 2003 * make sure that this tick really expires the timer to avoid 2004 * a ping pong of the nohz stop code. 2005 * 2006 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3 2007 */ 2008 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC; 2009 } 2010 2011 static unsigned long next_timer_interrupt(struct timer_base *base, 2012 unsigned long basej) 2013 { 2014 if (base->next_expiry_recalc) 2015 next_expiry_recalc(base); 2016 2017 /* 2018 * Move next_expiry for the empty base into the future to prevent an 2019 * unnecessary raise of the timer softirq when the next_expiry value 2020 * will be reached even if there is no timer pending. 
2021 *
2022 * This update is also required to make timer_base::next_expiry values
2023 * easily comparable to find out which base holds the first pending timer.
2024 */
2025 if (!base->timers_pending)
2026 base->next_expiry = basej + NEXT_TIMER_MAX_DELTA;
2027
2028 return base->next_expiry;
2029 }
2030
2031 static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
2032 struct timer_base *base_local,
2033 struct timer_base *base_global,
2034 struct timer_events *tevt)
2035 {
2036 unsigned long nextevt, nextevt_local, nextevt_global;
2037 bool local_first;
2038
2039 nextevt_local = next_timer_interrupt(base_local, basej);
2040 nextevt_global = next_timer_interrupt(base_global, basej);
2041
2042 local_first = time_before_eq(nextevt_local, nextevt_global);
2043
2044 nextevt = local_first ? nextevt_local : nextevt_global;
2045
2046 /*
2047 * If @nextevt is at most one tick away, use @nextevt and store
2048 * it in the local expiry value. The next global event is irrelevant in
2049 * this case and can be left as KTIME_MAX.
2050 */
2051 if (time_before_eq(nextevt, basej + 1)) {
2052 /* If we missed a tick already, force 0 delta */
2053 if (time_before(nextevt, basej))
2054 nextevt = basej;
2055 tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
2056
2057 /*
2058 * This is required only for the remote check, but it doesn't
2059 * hurt when it is done for both call sites:
2060 *
2061 * * Remote callers only take care of the global timers, as local
2062 * timers are handled by the CPU itself. If tevt->global is not
2063 * updated with the already missed first global timer, that
2064 * timer may be missed completely.
2065 *
2066 * * Local callers ignore tevt->global anyway when nextevt is at
2067 * most one tick away.
2068 */
2069 if (!local_first)
2070 tevt->global = tevt->local;
2071 return nextevt;
2072 }
2073
2074 /*
2075 * Update tevt.* values:
2076 *
2077 * If the local queue expires first, then the global event can be
2078 * ignored. If the global queue is empty, nothing to do either.
2079 */
2080 if (!local_first && base_global->timers_pending)
2081 tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
2082
2083 if (base_local->timers_pending)
2084 tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
2085
2086 return nextevt;
2087 }
2088
2089 # ifdef CONFIG_SMP
2090 /**
2091 * fetch_next_timer_interrupt_remote() - Store next timers into @tevt
2092 * @basej: base time jiffies
2093 * @basem: base time clock monotonic
2094 * @tevt: Pointer to the storage for the expiry values
2095 * @cpu: Remote CPU
2096 *
2097 * Stores the next pending local and global timer expiry values in the
2098 * struct pointed to by @tevt. If a queue is empty the corresponding
2099 * field is set to KTIME_MAX. If the local event expires before the global
2100 * event, the global event is set to KTIME_MAX as well.
2101 *
2102 * Caller needs to make sure timer base locks are held (use
2103 * timer_lock_remote_bases() for this purpose).
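 *
 * A typical calling sequence on the CPU handling remote timers is
 * therefore (a sketch only; error handling and the origin of @basej and
 * @basem are omitted)::
 *
 *	struct timer_events tevt;
 *
 *	local_irq_disable();
 *	timer_lock_remote_bases(cpu);
 *	fetch_next_timer_interrupt_remote(basej, basem, &tevt, cpu);
 *	timer_unlock_remote_bases(cpu);
 *	local_irq_enable();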
2104 */ 2105 void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem, 2106 struct timer_events *tevt, 2107 unsigned int cpu) 2108 { 2109 struct timer_base *base_local, *base_global; 2110 2111 /* Preset local / global events */ 2112 tevt->local = tevt->global = KTIME_MAX; 2113 2114 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu); 2115 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu); 2116 2117 lockdep_assert_held(&base_local->lock); 2118 lockdep_assert_held(&base_global->lock); 2119 2120 fetch_next_timer_interrupt(basej, basem, base_local, base_global, tevt); 2121 } 2122 2123 /** 2124 * timer_unlock_remote_bases - unlock timer bases of cpu 2125 * @cpu: Remote CPU 2126 * 2127 * Unlocks the remote timer bases. 2128 */ 2129 void timer_unlock_remote_bases(unsigned int cpu) 2130 __releases(timer_bases[BASE_LOCAL]->lock) 2131 __releases(timer_bases[BASE_GLOBAL]->lock) 2132 { 2133 struct timer_base *base_local, *base_global; 2134 2135 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu); 2136 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu); 2137 2138 raw_spin_unlock(&base_global->lock); 2139 raw_spin_unlock(&base_local->lock); 2140 } 2141 2142 /** 2143 * timer_lock_remote_bases - lock timer bases of cpu 2144 * @cpu: Remote CPU 2145 * 2146 * Locks the remote timer bases. 2147 */ 2148 void timer_lock_remote_bases(unsigned int cpu) 2149 __acquires(timer_bases[BASE_LOCAL]->lock) 2150 __acquires(timer_bases[BASE_GLOBAL]->lock) 2151 { 2152 struct timer_base *base_local, *base_global; 2153 2154 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu); 2155 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu); 2156 2157 lockdep_assert_irqs_disabled(); 2158 2159 raw_spin_lock(&base_local->lock); 2160 raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING); 2161 } 2162 # endif /* CONFIG_SMP */ 2163 2164 static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, 2165 bool *idle) 2166 { 2167 struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX }; 2168 struct timer_base *base_local, *base_global; 2169 unsigned long nextevt; 2170 u64 expires; 2171 2172 /* 2173 * Pretend that there is no timer pending if the cpu is offline. 2174 * Possible pending timers will be migrated later to an active cpu. 2175 */ 2176 if (cpu_is_offline(smp_processor_id())) { 2177 if (idle) 2178 *idle = true; 2179 return tevt.local; 2180 } 2181 2182 base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]); 2183 base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]); 2184 2185 raw_spin_lock(&base_local->lock); 2186 raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING); 2187 2188 nextevt = fetch_next_timer_interrupt(basej, basem, base_local, 2189 base_global, &tevt); 2190 2191 /* 2192 * We have a fresh next event. Check whether we can forward the 2193 * base. 2194 */ 2195 __forward_timer_base(base_local, basej); 2196 __forward_timer_base(base_global, basej); 2197 2198 /* 2199 * Set base->is_idle only when caller is timer_base_try_to_set_idle() 2200 */ 2201 if (idle) { 2202 /* 2203 * Bases are idle if the next event is more than a tick away. 2204 * 2205 * If the base is marked idle then any timer add operation must 2206 * forward the base clk itself to keep granularity small. This 2207 * idle logic is only maintained for the BASE_LOCAL and 2208 * BASE_GLOBAL base, deferrable timers may still see large 2209 * granularity skew (by design). 
2210 */ 2211 if (!base_local->is_idle && time_after(nextevt, basej + 1)) { 2212 base_local->is_idle = base_global->is_idle = true; 2213 trace_timer_base_idle(true, base_local->cpu); 2214 } 2215 *idle = base_local->is_idle; 2216 } 2217 2218 raw_spin_unlock(&base_global->lock); 2219 raw_spin_unlock(&base_local->lock); 2220 2221 expires = min_t(u64, tevt.local, tevt.global); 2222 2223 return cmp_next_hrtimer_event(basem, expires); 2224 } 2225 2226 /** 2227 * get_next_timer_interrupt() - return the time (clock mono) of the next timer 2228 * @basej: base time jiffies 2229 * @basem: base time clock monotonic 2230 * 2231 * Returns the tick aligned clock monotonic time of the next pending 2232 * timer or KTIME_MAX if no timer is pending. 2233 */ 2234 u64 get_next_timer_interrupt(unsigned long basej, u64 basem) 2235 { 2236 return __get_next_timer_interrupt(basej, basem, NULL); 2237 } 2238 2239 /** 2240 * timer_base_try_to_set_idle() - Try to set the idle state of the timer bases 2241 * @basej: base time jiffies 2242 * @basem: base time clock monotonic 2243 * @idle: pointer to store the value of timer_base->is_idle on return; 2244 * *idle contains the information whether tick was already stopped 2245 * 2246 * Returns the tick aligned clock monotonic time of the next pending timer or 2247 * KTIME_MAX if no timer is pending. When tick was already stopped KTIME_MAX is 2248 * returned as well. 2249 */ 2250 u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle) 2251 { 2252 if (*idle) 2253 return KTIME_MAX; 2254 2255 return __get_next_timer_interrupt(basej, basem, idle); 2256 } 2257 2258 /** 2259 * timer_clear_idle - Clear the idle state of the timer base 2260 * 2261 * Called with interrupts disabled 2262 */ 2263 void timer_clear_idle(void) 2264 { 2265 /* 2266 * We do this unlocked. The worst outcome is a remote enqueue sending 2267 * a pointless IPI, but taking the lock would just make the window for 2268 * sending the IPI a few instructions smaller for the cost of taking 2269 * the lock in the exit from idle path. 2270 */ 2271 __this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false); 2272 __this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false); 2273 trace_timer_base_idle(false, smp_processor_id()); 2274 } 2275 #endif 2276 2277 /** 2278 * __run_timers - run all expired timers (if any) on this CPU. 2279 * @base: the timer vector to be processed. 2280 */ 2281 static inline void __run_timers(struct timer_base *base) 2282 { 2283 struct hlist_head heads[LVL_DEPTH]; 2284 int levels; 2285 2286 lockdep_assert_held(&base->lock); 2287 2288 if (base->running_timer) 2289 return; 2290 2291 while (time_after_eq(jiffies, base->clk) && 2292 time_after_eq(jiffies, base->next_expiry)) { 2293 levels = collect_expired_timers(base, heads); 2294 /* 2295 * The two possible reasons for not finding any expired 2296 * timer at this clk are that all matching timers have been 2297 * dequeued or no timer has been queued since 2298 * base::next_expiry was set to base::clk + 2299 * NEXT_TIMER_MAX_DELTA. 2300 */ 2301 WARN_ON_ONCE(!levels && !base->next_expiry_recalc 2302 && base->timers_pending); 2303 /* 2304 * While executing timers, base->clk is set 1 offset ahead of 2305 * jiffies to avoid endless requeuing to current jiffies. 
2306 */ 2307 base->clk++; 2308 next_expiry_recalc(base); 2309 2310 while (levels--) 2311 expire_timers(base, heads + levels); 2312 } 2313 } 2314 2315 static void __run_timer_base(struct timer_base *base) 2316 { 2317 if (time_before(jiffies, base->next_expiry)) 2318 return; 2319 2320 timer_base_lock_expiry(base); 2321 raw_spin_lock_irq(&base->lock); 2322 __run_timers(base); 2323 raw_spin_unlock_irq(&base->lock); 2324 timer_base_unlock_expiry(base); 2325 } 2326 2327 static void run_timer_base(int index) 2328 { 2329 struct timer_base *base = this_cpu_ptr(&timer_bases[index]); 2330 2331 __run_timer_base(base); 2332 } 2333 2334 /* 2335 * This function runs timers and the timer-tq in bottom half context. 2336 */ 2337 static __latent_entropy void run_timer_softirq(struct softirq_action *h) 2338 { 2339 run_timer_base(BASE_LOCAL); 2340 if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) { 2341 run_timer_base(BASE_GLOBAL); 2342 run_timer_base(BASE_DEF); 2343 } 2344 } 2345 2346 /* 2347 * Called by the local, per-CPU timer interrupt on SMP. 2348 */ 2349 static void run_local_timers(void) 2350 { 2351 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]); 2352 2353 hrtimer_run_queues(); 2354 2355 for (int i = 0; i < NR_BASES; i++, base++) { 2356 /* Raise the softirq only if required. */ 2357 if (time_after_eq(jiffies, base->next_expiry)) { 2358 raise_softirq(TIMER_SOFTIRQ); 2359 return; 2360 } 2361 } 2362 } 2363 2364 /* 2365 * Called from the timer interrupt handler to charge one tick to the current 2366 * process. user_tick is 1 if the tick is user time, 0 for system. 2367 */ 2368 void update_process_times(int user_tick) 2369 { 2370 struct task_struct *p = current; 2371 2372 /* Note: this timer irq context must be accounted for as well. */ 2373 account_process_tick(p, user_tick); 2374 run_local_timers(); 2375 rcu_sched_clock_irq(user_tick); 2376 #ifdef CONFIG_IRQ_WORK 2377 if (in_irq()) 2378 irq_work_tick(); 2379 #endif 2380 scheduler_tick(); 2381 if (IS_ENABLED(CONFIG_POSIX_TIMERS)) 2382 run_posix_cpu_timers(); 2383 } 2384 2385 /* 2386 * Since schedule_timeout()'s timer is defined on the stack, it must store 2387 * the target task on the stack as well. 2388 */ 2389 struct process_timer { 2390 struct timer_list timer; 2391 struct task_struct *task; 2392 }; 2393 2394 static void process_timeout(struct timer_list *t) 2395 { 2396 struct process_timer *timeout = from_timer(timeout, t, timer); 2397 2398 wake_up_process(timeout->task); 2399 } 2400 2401 /** 2402 * schedule_timeout - sleep until timeout 2403 * @timeout: timeout value in jiffies 2404 * 2405 * Make the current task sleep until @timeout jiffies have elapsed. 2406 * The function behavior depends on the current task state 2407 * (see also set_current_state() description): 2408 * 2409 * %TASK_RUNNING - the scheduler is called, but the task does not sleep 2410 * at all. That happens because sched_submit_work() does nothing for 2411 * tasks in %TASK_RUNNING state. 2412 * 2413 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to 2414 * pass before the routine returns unless the current task is explicitly 2415 * woken up, (e.g. by wake_up_process()). 2416 * 2417 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is 2418 * delivered to the current task or the current task is explicitly woken 2419 * up. 2420 * 2421 * The current task state is guaranteed to be %TASK_RUNNING when this 2422 * routine returns. 
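 *
 * A minimal usage sketch (the calling context is hypothetical)::
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *
 * A non-zero return value here indicates an early wakeup, e.g. via
 * wake_up_process() or a delivered signal.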
2423 *
2424 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
2425 * the CPU away without a bound on the timeout. In this case the return
2426 * value will be %MAX_SCHEDULE_TIMEOUT.
2427 *
2428 * Returns 0 when the timer has expired, otherwise the remaining time in
2429 * jiffies is returned. In all cases the return value is guaranteed
2430 * to be non-negative.
2431 */
2432 signed long __sched schedule_timeout(signed long timeout)
2433 {
2434 struct process_timer timer;
2435 unsigned long expire;
2436
2437 switch (timeout)
2438 {
2439 case MAX_SCHEDULE_TIMEOUT:
2440 /*
2441 * This special case is useful for the caller's convenience.
2442 * Nothing more. We could have taken MAX_SCHEDULE_TIMEOUT
2443 * from one of the negative values, but I'd like to return a
2444 * valid offset (>=0) to allow the caller to do everything it
2445 * wants with the retval.
2446 */
2447 schedule();
2448 goto out;
2449 default:
2450 /*
2451 * Another bit of paranoia. Note that the retval will be
2452 * 0, since no piece of the kernel is supposed to check
2453 * for a negative retval of schedule_timeout() (it
2454 * should never happen anyway). The printk() just tells
2455 * you if something has gone wrong, and where.
2456 */
2457 if (timeout < 0) {
2458 printk(KERN_ERR "schedule_timeout: wrong timeout "
2459 "value %lx\n", timeout);
2460 dump_stack();
2461 __set_current_state(TASK_RUNNING);
2462 goto out;
2463 }
2464 }
2465
2466 expire = timeout + jiffies;
2467
2468 timer.task = current;
2469 timer_setup_on_stack(&timer.timer, process_timeout, 0);
2470 __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
2471 schedule();
2472 del_timer_sync(&timer.timer);
2473
2474 /* Remove the timer from the object tracker */
2475 destroy_timer_on_stack(&timer.timer);
2476
2477 timeout = expire - jiffies;
2478
2479 out:
2480 return timeout < 0 ? 0 : timeout;
2481 }
2482 EXPORT_SYMBOL(schedule_timeout);
2483
2484 /*
2485 * We can use __set_current_state() here because schedule_timeout() calls
2486 * schedule() unconditionally.
2487 */
2488 signed long __sched schedule_timeout_interruptible(signed long timeout)
2489 {
2490 __set_current_state(TASK_INTERRUPTIBLE);
2491 return schedule_timeout(timeout);
2492 }
2493 EXPORT_SYMBOL(schedule_timeout_interruptible);
2494
2495 signed long __sched schedule_timeout_killable(signed long timeout)
2496 {
2497 __set_current_state(TASK_KILLABLE);
2498 return schedule_timeout(timeout);
2499 }
2500 EXPORT_SYMBOL(schedule_timeout_killable);
2501
2502 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
2503 {
2504 __set_current_state(TASK_UNINTERRUPTIBLE);
2505 return schedule_timeout(timeout);
2506 }
2507 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
2508
2509 /*
2510 * Like schedule_timeout_uninterruptible(), except this task will not contribute
2511 * to load average.
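 *
 * For example, a (hypothetical) polling loop which should not be
 * accounted as load:
 *
 *	while (!foo_device_ready(dev))
 *		schedule_timeout_idle(HZ / 10);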
2512 */ 2513 signed long __sched schedule_timeout_idle(signed long timeout) 2514 { 2515 __set_current_state(TASK_IDLE); 2516 return schedule_timeout(timeout); 2517 } 2518 EXPORT_SYMBOL(schedule_timeout_idle); 2519 2520 #ifdef CONFIG_HOTPLUG_CPU 2521 static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head) 2522 { 2523 struct timer_list *timer; 2524 int cpu = new_base->cpu; 2525 2526 while (!hlist_empty(head)) { 2527 timer = hlist_entry(head->first, struct timer_list, entry); 2528 detach_timer(timer, false); 2529 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; 2530 internal_add_timer(new_base, timer); 2531 } 2532 } 2533 2534 int timers_prepare_cpu(unsigned int cpu) 2535 { 2536 struct timer_base *base; 2537 int b; 2538 2539 for (b = 0; b < NR_BASES; b++) { 2540 base = per_cpu_ptr(&timer_bases[b], cpu); 2541 base->clk = jiffies; 2542 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; 2543 base->next_expiry_recalc = false; 2544 base->timers_pending = false; 2545 base->is_idle = false; 2546 } 2547 return 0; 2548 } 2549 2550 int timers_dead_cpu(unsigned int cpu) 2551 { 2552 struct timer_base *old_base; 2553 struct timer_base *new_base; 2554 int b, i; 2555 2556 for (b = 0; b < NR_BASES; b++) { 2557 old_base = per_cpu_ptr(&timer_bases[b], cpu); 2558 new_base = get_cpu_ptr(&timer_bases[b]); 2559 /* 2560 * The caller is globally serialized and nobody else 2561 * takes two locks at once, deadlock is not possible. 2562 */ 2563 raw_spin_lock_irq(&new_base->lock); 2564 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 2565 2566 /* 2567 * The current CPUs base clock might be stale. Update it 2568 * before moving the timers over. 2569 */ 2570 forward_timer_base(new_base); 2571 2572 WARN_ON_ONCE(old_base->running_timer); 2573 old_base->running_timer = NULL; 2574 2575 for (i = 0; i < WHEEL_SIZE; i++) 2576 migrate_timer_list(new_base, old_base->vectors + i); 2577 2578 raw_spin_unlock(&old_base->lock); 2579 raw_spin_unlock_irq(&new_base->lock); 2580 put_cpu_ptr(&timer_bases); 2581 } 2582 return 0; 2583 } 2584 2585 #endif /* CONFIG_HOTPLUG_CPU */ 2586 2587 static void __init init_timer_cpu(int cpu) 2588 { 2589 struct timer_base *base; 2590 int i; 2591 2592 for (i = 0; i < NR_BASES; i++) { 2593 base = per_cpu_ptr(&timer_bases[i], cpu); 2594 base->cpu = cpu; 2595 raw_spin_lock_init(&base->lock); 2596 base->clk = jiffies; 2597 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; 2598 timer_base_init_expiry_lock(base); 2599 } 2600 } 2601 2602 static void __init init_timer_cpus(void) 2603 { 2604 int cpu; 2605 2606 for_each_possible_cpu(cpu) 2607 init_timer_cpu(cpu); 2608 } 2609 2610 void __init init_timers(void) 2611 { 2612 init_timer_cpus(); 2613 posix_cputimers_init_work(); 2614 open_softirq(TIMER_SOFTIRQ, run_timer_softirq); 2615 } 2616 2617 /** 2618 * msleep - sleep safely even with waitqueue interruptions 2619 * @msecs: Time in milliseconds to sleep for 2620 */ 2621 void msleep(unsigned int msecs) 2622 { 2623 unsigned long timeout = msecs_to_jiffies(msecs) + 1; 2624 2625 while (timeout) 2626 timeout = schedule_timeout_uninterruptible(timeout); 2627 } 2628 2629 EXPORT_SYMBOL(msleep); 2630 2631 /** 2632 * msleep_interruptible - sleep waiting for signals 2633 * @msecs: Time in milliseconds to sleep for 2634 */ 2635 unsigned long msleep_interruptible(unsigned int msecs) 2636 { 2637 unsigned long timeout = msecs_to_jiffies(msecs) + 1; 2638 2639 while (timeout && !signal_pending(current)) 2640 timeout = schedule_timeout_interruptible(timeout); 2641 return 
jiffies_to_msecs(timeout);
2642 }
2643
2644 EXPORT_SYMBOL(msleep_interruptible);
2645
2646 /**
2647 * usleep_range_state - Sleep for an approximate time in a given state
2648 * @min: Minimum time in usecs to sleep
2649 * @max: Maximum time in usecs to sleep
2650 * @state: State in which the current task will sleep
2651 *
2652 * In non-atomic context where the exact wakeup time is flexible, use
2653 * usleep_range_state() instead of udelay(). The sleep improves responsiveness
2654 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
2655 * power usage by allowing hrtimers to take advantage of an already-
2656 * scheduled interrupt instead of scheduling a new one just for this sleep.
2657 */
2658 void __sched usleep_range_state(unsigned long min, unsigned long max,
2659 unsigned int state)
2660 {
2661 ktime_t exp = ktime_add_us(ktime_get(), min);
2662 u64 delta = (u64)(max - min) * NSEC_PER_USEC;
2663
2664 for (;;) {
2665 __set_current_state(state);
2666 /* Do not return before the requested sleep time has elapsed */
2667 if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
2668 break;
2669 }
2670 }
2671 EXPORT_SYMBOL(usleep_range_state);
2672
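
/*
 * Illustrative usage sketches for the sleeping functions above (driver
 * side code; the contexts and values are made up, not taken from this
 * file):
 *
 *	// ~50us settle time, exact wakeup not critical: give hrtimers a
 *	// 50us..100us window so an already pending interrupt can be reused.
 *	usleep_range(50, 100);
 *
 *	// Multi-millisecond delay: jiffies granularity is good enough, so
 *	// the cheaper timer-wheel based msleep() is sufficient.
 *	msleep(20);
 *
 *	// As above, but return early when a signal is pending:
 *	if (msleep_interruptible(20))
 *		return -EINTR;
 *
 * usleep_range() is the TASK_UNINTERRUPTIBLE wrapper around
 * usleep_range_state() provided by include/linux/delay.h.
 */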