1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Infrastructure for migratable timers 4 * 5 * Copyright(C) 2022 linutronix GmbH 6 */ 7 #include <linux/cpuhotplug.h> 8 #include <linux/slab.h> 9 #include <linux/smp.h> 10 #include <linux/spinlock.h> 11 #include <linux/timerqueue.h> 12 #include <trace/events/ipi.h> 13 14 #include "timer_migration.h" 15 #include "tick-internal.h" 16 17 #define CREATE_TRACE_POINTS 18 #include <trace/events/timer_migration.h> 19 20 /* 21 * The timer migration mechanism is built on a hierarchy of groups. The 22 * lowest level group contains CPUs, the next level groups of CPU groups 23 * and so forth. The CPU groups are kept per node so for the normal case 24 * lock contention won't happen across nodes. Depending on the number of 25 * CPUs per node even the next level might be kept as groups of CPU groups 26 * per node and only the levels above cross the node topology. 27 * 28 * Example topology for a two node system with 24 CPUs each. 29 * 30 * LVL 2 [GRP2:0] 31 * GRP1:0 = GRP1:M 32 * 33 * LVL 1 [GRP1:0] [GRP1:1] 34 * GRP0:0 - GRP0:2 GRP0:3 - GRP0:5 35 * 36 * LVL 0 [GRP0:0] [GRP0:1] [GRP0:2] [GRP0:3] [GRP0:4] [GRP0:5] 37 * CPUS 0-7 8-15 16-23 24-31 32-39 40-47 38 * 39 * The groups hold a timer queue of events sorted by expiry time. These 40 * queues are updated when CPUs go in idle. When they come out of idle 41 * ignore flag of events is set. 42 * 43 * Each group has a designated migrator CPU/group as long as a CPU/group is 44 * active in the group. This designated role is necessary to avoid that all 45 * active CPUs in a group try to migrate expired timers from other CPUs, 46 * which would result in massive lock bouncing. 47 * 48 * When a CPU is awake, it checks in it's own timer tick the group 49 * hierarchy up to the point where it is assigned the migrator role or if 50 * no CPU is active, it also checks the groups where no migrator is set 51 * (TMIGR_NONE). 52 * 53 * If it finds expired timers in one of the group queues it pulls them over 54 * from the idle CPU and runs the timer function. After that it updates the 55 * group and the parent groups if required. 56 * 57 * CPUs which go idle arm their CPU local timer hardware for the next local 58 * (pinned) timer event. If the next migratable timer expires after the 59 * next local timer or the CPU has no migratable timer pending then the 60 * CPU does not queue an event in the LVL0 group. If the next migratable 61 * timer expires before the next local timer then the CPU queues that timer 62 * in the LVL0 group. In both cases the CPU marks itself idle in the LVL0 63 * group. 64 * 65 * When CPU comes out of idle and when a group has at least a single active 66 * child, the ignore flag of the tmigr_event is set. This indicates, that 67 * the event is ignored even if it is still enqueued in the parent groups 68 * timer queue. It will be removed when touching the timer queue the next 69 * time. This spares locking in active path as the lock protects (after 70 * setup) only event information. For more information about locking, 71 * please read the section "Locking rules". 72 * 73 * If the CPU is the migrator of the group then it delegates that role to 74 * the next active CPU in the group or sets migrator to TMIGR_NONE when 75 * there is no active CPU in the group. This delegation needs to be 76 * propagated up the hierarchy so hand over from other leaves can happen at 77 * all hierarchy levels w/o doing a search. 78 * 79 * When the last CPU in the system goes idle, then it drops all migrator 80 * duties up to the top level of the hierarchy (LVL2 in the example). It 81 * then has to make sure, that it arms it's own local hardware timer for 82 * the earliest event in the system. 83 * 84 * 85 * Lifetime rules: 86 * --------------- 87 * 88 * The groups are built up at init time or when CPUs come online. They are 89 * not destroyed when a group becomes empty due to offlining. The group 90 * just won't participate in the hierarchy management anymore. Destroying 91 * groups would result in interesting race conditions which would just make 92 * the whole mechanism slow and complex. 93 * 94 * 95 * Locking rules: 96 * -------------- 97 * 98 * For setting up new groups and handling events it's required to lock both 99 * child and parent group. The lock ordering is always bottom up. This also 100 * includes the per CPU locks in struct tmigr_cpu. For updating the migrator and 101 * active CPU/group information atomic_try_cmpxchg() is used instead and only 102 * the per CPU tmigr_cpu->lock is held. 103 * 104 * During the setup of groups tmigr_level_list is required. It is protected by 105 * @tmigr_mutex. 106 * 107 * When @timer_base->lock as well as tmigr related locks are required, the lock 108 * ordering is: first @timer_base->lock, afterwards tmigr related locks. 109 * 110 * 111 * Protection of the tmigr group state information: 112 * ------------------------------------------------ 113 * 114 * The state information with the list of active children and migrator needs to 115 * be protected by a sequence counter. It prevents a race when updates in child 116 * groups are propagated in changed order. The state update is performed 117 * lockless and group wise. The following scenario describes what happens 118 * without updating the sequence counter: 119 * 120 * Therefore, let's take three groups and four CPUs (CPU2 and CPU3 as well 121 * as GRP0:1 will not change during the scenario): 122 * 123 * LVL 1 [GRP1:0] 124 * migrator = GRP0:1 125 * active = GRP0:0, GRP0:1 126 * / \ 127 * LVL 0 [GRP0:0] [GRP0:1] 128 * migrator = CPU0 migrator = CPU2 129 * active = CPU0 active = CPU2 130 * / \ / \ 131 * CPUs 0 1 2 3 132 * active idle active idle 133 * 134 * 135 * 1. CPU0 goes idle. As the update is performed group wise, in the first step 136 * only GRP0:0 is updated. The update of GRP1:0 is pending as CPU0 has to 137 * walk the hierarchy. 138 * 139 * LVL 1 [GRP1:0] 140 * migrator = GRP0:1 141 * active = GRP0:0, GRP0:1 142 * / \ 143 * LVL 0 [GRP0:0] [GRP0:1] 144 * --> migrator = TMIGR_NONE migrator = CPU2 145 * --> active = active = CPU2 146 * / \ / \ 147 * CPUs 0 1 2 3 148 * --> idle idle active idle 149 * 150 * 2. While CPU0 goes idle and continues to update the state, CPU1 comes out of 151 * idle. CPU1 updates GRP0:0. The update for GRP1:0 is pending as CPU1 also 152 * has to walk the hierarchy. Both CPUs (CPU0 and CPU1) now walk the 153 * hierarchy to perform the needed update from their point of view. The 154 * currently visible state looks the following: 155 * 156 * LVL 1 [GRP1:0] 157 * migrator = GRP0:1 158 * active = GRP0:0, GRP0:1 159 * / \ 160 * LVL 0 [GRP0:0] [GRP0:1] 161 * --> migrator = CPU1 migrator = CPU2 162 * --> active = CPU1 active = CPU2 163 * / \ / \ 164 * CPUs 0 1 2 3 165 * idle --> active active idle 166 * 167 * 3. Here is the race condition: CPU1 managed to propagate its changes (from 168 * step 2) through the hierarchy to GRP1:0 before CPU0 (step 1) did. The 169 * active members of GRP1:0 remain unchanged after the update since it is 170 * still valid from CPU1 current point of view: 171 * 172 * LVL 1 [GRP1:0] 173 * --> migrator = GRP0:1 174 * --> active = GRP0:0, GRP0:1 175 * / \ 176 * LVL 0 [GRP0:0] [GRP0:1] 177 * migrator = CPU1 migrator = CPU2 178 * active = CPU1 active = CPU2 179 * / \ / \ 180 * CPUs 0 1 2 3 181 * idle active active idle 182 * 183 * 4. Now CPU0 finally propagates its changes (from step 1) to GRP1:0. 184 * 185 * LVL 1 [GRP1:0] 186 * --> migrator = GRP0:1 187 * --> active = GRP0:1 188 * / \ 189 * LVL 0 [GRP0:0] [GRP0:1] 190 * migrator = CPU1 migrator = CPU2 191 * active = CPU1 active = CPU2 192 * / \ / \ 193 * CPUs 0 1 2 3 194 * idle active active idle 195 * 196 * 197 * The race of CPU0 vs. CPU1 led to an inconsistent state in GRP1:0. CPU1 is 198 * active and is correctly listed as active in GRP0:0. However GRP1:0 does not 199 * have GRP0:0 listed as active, which is wrong. The sequence counter has been 200 * added to avoid inconsistent states during updates. The state is updated 201 * atomically only if all members, including the sequence counter, match the 202 * expected value (compare-and-exchange). 203 * 204 * Looking back at the previous example with the addition of the sequence 205 * counter: The update as performed by CPU0 in step 4 will fail. CPU1 changed 206 * the sequence number during the update in step 3 so the expected old value (as 207 * seen by CPU0 before starting the walk) does not match. 208 * 209 * Prevent race between new event and last CPU going inactive 210 * ---------------------------------------------------------- 211 * 212 * When the last CPU is going idle and there is a concurrent update of a new 213 * first global timer of an idle CPU, the group and child states have to be read 214 * while holding the lock in tmigr_update_events(). The following scenario shows 215 * what happens, when this is not done. 216 * 217 * 1. Only CPU2 is active: 218 * 219 * LVL 1 [GRP1:0] 220 * migrator = GRP0:1 221 * active = GRP0:1 222 * next_expiry = KTIME_MAX 223 * / \ 224 * LVL 0 [GRP0:0] [GRP0:1] 225 * migrator = TMIGR_NONE migrator = CPU2 226 * active = active = CPU2 227 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX 228 * / \ / \ 229 * CPUs 0 1 2 3 230 * idle idle active idle 231 * 232 * 2. Now CPU 2 goes idle (and has no global timer, that has to be handled) and 233 * propagates that to GRP0:1: 234 * 235 * LVL 1 [GRP1:0] 236 * migrator = GRP0:1 237 * active = GRP0:1 238 * next_expiry = KTIME_MAX 239 * / \ 240 * LVL 0 [GRP0:0] [GRP0:1] 241 * migrator = TMIGR_NONE --> migrator = TMIGR_NONE 242 * active = --> active = 243 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX 244 * / \ / \ 245 * CPUs 0 1 2 3 246 * idle idle --> idle idle 247 * 248 * 3. Now the idle state is propagated up to GRP1:0. As this is now the last 249 * child going idle in top level group, the expiry of the next group event 250 * has to be handed back to make sure no event is lost. As there is no event 251 * enqueued, KTIME_MAX is handed back to CPU2. 252 * 253 * LVL 1 [GRP1:0] 254 * --> migrator = TMIGR_NONE 255 * --> active = 256 * next_expiry = KTIME_MAX 257 * / \ 258 * LVL 0 [GRP0:0] [GRP0:1] 259 * migrator = TMIGR_NONE migrator = TMIGR_NONE 260 * active = active = 261 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX 262 * / \ / \ 263 * CPUs 0 1 2 3 264 * idle idle --> idle idle 265 * 266 * 4. CPU 0 has a new timer queued from idle and it expires at TIMER0. CPU0 267 * propagates that to GRP0:0: 268 * 269 * LVL 1 [GRP1:0] 270 * migrator = TMIGR_NONE 271 * active = 272 * next_expiry = KTIME_MAX 273 * / \ 274 * LVL 0 [GRP0:0] [GRP0:1] 275 * migrator = TMIGR_NONE migrator = TMIGR_NONE 276 * active = active = 277 * --> next_expiry = TIMER0 next_expiry = KTIME_MAX 278 * / \ / \ 279 * CPUs 0 1 2 3 280 * idle idle idle idle 281 * 282 * 5. GRP0:0 is not active, so the new timer has to be propagated to 283 * GRP1:0. Therefore the GRP1:0 state has to be read. When the stalled value 284 * (from step 2) is read, the timer is enqueued into GRP1:0, but nothing is 285 * handed back to CPU0, as it seems that there is still an active child in 286 * top level group. 287 * 288 * LVL 1 [GRP1:0] 289 * migrator = TMIGR_NONE 290 * active = 291 * --> next_expiry = TIMER0 292 * / \ 293 * LVL 0 [GRP0:0] [GRP0:1] 294 * migrator = TMIGR_NONE migrator = TMIGR_NONE 295 * active = active = 296 * next_expiry = TIMER0 next_expiry = KTIME_MAX 297 * / \ / \ 298 * CPUs 0 1 2 3 299 * idle idle idle idle 300 * 301 * This is prevented by reading the state when holding the lock (when a new 302 * timer has to be propagated from idle path):: 303 * 304 * CPU2 (tmigr_inactive_up()) CPU0 (tmigr_new_timer_up()) 305 * -------------------------- --------------------------- 306 * // step 3: 307 * cmpxchg(&GRP1:0->state); 308 * tmigr_update_events() { 309 * spin_lock(&GRP1:0->lock); 310 * // ... update events ... 311 * // hand back first expiry when GRP1:0 is idle 312 * spin_unlock(&GRP1:0->lock); 313 * // ^^^ release state modification 314 * } 315 * tmigr_update_events() { 316 * spin_lock(&GRP1:0->lock) 317 * // ^^^ acquire state modification 318 * group_state = atomic_read(&GRP1:0->state) 319 * // .... update events ... 320 * // hand back first expiry when GRP1:0 is idle 321 * spin_unlock(&GRP1:0->lock) <3> 322 * // ^^^ makes state visible for other 323 * // callers of tmigr_new_timer_up() 324 * } 325 * 326 * When CPU0 grabs the lock directly after cmpxchg, the first timer is reported 327 * back to CPU0 and also later on to CPU2. So no timer is missed. A concurrent 328 * update of the group state from active path is no problem, as the upcoming CPU 329 * will take care of the group events. 330 * 331 * Required event and timerqueue update after a remote expiry: 332 * ----------------------------------------------------------- 333 * 334 * After expiring timers of a remote CPU, a walk through the hierarchy and 335 * update of events and timerqueues is required. It is obviously needed if there 336 * is a 'new' global timer but also if there is no new global timer but the 337 * remote CPU is still idle. 338 * 339 * 1. CPU0 and CPU1 are idle and have both a global timer expiring at the same 340 * time. So both have an event enqueued in the timerqueue of GRP0:0. CPU3 is 341 * also idle and has no global timer pending. CPU2 is the only active CPU and 342 * thus also the migrator: 343 * 344 * LVL 1 [GRP1:0] 345 * migrator = GRP0:1 346 * active = GRP0:1 347 * --> timerqueue = evt-GRP0:0 348 * / \ 349 * LVL 0 [GRP0:0] [GRP0:1] 350 * migrator = TMIGR_NONE migrator = CPU2 351 * active = active = CPU2 352 * groupevt.ignore = false groupevt.ignore = true 353 * groupevt.cpu = CPU0 groupevt.cpu = 354 * timerqueue = evt-CPU0, timerqueue = 355 * evt-CPU1 356 * / \ / \ 357 * CPUs 0 1 2 3 358 * idle idle active idle 359 * 360 * 2. CPU2 starts to expire remote timers. It starts with LVL0 group 361 * GRP0:1. There is no event queued in the timerqueue, so CPU2 continues with 362 * the parent of GRP0:1: GRP1:0. In GRP1:0 it dequeues the first event. It 363 * looks at tmigr_event::cpu struct member and expires the pending timer(s) 364 * of CPU0. 365 * 366 * LVL 1 [GRP1:0] 367 * migrator = GRP0:1 368 * active = GRP0:1 369 * --> timerqueue = 370 * / \ 371 * LVL 0 [GRP0:0] [GRP0:1] 372 * migrator = TMIGR_NONE migrator = CPU2 373 * active = active = CPU2 374 * groupevt.ignore = false groupevt.ignore = true 375 * --> groupevt.cpu = CPU0 groupevt.cpu = 376 * timerqueue = evt-CPU0, timerqueue = 377 * evt-CPU1 378 * / \ / \ 379 * CPUs 0 1 2 3 380 * idle idle active idle 381 * 382 * 3. Some work has to be done after expiring the timers of CPU0. If we stop 383 * here, then CPU1's pending global timer(s) will not expire in time and the 384 * timerqueue of GRP0:0 has still an event for CPU0 enqueued which has just 385 * been processed. So it is required to walk the hierarchy from CPU0's point 386 * of view and update it accordingly. CPU0's event will be removed from the 387 * timerqueue because it has no pending timer. If CPU0 would have a timer 388 * pending then it has to expire after CPU1's first timer because all timers 389 * from this period were just expired. Either way CPU1's event will be first 390 * in GRP0:0's timerqueue and therefore set in the CPU field of the group 391 * event which is then enqueued in GRP1:0's timerqueue as GRP0:0 is still not 392 * active: 393 * 394 * LVL 1 [GRP1:0] 395 * migrator = GRP0:1 396 * active = GRP0:1 397 * --> timerqueue = evt-GRP0:0 398 * / \ 399 * LVL 0 [GRP0:0] [GRP0:1] 400 * migrator = TMIGR_NONE migrator = CPU2 401 * active = active = CPU2 402 * groupevt.ignore = false groupevt.ignore = true 403 * --> groupevt.cpu = CPU1 groupevt.cpu = 404 * --> timerqueue = evt-CPU1 timerqueue = 405 * / \ / \ 406 * CPUs 0 1 2 3 407 * idle idle active idle 408 * 409 * Now CPU2 (migrator) will continue step 2 at GRP1:0 and will expire the 410 * timer(s) of CPU1. 411 * 412 * The hierarchy walk in step 3 can be skipped if the migrator notices that a 413 * CPU of GRP0:0 is active again. The CPU will mark GRP0:0 active and take care 414 * of the group as migrator and any needed updates within the hierarchy. 415 */ 416 417 static DEFINE_MUTEX(tmigr_mutex); 418 static struct list_head *tmigr_level_list __read_mostly; 419 420 static unsigned int tmigr_hierarchy_levels __read_mostly; 421 static unsigned int tmigr_crossnode_level __read_mostly; 422 423 static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu); 424 425 #define TMIGR_NONE 0xFF 426 #define BIT_CNT 8 427 428 static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc) 429 { 430 return !(tmc->tmgroup && tmc->online); 431 } 432 433 /* 434 * Returns true, when @childmask corresponds to the group migrator or when the 435 * group is not active - so no migrator is set. 436 */ 437 static bool tmigr_check_migrator(struct tmigr_group *group, u8 childmask) 438 { 439 union tmigr_state s; 440 441 s.state = atomic_read(&group->migr_state); 442 443 if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE)) 444 return true; 445 446 return false; 447 } 448 449 static bool tmigr_check_migrator_and_lonely(struct tmigr_group *group, u8 childmask) 450 { 451 bool lonely, migrator = false; 452 unsigned long active; 453 union tmigr_state s; 454 455 s.state = atomic_read(&group->migr_state); 456 457 if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE)) 458 migrator = true; 459 460 active = s.active; 461 lonely = bitmap_weight(&active, BIT_CNT) <= 1; 462 463 return (migrator && lonely); 464 } 465 466 static bool tmigr_check_lonely(struct tmigr_group *group) 467 { 468 unsigned long active; 469 union tmigr_state s; 470 471 s.state = atomic_read(&group->migr_state); 472 473 active = s.active; 474 475 return bitmap_weight(&active, BIT_CNT) <= 1; 476 } 477 478 /** 479 * struct tmigr_walk - data required for walking the hierarchy 480 * @nextexp: Next CPU event expiry information which is handed into 481 * the timer migration code by the timer code 482 * (get_next_timer_interrupt()) 483 * @firstexp: Contains the first event expiry information when 484 * hierarchy is completely idle. When CPU itself was the 485 * last going idle, information makes sure, that CPU will 486 * be back in time. When using this value in the remote 487 * expiry case, firstexp is stored in the per CPU tmigr_cpu 488 * struct of CPU which expires remote timers. It is updated 489 * in top level group only. Be aware, there could occur a 490 * new top level of the hierarchy between the 'top level 491 * call' in tmigr_update_events() and the check for the 492 * parent group in walk_groups(). Then @firstexp might 493 * contain a value != KTIME_MAX even if it was not the 494 * final top level. This is not a problem, as the worst 495 * outcome is a CPU which might wake up a little early. 496 * @evt: Pointer to tmigr_event which needs to be queued (of idle 497 * child group) 498 * @childmask: childmask of child group 499 * @remote: Is set, when the new timer path is executed in 500 * tmigr_handle_remote_cpu() 501 * @basej: timer base in jiffies 502 * @now: timer base monotonic 503 * @check: is set if there is the need to handle remote timers; 504 * required in tmigr_requires_handle_remote() only 505 * @tmc_active: this flag indicates, whether the CPU which triggers 506 * the hierarchy walk is !idle in the timer migration 507 * hierarchy. When the CPU is idle and the whole hierarchy is 508 * idle, only the first event of the top level has to be 509 * considered. 510 */ 511 struct tmigr_walk { 512 u64 nextexp; 513 u64 firstexp; 514 struct tmigr_event *evt; 515 u8 childmask; 516 bool remote; 517 unsigned long basej; 518 u64 now; 519 bool check; 520 bool tmc_active; 521 }; 522 523 typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *); 524 525 static void __walk_groups(up_f up, struct tmigr_walk *data, 526 struct tmigr_cpu *tmc) 527 { 528 struct tmigr_group *child = NULL, *group = tmc->tmgroup; 529 530 do { 531 WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels); 532 533 if (up(group, child, data)) 534 break; 535 536 child = group; 537 group = group->parent; 538 } while (group); 539 } 540 541 static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc) 542 { 543 lockdep_assert_held(&tmc->lock); 544 545 __walk_groups(up, data, tmc); 546 } 547 548 /* 549 * Returns the next event of the timerqueue @group->events 550 * 551 * Removes timers with ignore flag and update next_expiry of the group. Values 552 * of the group event are updated in tmigr_update_events() only. 553 */ 554 static struct tmigr_event *tmigr_next_groupevt(struct tmigr_group *group) 555 { 556 struct timerqueue_node *node = NULL; 557 struct tmigr_event *evt = NULL; 558 559 lockdep_assert_held(&group->lock); 560 561 WRITE_ONCE(group->next_expiry, KTIME_MAX); 562 563 while ((node = timerqueue_getnext(&group->events))) { 564 evt = container_of(node, struct tmigr_event, nextevt); 565 566 if (!evt->ignore) { 567 WRITE_ONCE(group->next_expiry, evt->nextevt.expires); 568 return evt; 569 } 570 571 /* 572 * Remove next timers with ignore flag, because the group lock 573 * is held anyway 574 */ 575 if (!timerqueue_del(&group->events, node)) 576 break; 577 } 578 579 return NULL; 580 } 581 582 /* 583 * Return the next event (with the expiry equal or before @now) 584 * 585 * Event, which is returned, is also removed from the queue. 586 */ 587 static struct tmigr_event *tmigr_next_expired_groupevt(struct tmigr_group *group, 588 u64 now) 589 { 590 struct tmigr_event *evt = tmigr_next_groupevt(group); 591 592 if (!evt || now < evt->nextevt.expires) 593 return NULL; 594 595 /* 596 * The event is ready to expire. Remove it and update next group event. 597 */ 598 timerqueue_del(&group->events, &evt->nextevt); 599 tmigr_next_groupevt(group); 600 601 return evt; 602 } 603 604 static u64 tmigr_next_groupevt_expires(struct tmigr_group *group) 605 { 606 struct tmigr_event *evt; 607 608 evt = tmigr_next_groupevt(group); 609 610 if (!evt) 611 return KTIME_MAX; 612 else 613 return evt->nextevt.expires; 614 } 615 616 static bool tmigr_active_up(struct tmigr_group *group, 617 struct tmigr_group *child, 618 struct tmigr_walk *data) 619 { 620 union tmigr_state curstate, newstate; 621 bool walk_done; 622 u8 childmask; 623 624 childmask = data->childmask; 625 /* 626 * No memory barrier is required here in contrast to 627 * tmigr_inactive_up(), as the group state change does not depend on the 628 * child state. 629 */ 630 curstate.state = atomic_read(&group->migr_state); 631 632 do { 633 newstate = curstate; 634 walk_done = true; 635 636 if (newstate.migrator == TMIGR_NONE) { 637 newstate.migrator = childmask; 638 639 /* Changes need to be propagated */ 640 walk_done = false; 641 } 642 643 newstate.active |= childmask; 644 newstate.seq++; 645 646 } while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)); 647 648 trace_tmigr_group_set_cpu_active(group, newstate, childmask); 649 650 if (walk_done == false) 651 data->childmask = group->childmask; 652 653 /* 654 * The group is active (again). The group event might be still queued 655 * into the parent group's timerqueue but can now be handled by the 656 * migrator of this group. Therefore the ignore flag for the group event 657 * is updated to reflect this. 658 * 659 * The update of the ignore flag in the active path is done lockless. In 660 * worst case the migrator of the parent group observes the change too 661 * late and expires remotely all events belonging to this group. The 662 * lock is held while updating the ignore flag in idle path. So this 663 * state change will not be lost. 664 */ 665 group->groupevt.ignore = true; 666 667 return walk_done; 668 } 669 670 static void __tmigr_cpu_activate(struct tmigr_cpu *tmc) 671 { 672 struct tmigr_walk data; 673 674 data.childmask = tmc->childmask; 675 676 trace_tmigr_cpu_active(tmc); 677 678 tmc->cpuevt.ignore = true; 679 WRITE_ONCE(tmc->wakeup, KTIME_MAX); 680 681 walk_groups(&tmigr_active_up, &data, tmc); 682 } 683 684 /** 685 * tmigr_cpu_activate() - set this CPU active in timer migration hierarchy 686 * 687 * Call site timer_clear_idle() is called with interrupts disabled. 688 */ 689 void tmigr_cpu_activate(void) 690 { 691 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); 692 693 if (tmigr_is_not_available(tmc)) 694 return; 695 696 if (WARN_ON_ONCE(!tmc->idle)) 697 return; 698 699 raw_spin_lock(&tmc->lock); 700 tmc->idle = false; 701 __tmigr_cpu_activate(tmc); 702 raw_spin_unlock(&tmc->lock); 703 } 704 705 /* 706 * Returns true, if there is nothing to be propagated to the next level 707 * 708 * @data->firstexp is set to expiry of first gobal event of the (top level of 709 * the) hierarchy, but only when hierarchy is completely idle. 710 * 711 * The child and group states need to be read under the lock, to prevent a race 712 * against a concurrent tmigr_inactive_up() run when the last CPU goes idle. See 713 * also section "Prevent race between new event and last CPU going inactive" in 714 * the documentation at the top. 715 * 716 * This is the only place where the group event expiry value is set. 717 */ 718 static 719 bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child, 720 struct tmigr_walk *data) 721 { 722 struct tmigr_event *evt, *first_childevt; 723 union tmigr_state childstate, groupstate; 724 bool remote = data->remote; 725 bool walk_done = false; 726 u64 nextexp; 727 728 if (child) { 729 raw_spin_lock(&child->lock); 730 raw_spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING); 731 732 childstate.state = atomic_read(&child->migr_state); 733 groupstate.state = atomic_read(&group->migr_state); 734 735 if (childstate.active) { 736 walk_done = true; 737 goto unlock; 738 } 739 740 first_childevt = tmigr_next_groupevt(child); 741 nextexp = child->next_expiry; 742 evt = &child->groupevt; 743 744 evt->ignore = (nextexp == KTIME_MAX) ? true : false; 745 } else { 746 nextexp = data->nextexp; 747 748 first_childevt = evt = data->evt; 749 750 /* 751 * Walking the hierarchy is required in any case when a 752 * remote expiry was done before. This ensures to not lose 753 * already queued events in non active groups (see section 754 * "Required event and timerqueue update after a remote 755 * expiry" in the documentation at the top). 756 * 757 * The two call sites which are executed without a remote expiry 758 * before, are not prevented from propagating changes through 759 * the hierarchy by the return: 760 * - When entering this path by tmigr_new_timer(), @evt->ignore 761 * is never set. 762 * - tmigr_inactive_up() takes care of the propagation by 763 * itself and ignores the return value. But an immediate 764 * return is possible if there is a parent, sparing group 765 * locking at this level, because the upper walking call to 766 * the parent will take care about removing this event from 767 * within the group and update next_expiry accordingly. 768 * 769 * However if there is no parent, ie: the hierarchy has only a 770 * single level so @group is the top level group, make sure the 771 * first event information of the group is updated properly and 772 * also handled properly, so skip this fast return path. 773 */ 774 if (evt->ignore && !remote && group->parent) 775 return true; 776 777 raw_spin_lock(&group->lock); 778 779 childstate.state = 0; 780 groupstate.state = atomic_read(&group->migr_state); 781 } 782 783 /* 784 * If the child event is already queued in the group, remove it from the 785 * queue when the expiry time changed only or when it could be ignored. 786 */ 787 if (timerqueue_node_queued(&evt->nextevt)) { 788 if ((evt->nextevt.expires == nextexp) && !evt->ignore) { 789 /* Make sure not to miss a new CPU event with the same expiry */ 790 evt->cpu = first_childevt->cpu; 791 goto check_toplvl; 792 } 793 794 if (!timerqueue_del(&group->events, &evt->nextevt)) 795 WRITE_ONCE(group->next_expiry, KTIME_MAX); 796 } 797 798 if (evt->ignore) { 799 /* 800 * When the next child event could be ignored (nextexp is 801 * KTIME_MAX) and there was no remote timer handling before or 802 * the group is already active, there is no need to walk the 803 * hierarchy even if there is a parent group. 804 * 805 * The other way round: even if the event could be ignored, but 806 * if a remote timer handling was executed before and the group 807 * is not active, walking the hierarchy is required to not miss 808 * an enqueued timer in the non active group. The enqueued timer 809 * of the group needs to be propagated to a higher level to 810 * ensure it is handled. 811 */ 812 if (!remote || groupstate.active) 813 walk_done = true; 814 } else { 815 evt->nextevt.expires = nextexp; 816 evt->cpu = first_childevt->cpu; 817 818 if (timerqueue_add(&group->events, &evt->nextevt)) 819 WRITE_ONCE(group->next_expiry, nextexp); 820 } 821 822 check_toplvl: 823 if (!group->parent && (groupstate.migrator == TMIGR_NONE)) { 824 walk_done = true; 825 826 /* 827 * Nothing to do when update was done during remote timer 828 * handling. First timer in top level group which needs to be 829 * handled when top level group is not active, is calculated 830 * directly in tmigr_handle_remote_up(). 831 */ 832 if (remote) 833 goto unlock; 834 835 /* 836 * The top level group is idle and it has to be ensured the 837 * global timers are handled in time. (This could be optimized 838 * by keeping track of the last global scheduled event and only 839 * arming it on the CPU if the new event is earlier. Not sure if 840 * its worth the complexity.) 841 */ 842 data->firstexp = tmigr_next_groupevt_expires(group); 843 } 844 845 trace_tmigr_update_events(child, group, childstate, groupstate, 846 nextexp); 847 848 unlock: 849 raw_spin_unlock(&group->lock); 850 851 if (child) 852 raw_spin_unlock(&child->lock); 853 854 return walk_done; 855 } 856 857 static bool tmigr_new_timer_up(struct tmigr_group *group, 858 struct tmigr_group *child, 859 struct tmigr_walk *data) 860 { 861 return tmigr_update_events(group, child, data); 862 } 863 864 /* 865 * Returns the expiry of the next timer that needs to be handled. KTIME_MAX is 866 * returned, if an active CPU will handle all the timer migration hierarchy 867 * timers. 868 */ 869 static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp) 870 { 871 struct tmigr_walk data = { .nextexp = nextexp, 872 .firstexp = KTIME_MAX, 873 .evt = &tmc->cpuevt }; 874 875 lockdep_assert_held(&tmc->lock); 876 877 if (tmc->remote) 878 return KTIME_MAX; 879 880 trace_tmigr_cpu_new_timer(tmc); 881 882 tmc->cpuevt.ignore = false; 883 data.remote = false; 884 885 walk_groups(&tmigr_new_timer_up, &data, tmc); 886 887 /* If there is a new first global event, make sure it is handled */ 888 return data.firstexp; 889 } 890 891 static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now, 892 unsigned long jif) 893 { 894 struct timer_events tevt; 895 struct tmigr_walk data; 896 struct tmigr_cpu *tmc; 897 898 tmc = per_cpu_ptr(&tmigr_cpu, cpu); 899 900 raw_spin_lock_irq(&tmc->lock); 901 902 /* 903 * If the remote CPU is offline then the timers have been migrated to 904 * another CPU. 905 * 906 * If tmigr_cpu::remote is set, at the moment another CPU already 907 * expires the timers of the remote CPU. 908 * 909 * If tmigr_event::ignore is set, then the CPU returns from idle and 910 * takes care of its timers. 911 * 912 * If the next event expires in the future, then the event has been 913 * updated and there are no timers to expire right now. The CPU which 914 * updated the event takes care when hierarchy is completely 915 * idle. Otherwise the migrator does it as the event is enqueued. 916 */ 917 if (!tmc->online || tmc->remote || tmc->cpuevt.ignore || 918 now < tmc->cpuevt.nextevt.expires) { 919 raw_spin_unlock_irq(&tmc->lock); 920 return; 921 } 922 923 trace_tmigr_handle_remote_cpu(tmc); 924 925 tmc->remote = true; 926 WRITE_ONCE(tmc->wakeup, KTIME_MAX); 927 928 /* Drop the lock to allow the remote CPU to exit idle */ 929 raw_spin_unlock_irq(&tmc->lock); 930 931 if (cpu != smp_processor_id()) 932 timer_expire_remote(cpu); 933 934 /* 935 * Lock ordering needs to be preserved - timer_base locks before tmigr 936 * related locks (see section "Locking rules" in the documentation at 937 * the top). During fetching the next timer interrupt, also tmc->lock 938 * needs to be held. Otherwise there is a possible race window against 939 * the CPU itself when it comes out of idle, updates the first timer in 940 * the hierarchy and goes back to idle. 941 * 942 * timer base locks are dropped as fast as possible: After checking 943 * whether the remote CPU went offline in the meantime and after 944 * fetching the next remote timer interrupt. Dropping the locks as fast 945 * as possible keeps the locking region small and prevents holding 946 * several (unnecessary) locks during walking the hierarchy for updating 947 * the timerqueue and group events. 948 */ 949 local_irq_disable(); 950 timer_lock_remote_bases(cpu); 951 raw_spin_lock(&tmc->lock); 952 953 /* 954 * When the CPU went offline in the meantime, no hierarchy walk has to 955 * be done for updating the queued events, because the walk was 956 * already done during marking the CPU offline in the hierarchy. 957 * 958 * When the CPU is no longer idle, the CPU takes care of the timers and 959 * also of the timers in the hierarchy. 960 * 961 * (See also section "Required event and timerqueue update after a 962 * remote expiry" in the documentation at the top) 963 */ 964 if (!tmc->online || !tmc->idle) { 965 timer_unlock_remote_bases(cpu); 966 goto unlock; 967 } 968 969 /* next event of CPU */ 970 fetch_next_timer_interrupt_remote(jif, now, &tevt, cpu); 971 timer_unlock_remote_bases(cpu); 972 973 data.nextexp = tevt.global; 974 data.firstexp = KTIME_MAX; 975 data.evt = &tmc->cpuevt; 976 data.remote = true; 977 978 /* 979 * The update is done even when there is no 'new' global timer pending 980 * on the remote CPU (see section "Required event and timerqueue update 981 * after a remote expiry" in the documentation at the top) 982 */ 983 walk_groups(&tmigr_new_timer_up, &data, tmc); 984 985 unlock: 986 tmc->remote = false; 987 raw_spin_unlock_irq(&tmc->lock); 988 } 989 990 static bool tmigr_handle_remote_up(struct tmigr_group *group, 991 struct tmigr_group *child, 992 struct tmigr_walk *data) 993 { 994 struct tmigr_event *evt; 995 unsigned long jif; 996 u8 childmask; 997 u64 now; 998 999 jif = data->basej; 1000 now = data->now; 1001 1002 childmask = data->childmask; 1003 1004 trace_tmigr_handle_remote(group); 1005 again: 1006 /* 1007 * Handle the group only if @childmask is the migrator or if the 1008 * group has no migrator. Otherwise the group is active and is 1009 * handled by its own migrator. 1010 */ 1011 if (!tmigr_check_migrator(group, childmask)) 1012 return true; 1013 1014 raw_spin_lock_irq(&group->lock); 1015 1016 evt = tmigr_next_expired_groupevt(group, now); 1017 1018 if (evt) { 1019 unsigned int remote_cpu = evt->cpu; 1020 1021 raw_spin_unlock_irq(&group->lock); 1022 1023 tmigr_handle_remote_cpu(remote_cpu, now, jif); 1024 1025 /* check if there is another event, that needs to be handled */ 1026 goto again; 1027 } 1028 1029 /* 1030 * Update of childmask for the next level and keep track of the expiry 1031 * of the first event that needs to be handled (group->next_expiry was 1032 * updated by tmigr_next_expired_groupevt(), next was set by 1033 * tmigr_handle_remote_cpu()). 1034 */ 1035 data->childmask = group->childmask; 1036 data->firstexp = group->next_expiry; 1037 1038 raw_spin_unlock_irq(&group->lock); 1039 1040 return false; 1041 } 1042 1043 /** 1044 * tmigr_handle_remote() - Handle global timers of remote idle CPUs 1045 * 1046 * Called from the timer soft interrupt with interrupts enabled. 1047 */ 1048 void tmigr_handle_remote(void) 1049 { 1050 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); 1051 struct tmigr_walk data; 1052 1053 if (tmigr_is_not_available(tmc)) 1054 return; 1055 1056 data.childmask = tmc->childmask; 1057 data.firstexp = KTIME_MAX; 1058 1059 /* 1060 * NOTE: This is a doubled check because the migrator test will be done 1061 * in tmigr_handle_remote_up() anyway. Keep this check to speed up the 1062 * return when nothing has to be done. 1063 */ 1064 if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) { 1065 /* 1066 * If this CPU was an idle migrator, make sure to clear its wakeup 1067 * value so it won't chase timers that have already expired elsewhere. 1068 * This avoids endless requeue from tmigr_new_timer(). 1069 */ 1070 if (READ_ONCE(tmc->wakeup) == KTIME_MAX) 1071 return; 1072 } 1073 1074 data.now = get_jiffies_update(&data.basej); 1075 1076 /* 1077 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to 1078 * KTIME_MAX. Even if tmc->lock is not held during the whole remote 1079 * handling, tmc->wakeup is fine to be stale as it is called in 1080 * interrupt context and tick_nohz_next_event() is executed in interrupt 1081 * exit path only after processing the last pending interrupt. 1082 */ 1083 1084 __walk_groups(&tmigr_handle_remote_up, &data, tmc); 1085 1086 raw_spin_lock_irq(&tmc->lock); 1087 WRITE_ONCE(tmc->wakeup, data.firstexp); 1088 raw_spin_unlock_irq(&tmc->lock); 1089 } 1090 1091 static bool tmigr_requires_handle_remote_up(struct tmigr_group *group, 1092 struct tmigr_group *child, 1093 struct tmigr_walk *data) 1094 { 1095 u8 childmask; 1096 1097 childmask = data->childmask; 1098 1099 /* 1100 * Handle the group only if the child is the migrator or if the group 1101 * has no migrator. Otherwise the group is active and is handled by its 1102 * own migrator. 1103 */ 1104 if (!tmigr_check_migrator(group, childmask)) 1105 return true; 1106 1107 /* 1108 * When there is a parent group and the CPU which triggered the 1109 * hierarchy walk is not active, proceed the walk to reach the top level 1110 * group before reading the next_expiry value. 1111 */ 1112 if (group->parent && !data->tmc_active) 1113 goto out; 1114 1115 /* 1116 * The lock is required on 32bit architectures to read the variable 1117 * consistently with a concurrent writer. On 64bit the lock is not 1118 * required because the read operation is not split and so it is always 1119 * consistent. 1120 */ 1121 if (IS_ENABLED(CONFIG_64BIT)) { 1122 data->firstexp = READ_ONCE(group->next_expiry); 1123 if (data->now >= data->firstexp) { 1124 data->check = true; 1125 return true; 1126 } 1127 } else { 1128 raw_spin_lock(&group->lock); 1129 data->firstexp = group->next_expiry; 1130 if (data->now >= group->next_expiry) { 1131 data->check = true; 1132 raw_spin_unlock(&group->lock); 1133 return true; 1134 } 1135 raw_spin_unlock(&group->lock); 1136 } 1137 1138 out: 1139 /* Update of childmask for the next level */ 1140 data->childmask = group->childmask; 1141 return false; 1142 } 1143 1144 /** 1145 * tmigr_requires_handle_remote() - Check the need of remote timer handling 1146 * 1147 * Must be called with interrupts disabled. 1148 */ 1149 bool tmigr_requires_handle_remote(void) 1150 { 1151 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); 1152 struct tmigr_walk data; 1153 unsigned long jif; 1154 bool ret = false; 1155 1156 if (tmigr_is_not_available(tmc)) 1157 return ret; 1158 1159 data.now = get_jiffies_update(&jif); 1160 data.childmask = tmc->childmask; 1161 data.firstexp = KTIME_MAX; 1162 data.tmc_active = !tmc->idle; 1163 data.check = false; 1164 1165 /* 1166 * If the CPU is active, walk the hierarchy to check whether a remote 1167 * expiry is required. 1168 * 1169 * Check is done lockless as interrupts are disabled and @tmc->idle is 1170 * set only by the local CPU. 1171 */ 1172 if (!tmc->idle) { 1173 __walk_groups(&tmigr_requires_handle_remote_up, &data, tmc); 1174 1175 return data.check; 1176 } 1177 1178 /* 1179 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock 1180 * is required on 32bit architectures to read the variable consistently 1181 * with a concurrent writer. On 64bit the lock is not required because 1182 * the read operation is not split and so it is always consistent. 1183 */ 1184 if (IS_ENABLED(CONFIG_64BIT)) { 1185 if (data.now >= READ_ONCE(tmc->wakeup)) 1186 return true; 1187 } else { 1188 raw_spin_lock(&tmc->lock); 1189 if (data.now >= tmc->wakeup) 1190 ret = true; 1191 raw_spin_unlock(&tmc->lock); 1192 } 1193 1194 return ret; 1195 } 1196 1197 /** 1198 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc) 1199 * @nextexp: Next expiry of global timer (or KTIME_MAX if not) 1200 * 1201 * The CPU is already deactivated in the timer migration 1202 * hierarchy. tick_nohz_get_sleep_length() calls tick_nohz_next_event() 1203 * and thereby the timer idle path is executed once more. @tmc->wakeup 1204 * holds the first timer, when the timer migration hierarchy is 1205 * completely idle. 1206 * 1207 * Returns the first timer that needs to be handled by this CPU or KTIME_MAX if 1208 * nothing needs to be done. 1209 */ 1210 u64 tmigr_cpu_new_timer(u64 nextexp) 1211 { 1212 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); 1213 u64 ret; 1214 1215 if (tmigr_is_not_available(tmc)) 1216 return nextexp; 1217 1218 raw_spin_lock(&tmc->lock); 1219 1220 ret = READ_ONCE(tmc->wakeup); 1221 if (nextexp != KTIME_MAX) { 1222 if (nextexp != tmc->cpuevt.nextevt.expires || 1223 tmc->cpuevt.ignore) { 1224 ret = tmigr_new_timer(tmc, nextexp); 1225 } 1226 } 1227 /* 1228 * Make sure the reevaluation of timers in idle path will not miss an 1229 * event. 1230 */ 1231 WRITE_ONCE(tmc->wakeup, ret); 1232 1233 trace_tmigr_cpu_new_timer_idle(tmc, nextexp); 1234 raw_spin_unlock(&tmc->lock); 1235 return ret; 1236 } 1237 1238 static bool tmigr_inactive_up(struct tmigr_group *group, 1239 struct tmigr_group *child, 1240 struct tmigr_walk *data) 1241 { 1242 union tmigr_state curstate, newstate, childstate; 1243 bool walk_done; 1244 u8 childmask; 1245 1246 childmask = data->childmask; 1247 childstate.state = 0; 1248 1249 /* 1250 * The memory barrier is paired with the cmpxchg() in tmigr_active_up() 1251 * to make sure the updates of child and group states are ordered. The 1252 * ordering is mandatory, as the group state change depends on the child 1253 * state. 1254 */ 1255 curstate.state = atomic_read_acquire(&group->migr_state); 1256 1257 for (;;) { 1258 if (child) 1259 childstate.state = atomic_read(&child->migr_state); 1260 1261 newstate = curstate; 1262 walk_done = true; 1263 1264 /* Reset active bit when the child is no longer active */ 1265 if (!childstate.active) 1266 newstate.active &= ~childmask; 1267 1268 if (newstate.migrator == childmask) { 1269 /* 1270 * Find a new migrator for the group, because the child 1271 * group is idle! 1272 */ 1273 if (!childstate.active) { 1274 unsigned long new_migr_bit, active = newstate.active; 1275 1276 new_migr_bit = find_first_bit(&active, BIT_CNT); 1277 1278 if (new_migr_bit != BIT_CNT) { 1279 newstate.migrator = BIT(new_migr_bit); 1280 } else { 1281 newstate.migrator = TMIGR_NONE; 1282 1283 /* Changes need to be propagated */ 1284 walk_done = false; 1285 } 1286 } 1287 } 1288 1289 newstate.seq++; 1290 1291 WARN_ON_ONCE((newstate.migrator != TMIGR_NONE) && !(newstate.active)); 1292 1293 if (atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)) { 1294 trace_tmigr_group_set_cpu_inactive(group, newstate, childmask); 1295 break; 1296 } 1297 1298 /* 1299 * The memory barrier is paired with the cmpxchg() in 1300 * tmigr_active_up() to make sure the updates of child and group 1301 * states are ordered. It is required only when the above 1302 * try_cmpxchg() fails. 1303 */ 1304 smp_mb__after_atomic(); 1305 } 1306 1307 data->remote = false; 1308 1309 /* Event Handling */ 1310 tmigr_update_events(group, child, data); 1311 1312 if (walk_done == false) 1313 data->childmask = group->childmask; 1314 1315 return walk_done; 1316 } 1317 1318 static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp) 1319 { 1320 struct tmigr_walk data = { .nextexp = nextexp, 1321 .firstexp = KTIME_MAX, 1322 .evt = &tmc->cpuevt, 1323 .childmask = tmc->childmask }; 1324 1325 /* 1326 * If nextexp is KTIME_MAX, the CPU event will be ignored because the 1327 * local timer expires before the global timer, no global timer is set 1328 * or CPU goes offline. 1329 */ 1330 if (nextexp != KTIME_MAX) 1331 tmc->cpuevt.ignore = false; 1332 1333 walk_groups(&tmigr_inactive_up, &data, tmc); 1334 return data.firstexp; 1335 } 1336 1337 /** 1338 * tmigr_cpu_deactivate() - Put current CPU into inactive state 1339 * @nextexp: The next global timer expiry of the current CPU 1340 * 1341 * Must be called with interrupts disabled. 1342 * 1343 * Return: the next event expiry of the current CPU or the next event expiry 1344 * from the hierarchy if this CPU is the top level migrator or the hierarchy is 1345 * completely idle. 1346 */ 1347 u64 tmigr_cpu_deactivate(u64 nextexp) 1348 { 1349 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); 1350 u64 ret; 1351 1352 if (tmigr_is_not_available(tmc)) 1353 return nextexp; 1354 1355 raw_spin_lock(&tmc->lock); 1356 1357 ret = __tmigr_cpu_deactivate(tmc, nextexp); 1358 1359 tmc->idle = true; 1360 1361 /* 1362 * Make sure the reevaluation of timers in idle path will not miss an 1363 * event. 1364 */ 1365 WRITE_ONCE(tmc->wakeup, ret); 1366 1367 trace_tmigr_cpu_idle(tmc, nextexp); 1368 raw_spin_unlock(&tmc->lock); 1369 return ret; 1370 } 1371 1372 /** 1373 * tmigr_quick_check() - Quick forecast of next tmigr event when CPU wants to 1374 * go idle 1375 * @nextevt: The next global timer expiry of the current CPU 1376 * 1377 * Return: 1378 * * KTIME_MAX - when it is probable that nothing has to be done (not 1379 * the only one in the level 0 group; and if it is the 1380 * only one in level 0 group, but there are more than a 1381 * single group active on the way to top level) 1382 * * nextevt - when CPU is offline and has to handle timer on his own 1383 * or when on the way to top in every group only a single 1384 * child is active but @nextevt is before the lowest 1385 * next_expiry encountered while walking up to top level. 1386 * * next_expiry - value of lowest expiry encountered while walking groups 1387 * if only a single child is active on each and @nextevt 1388 * is after this lowest expiry. 1389 */ 1390 u64 tmigr_quick_check(u64 nextevt) 1391 { 1392 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); 1393 struct tmigr_group *group = tmc->tmgroup; 1394 1395 if (tmigr_is_not_available(tmc)) 1396 return nextevt; 1397 1398 if (WARN_ON_ONCE(tmc->idle)) 1399 return nextevt; 1400 1401 if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->childmask)) 1402 return KTIME_MAX; 1403 1404 do { 1405 if (!tmigr_check_lonely(group)) { 1406 return KTIME_MAX; 1407 } else { 1408 /* 1409 * Since current CPU is active, events may not be sorted 1410 * from bottom to the top because the CPU's event is ignored 1411 * up to the top and its sibling's events not propagated upwards. 1412 * Thus keep track of the lowest observed expiry. 1413 */ 1414 nextevt = min_t(u64, nextevt, READ_ONCE(group->next_expiry)); 1415 if (!group->parent) 1416 return nextevt; 1417 } 1418 group = group->parent; 1419 } while (group); 1420 1421 return KTIME_MAX; 1422 } 1423 1424 /* 1425 * tmigr_trigger_active() - trigger a CPU to become active again 1426 * 1427 * This function is executed on a CPU which is part of cpu_online_mask, when the 1428 * last active CPU in the hierarchy is offlining. With this, it is ensured that 1429 * the other CPU is active and takes over the migrator duty. 1430 */ 1431 static long tmigr_trigger_active(void *unused) 1432 { 1433 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); 1434 1435 WARN_ON_ONCE(!tmc->online || tmc->idle); 1436 1437 return 0; 1438 } 1439 1440 static int tmigr_cpu_offline(unsigned int cpu) 1441 { 1442 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); 1443 int migrator; 1444 u64 firstexp; 1445 1446 raw_spin_lock_irq(&tmc->lock); 1447 tmc->online = false; 1448 WRITE_ONCE(tmc->wakeup, KTIME_MAX); 1449 1450 /* 1451 * CPU has to handle the local events on his own, when on the way to 1452 * offline; Therefore nextevt value is set to KTIME_MAX 1453 */ 1454 firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX); 1455 trace_tmigr_cpu_offline(tmc); 1456 raw_spin_unlock_irq(&tmc->lock); 1457 1458 if (firstexp != KTIME_MAX) { 1459 migrator = cpumask_any_but(cpu_online_mask, cpu); 1460 work_on_cpu(migrator, tmigr_trigger_active, NULL); 1461 } 1462 1463 return 0; 1464 } 1465 1466 static int tmigr_cpu_online(unsigned int cpu) 1467 { 1468 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); 1469 1470 /* Check whether CPU data was successfully initialized */ 1471 if (WARN_ON_ONCE(!tmc->tmgroup)) 1472 return -EINVAL; 1473 1474 raw_spin_lock_irq(&tmc->lock); 1475 trace_tmigr_cpu_online(tmc); 1476 tmc->idle = timer_base_is_idle(); 1477 if (!tmc->idle) 1478 __tmigr_cpu_activate(tmc); 1479 tmc->online = true; 1480 raw_spin_unlock_irq(&tmc->lock); 1481 return 0; 1482 } 1483 1484 static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl, 1485 int node) 1486 { 1487 union tmigr_state s; 1488 1489 raw_spin_lock_init(&group->lock); 1490 1491 group->level = lvl; 1492 group->numa_node = lvl < tmigr_crossnode_level ? node : NUMA_NO_NODE; 1493 1494 group->num_children = 0; 1495 1496 s.migrator = TMIGR_NONE; 1497 s.active = 0; 1498 s.seq = 0; 1499 atomic_set(&group->migr_state, s.state); 1500 1501 timerqueue_init_head(&group->events); 1502 timerqueue_init(&group->groupevt.nextevt); 1503 group->groupevt.nextevt.expires = KTIME_MAX; 1504 WRITE_ONCE(group->next_expiry, KTIME_MAX); 1505 group->groupevt.ignore = true; 1506 } 1507 1508 static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node, 1509 unsigned int lvl) 1510 { 1511 struct tmigr_group *tmp, *group = NULL; 1512 1513 lockdep_assert_held(&tmigr_mutex); 1514 1515 /* Try to attach to an existing group first */ 1516 list_for_each_entry(tmp, &tmigr_level_list[lvl], list) { 1517 /* 1518 * If @lvl is below the cross NUMA node level, check whether 1519 * this group belongs to the same NUMA node. 1520 */ 1521 if (lvl < tmigr_crossnode_level && tmp->numa_node != node) 1522 continue; 1523 1524 /* Capacity left? */ 1525 if (tmp->num_children >= TMIGR_CHILDREN_PER_GROUP) 1526 continue; 1527 1528 /* 1529 * TODO: A possible further improvement: Make sure that all CPU 1530 * siblings end up in the same group of the lowest level of the 1531 * hierarchy. Rely on the topology sibling mask would be a 1532 * reasonable solution. 1533 */ 1534 1535 group = tmp; 1536 break; 1537 } 1538 1539 if (group) 1540 return group; 1541 1542 /* Allocate and set up a new group */ 1543 group = kzalloc_node(sizeof(*group), GFP_KERNEL, node); 1544 if (!group) 1545 return ERR_PTR(-ENOMEM); 1546 1547 tmigr_init_group(group, lvl, node); 1548 1549 /* Setup successful. Add it to the hierarchy */ 1550 list_add(&group->list, &tmigr_level_list[lvl]); 1551 trace_tmigr_group_set(group); 1552 return group; 1553 } 1554 1555 static void tmigr_connect_child_parent(struct tmigr_group *child, 1556 struct tmigr_group *parent, 1557 bool activate) 1558 { 1559 struct tmigr_walk data; 1560 1561 raw_spin_lock_irq(&child->lock); 1562 raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING); 1563 1564 child->parent = parent; 1565 child->childmask = BIT(parent->num_children++); 1566 1567 raw_spin_unlock(&parent->lock); 1568 raw_spin_unlock_irq(&child->lock); 1569 1570 trace_tmigr_connect_child_parent(child); 1571 1572 if (!activate) 1573 return; 1574 1575 /* 1576 * To prevent inconsistent states, active children need to be active in 1577 * the new parent as well. Inactive children are already marked inactive 1578 * in the parent group: 1579 * 1580 * * When new groups were created by tmigr_setup_groups() starting from 1581 * the lowest level (and not higher then one level below the current 1582 * top level), then they are not active. They will be set active when 1583 * the new online CPU comes active. 1584 * 1585 * * But if a new group above the current top level is required, it is 1586 * mandatory to propagate the active state of the already existing 1587 * child to the new parent. So tmigr_connect_child_parent() is 1588 * executed with the formerly top level group (child) and the newly 1589 * created group (parent). 1590 * 1591 * * It is ensured that the child is active, as this setup path is 1592 * executed in hotplug prepare callback. This is exectued by an 1593 * already connected and !idle CPU. Even if all other CPUs go idle, 1594 * the CPU executing the setup will be responsible up to current top 1595 * level group. And the next time it goes inactive, it will release 1596 * the new childmask and parent to subsequent walkers through this 1597 * @child. Therefore propagate active state unconditionally. 1598 */ 1599 data.childmask = child->childmask; 1600 1601 /* 1602 * There is only one new level per time (which is protected by 1603 * tmigr_mutex). When connecting the child and the parent and set the 1604 * child active when the parent is inactive, the parent needs to be the 1605 * uppermost level. Otherwise there went something wrong! 1606 */ 1607 WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent); 1608 } 1609 1610 static int tmigr_setup_groups(unsigned int cpu, unsigned int node) 1611 { 1612 struct tmigr_group *group, *child, **stack; 1613 int top = 0, err = 0, i = 0; 1614 struct list_head *lvllist; 1615 1616 stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL); 1617 if (!stack) 1618 return -ENOMEM; 1619 1620 do { 1621 group = tmigr_get_group(cpu, node, i); 1622 if (IS_ERR(group)) { 1623 err = PTR_ERR(group); 1624 break; 1625 } 1626 1627 top = i; 1628 stack[i++] = group; 1629 1630 /* 1631 * When booting only less CPUs of a system than CPUs are 1632 * available, not all calculated hierarchy levels are required. 1633 * 1634 * The loop is aborted as soon as the highest level, which might 1635 * be different from tmigr_hierarchy_levels, contains only a 1636 * single group. 1637 */ 1638 if (group->parent || i == tmigr_hierarchy_levels || 1639 (list_empty(&tmigr_level_list[i]) && 1640 list_is_singular(&tmigr_level_list[i - 1]))) 1641 break; 1642 1643 } while (i < tmigr_hierarchy_levels); 1644 1645 while (i > 0) { 1646 group = stack[--i]; 1647 1648 if (err < 0) { 1649 list_del(&group->list); 1650 kfree(group); 1651 continue; 1652 } 1653 1654 WARN_ON_ONCE(i != group->level); 1655 1656 /* 1657 * Update tmc -> group / child -> group connection 1658 */ 1659 if (i == 0) { 1660 struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu); 1661 1662 raw_spin_lock_irq(&group->lock); 1663 1664 tmc->tmgroup = group; 1665 tmc->childmask = BIT(group->num_children++); 1666 1667 raw_spin_unlock_irq(&group->lock); 1668 1669 trace_tmigr_connect_cpu_parent(tmc); 1670 1671 /* There are no children that need to be connected */ 1672 continue; 1673 } else { 1674 child = stack[i - 1]; 1675 /* Will be activated at online time */ 1676 tmigr_connect_child_parent(child, group, false); 1677 } 1678 1679 /* check if uppermost level was newly created */ 1680 if (top != i) 1681 continue; 1682 1683 WARN_ON_ONCE(top == 0); 1684 1685 lvllist = &tmigr_level_list[top]; 1686 if (group->num_children == 1 && list_is_singular(lvllist)) { 1687 /* 1688 * The target CPU must never do the prepare work, except 1689 * on early boot when the boot CPU is the target. Otherwise 1690 * it may spuriously activate the old top level group inside 1691 * the new one (nevertheless whether old top level group is 1692 * active or not) and/or release an uninitialized childmask. 1693 */ 1694 WARN_ON_ONCE(cpu == raw_smp_processor_id()); 1695 1696 lvllist = &tmigr_level_list[top - 1]; 1697 list_for_each_entry(child, lvllist, list) { 1698 if (child->parent) 1699 continue; 1700 1701 tmigr_connect_child_parent(child, group, true); 1702 } 1703 } 1704 } 1705 1706 kfree(stack); 1707 1708 return err; 1709 } 1710 1711 static int tmigr_add_cpu(unsigned int cpu) 1712 { 1713 int node = cpu_to_node(cpu); 1714 int ret; 1715 1716 mutex_lock(&tmigr_mutex); 1717 ret = tmigr_setup_groups(cpu, node); 1718 mutex_unlock(&tmigr_mutex); 1719 1720 return ret; 1721 } 1722 1723 static int tmigr_cpu_prepare(unsigned int cpu) 1724 { 1725 struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu); 1726 int ret = 0; 1727 1728 /* Not first online attempt? */ 1729 if (tmc->tmgroup) 1730 return ret; 1731 1732 raw_spin_lock_init(&tmc->lock); 1733 timerqueue_init(&tmc->cpuevt.nextevt); 1734 tmc->cpuevt.nextevt.expires = KTIME_MAX; 1735 tmc->cpuevt.ignore = true; 1736 tmc->cpuevt.cpu = cpu; 1737 tmc->remote = false; 1738 WRITE_ONCE(tmc->wakeup, KTIME_MAX); 1739 1740 ret = tmigr_add_cpu(cpu); 1741 if (ret < 0) 1742 return ret; 1743 1744 if (tmc->childmask == 0) 1745 return -EINVAL; 1746 1747 return ret; 1748 } 1749 1750 static int __init tmigr_init(void) 1751 { 1752 unsigned int cpulvl, nodelvl, cpus_per_node, i; 1753 unsigned int nnodes = num_possible_nodes(); 1754 unsigned int ncpus = num_possible_cpus(); 1755 int ret = -ENOMEM; 1756 1757 BUILD_BUG_ON_NOT_POWER_OF_2(TMIGR_CHILDREN_PER_GROUP); 1758 1759 /* Nothing to do if running on UP */ 1760 if (ncpus == 1) 1761 return 0; 1762 1763 /* 1764 * Calculate the required hierarchy levels. Unfortunately there is no 1765 * reliable information available, unless all possible CPUs have been 1766 * brought up and all NUMA nodes are populated. 1767 * 1768 * Estimate the number of levels with the number of possible nodes and 1769 * the number of possible CPUs. Assume CPUs are spread evenly across 1770 * nodes. We cannot rely on cpumask_of_node() because it only works for 1771 * online CPUs. 1772 */ 1773 cpus_per_node = DIV_ROUND_UP(ncpus, nnodes); 1774 1775 /* Calc the hierarchy levels required to hold the CPUs of a node */ 1776 cpulvl = DIV_ROUND_UP(order_base_2(cpus_per_node), 1777 ilog2(TMIGR_CHILDREN_PER_GROUP)); 1778 1779 /* Calculate the extra levels to connect all nodes */ 1780 nodelvl = DIV_ROUND_UP(order_base_2(nnodes), 1781 ilog2(TMIGR_CHILDREN_PER_GROUP)); 1782 1783 tmigr_hierarchy_levels = cpulvl + nodelvl; 1784 1785 /* 1786 * If a NUMA node spawns more than one CPU level group then the next 1787 * level(s) of the hierarchy contains groups which handle all CPU groups 1788 * of the same NUMA node. The level above goes across NUMA nodes. Store 1789 * this information for the setup code to decide in which level node 1790 * matching is no longer required. 1791 */ 1792 tmigr_crossnode_level = cpulvl; 1793 1794 tmigr_level_list = kcalloc(tmigr_hierarchy_levels, sizeof(struct list_head), GFP_KERNEL); 1795 if (!tmigr_level_list) 1796 goto err; 1797 1798 for (i = 0; i < tmigr_hierarchy_levels; i++) 1799 INIT_LIST_HEAD(&tmigr_level_list[i]); 1800 1801 pr_info("Timer migration: %d hierarchy levels; %d children per group;" 1802 " %d crossnode level\n", 1803 tmigr_hierarchy_levels, TMIGR_CHILDREN_PER_GROUP, 1804 tmigr_crossnode_level); 1805 1806 ret = cpuhp_setup_state(CPUHP_TMIGR_PREPARE, "tmigr:prepare", 1807 tmigr_cpu_prepare, NULL); 1808 if (ret) 1809 goto err; 1810 1811 ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online", 1812 tmigr_cpu_online, tmigr_cpu_offline); 1813 if (ret) 1814 goto err; 1815 1816 return 0; 1817 1818 err: 1819 pr_err("Timer migration setup failed\n"); 1820 return ret; 1821 } 1822 early_initcall(tmigr_init); 1823